repo_name | path | copies | size | content | license
---|---|---|---|---|---
xray/xray | xarray/plot/facetgrid.py | 1 | 21774 | import functools
import itertools
import warnings
import numpy as np
from ..core.formatting import format_item
from .utils import (
_infer_xy_labels,
_process_cmap_cbar_kwargs,
import_matplotlib_pyplot,
label_from_attrs,
)
# Overrides axes.labelsize, xtick.major.size, ytick.major.size
# from mpl.rcParams
_FONTSIZE = "small"
# For major ticks on x, y axes
_NTICKS = 5
def _nicetitle(coord, value, maxchar, template):
"""
Put coord, value in template and truncate at maxchar
"""
prettyvalue = format_item(value, quote_strings=False)
title = template.format(coord=coord, value=prettyvalue)
if len(title) > maxchar:
title = title[: (maxchar - 3)] + "..."
return title
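# A quick illustration of _nicetitle (values here are hypothetical):
#   _nicetitle("time", 3, maxchar=30, template="{coord} = {value}")  ->  "time = 3"
# Longer results are truncated to `maxchar` characters and end with "...".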
class FacetGrid:
"""
Initialize the matplotlib figure and FacetGrid object.
The :class:`FacetGrid` is an object that links an xarray DataArray to
a matplotlib figure with a particular structure.
In particular, :class:`FacetGrid` is used to draw plots with multiple
Axes where each Axes shows the same relationship conditioned on
different levels of some dimension. It's possible to condition on up to
two variables by assigning variables to the rows and columns of the
grid.
The general approach to plotting here is called "small multiples",
where the same kind of plot is repeated multiple times, and the
specific use of small multiples to display the same relationship
conditioned on one or more other variables is often called a "trellis
plot".
The basic workflow is to initialize the :class:`FacetGrid` object with
the DataArray and the variable names that are used to structure the grid.
Then plotting functions can be applied to each subset by calling
:meth:`FacetGrid.map_dataarray` or :meth:`FacetGrid.map`.
Attributes
----------
axes : numpy object array
Contains axes in corresponding position, as returned from
plt.subplots
col_labels : list
list of :class:`matplotlib.text.Text` instances corresponding to column titles.
row_labels : list
list of :class:`matplotlib.text.Text` instances corresponding to row titles.
fig : matplotlib.Figure
The figure containing all the axes
name_dicts : numpy object array
Contains dictionaries mapping coordinate names to values. None is
used as a sentinel value for axes which should remain empty, i.e.
sometimes the bottom right grid
"""
def __init__(
self,
data,
col=None,
row=None,
col_wrap=None,
sharex=True,
sharey=True,
figsize=None,
aspect=1,
size=3,
subplot_kws=None,
):
"""
Parameters
----------
data : DataArray
xarray DataArray to be plotted
row, col : strings
Dimension names that define subsets of the data, which will be drawn
on separate facets in the grid.
col_wrap : int, optional
"Wrap" the column variable at this width, so that the column facets
sharex : bool, optional
If true, the facets will share x axes
sharey : bool, optional
If true, the facets will share y axes
figsize : tuple, optional
A tuple (width, height) of the figure in inches.
If set, overrides ``size`` and ``aspect``.
aspect : scalar, optional
Aspect ratio of each facet, so that ``aspect * size`` gives the
width of each facet in inches
size : scalar, optional
Height (in inches) of each facet. See also: ``aspect``
subplot_kws : dict, optional
Dictionary of keyword arguments for matplotlib subplots
"""
plt = import_matplotlib_pyplot()
# Handle corner case of nonunique coordinates
rep_col = col is not None and not data[col].to_index().is_unique
rep_row = row is not None and not data[row].to_index().is_unique
if rep_col or rep_row:
raise ValueError(
"Coordinates used for faceting cannot "
"contain repeated (nonunique) values."
)
# single_group is the grouping variable, if there is exactly one
if col and row:
single_group = False
nrow = len(data[row])
ncol = len(data[col])
nfacet = nrow * ncol
if col_wrap is not None:
warnings.warn("Ignoring col_wrap since both col and row " "were passed")
elif row and not col:
single_group = row
elif not row and col:
single_group = col
else:
raise ValueError("Pass a coordinate name as an argument for row or col")
# Compute grid shape
if single_group:
nfacet = len(data[single_group])
if col:
# idea - could add heuristic for nice shapes like 3x4
ncol = nfacet
if row:
ncol = 1
if col_wrap is not None:
# Overrides previous settings
ncol = col_wrap
nrow = int(np.ceil(nfacet / ncol))
# Set the subplot kwargs
subplot_kws = {} if subplot_kws is None else subplot_kws
if figsize is None:
# Calculate the base figure size with extra horizontal space for a
# colorbar
cbar_space = 1
figsize = (ncol * size * aspect + cbar_space, nrow * size)
fig, axes = plt.subplots(
nrow,
ncol,
sharex=sharex,
sharey=sharey,
squeeze=False,
figsize=figsize,
subplot_kw=subplot_kws,
)
# Set up the lists of names for the row and column facet variables
col_names = list(data[col].values) if col else []
row_names = list(data[row].values) if row else []
if single_group:
full = [{single_group: x} for x in data[single_group].values]
empty = [None for x in range(nrow * ncol - len(full))]
name_dicts = full + empty
else:
rowcols = itertools.product(row_names, col_names)
name_dicts = [{row: r, col: c} for r, c in rowcols]
name_dicts = np.array(name_dicts).reshape(nrow, ncol)
# Set up the class attributes
# ---------------------------
# First the public API
self.data = data
self.name_dicts = name_dicts
self.fig = fig
self.axes = axes
self.row_names = row_names
self.col_names = col_names
self.figlegend = None
# Next the private variables
self._single_group = single_group
self._nrow = nrow
self._row_var = row
self._ncol = ncol
self._col_var = col
self._col_wrap = col_wrap
self.row_labels = [None] * nrow
self.col_labels = [None] * ncol
self._x_var = None
self._y_var = None
self._cmap_extend = None
self._mappables = []
self._finalized = False
@property
def _left_axes(self):
return self.axes[:, 0]
@property
def _bottom_axes(self):
return self.axes[-1, :]
def map_dataarray(self, func, x, y, **kwargs):
"""
Apply a plotting function to a 2d facet's subset of the data.
This is more convenient and less general than ``FacetGrid.map``
Parameters
----------
func : callable
A plotting function with the same signature as a 2d xarray
plotting method such as `xarray.plot.imshow`
x, y : string
Names of the coordinates to plot on x, y axes
kwargs :
additional keyword arguments to func
Returns
-------
self : FacetGrid object
"""
if kwargs.get("cbar_ax", None) is not None:
raise ValueError("cbar_ax not supported by FacetGrid.")
cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(
func, self.data.values, **kwargs
)
self._cmap_extend = cmap_params.get("extend")
# Order is important
func_kwargs = {
k: v
for k, v in kwargs.items()
if k not in {"cmap", "colors", "cbar_kwargs", "levels"}
}
func_kwargs.update(cmap_params)
func_kwargs.update({"add_colorbar": False, "add_labels": False})
# Get x, y labels for the first subplot
x, y = _infer_xy_labels(
darray=self.data.loc[self.name_dicts.flat[0]],
x=x,
y=y,
imshow=func.__name__ == "imshow",
rgb=kwargs.get("rgb", None),
)
for d, ax in zip(self.name_dicts.flat, self.axes.flat):
# None is the sentinel value
if d is not None:
subset = self.data.loc[d]
mappable = func(
subset, x=x, y=y, ax=ax, **func_kwargs, _is_facetgrid=True
)
self._mappables.append(mappable)
self._finalize_grid(x, y)
if kwargs.get("add_colorbar", True):
self.add_colorbar(**cbar_kwargs)
return self
def map_dataarray_line(
self, func, x, y, hue, add_legend=True, _labels=None, **kwargs
):
from .plot import _infer_line_data
for d, ax in zip(self.name_dicts.flat, self.axes.flat):
# None is the sentinel value
if d is not None:
subset = self.data.loc[d]
mappable = func(
subset,
x=x,
y=y,
ax=ax,
hue=hue,
add_legend=False,
_labels=False,
**kwargs,
)
self._mappables.append(mappable)
_, _, hueplt, xlabel, ylabel, huelabel = _infer_line_data(
darray=self.data.loc[self.name_dicts.flat[0]], x=x, y=y, hue=hue
)
self._hue_var = hueplt
self._hue_label = huelabel
self._finalize_grid(xlabel, ylabel)
if add_legend and hueplt is not None and huelabel is not None:
self.add_legend()
return self
def map_dataset(
self, func, x=None, y=None, hue=None, hue_style=None, add_guide=None, **kwargs
):
from .dataset_plot import _infer_meta_data, _parse_size
kwargs["add_guide"] = False
kwargs["_is_facetgrid"] = True
if kwargs.get("markersize", None):
kwargs["size_mapping"] = _parse_size(
self.data[kwargs["markersize"]], kwargs.pop("size_norm", None)
)
meta_data = _infer_meta_data(self.data, x, y, hue, hue_style, add_guide)
kwargs["meta_data"] = meta_data
if hue and meta_data["hue_style"] == "continuous":
cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(
func, self.data[hue].values, **kwargs
)
kwargs["meta_data"]["cmap_params"] = cmap_params
kwargs["meta_data"]["cbar_kwargs"] = cbar_kwargs
for d, ax in zip(self.name_dicts.flat, self.axes.flat):
# None is the sentinel value
if d is not None:
subset = self.data.loc[d]
maybe_mappable = func(
ds=subset, x=x, y=y, hue=hue, hue_style=hue_style, ax=ax, **kwargs
)
# TODO: this is needed to get legends to work.
# but maybe_mappable is a list in that case :/
self._mappables.append(maybe_mappable)
self._finalize_grid(meta_data["xlabel"], meta_data["ylabel"])
if hue:
self._hue_label = meta_data.pop("hue_label", None)
if meta_data["add_legend"]:
self._hue_var = meta_data["hue"]
self.add_legend()
elif meta_data["add_colorbar"]:
self.add_colorbar(label=self._hue_label, **cbar_kwargs)
return self
def _finalize_grid(self, *axlabels):
"""Finalize the annotations and layout."""
if not self._finalized:
self.set_axis_labels(*axlabels)
self.set_titles()
self.fig.tight_layout()
for ax, namedict in zip(self.axes.flat, self.name_dicts.flat):
if namedict is None:
ax.set_visible(False)
self._finalized = True
def add_legend(self, **kwargs):
figlegend = self.fig.legend(
handles=self._mappables[-1],
labels=list(self._hue_var.values),
title=self._hue_label,
loc="center right",
**kwargs,
)
self.figlegend = figlegend
# Draw the plot to set the bounding boxes correctly
self.fig.draw(self.fig.canvas.get_renderer())
# Calculate and set the new width of the figure so the legend fits
legend_width = figlegend.get_window_extent().width / self.fig.dpi
figure_width = self.fig.get_figwidth()
self.fig.set_figwidth(figure_width + legend_width)
# Draw the plot again to get the new transformations
self.fig.draw(self.fig.canvas.get_renderer())
# Now calculate how much space we need on the right side
legend_width = figlegend.get_window_extent().width / self.fig.dpi
space_needed = legend_width / (figure_width + legend_width) + 0.02
# margin = .01
# _space_needed = margin + space_needed
right = 1 - space_needed
# Place the subplot axes to give space for the legend
self.fig.subplots_adjust(right=right)
def add_colorbar(self, **kwargs):
"""Draw a colorbar"""
kwargs = kwargs.copy()
if self._cmap_extend is not None:
kwargs.setdefault("extend", self._cmap_extend)
# don't pass extend as a kwarg if it is in the mappable
if hasattr(self._mappables[-1], "extend"):
kwargs.pop("extend", None)
if "label" not in kwargs:
kwargs.setdefault("label", label_from_attrs(self.data))
self.cbar = self.fig.colorbar(
self._mappables[-1], ax=list(self.axes.flat), **kwargs
)
return self
def set_axis_labels(self, x_var=None, y_var=None):
"""Set axis labels on the left column and bottom row of the grid."""
if x_var is not None:
if x_var in self.data.coords:
self._x_var = x_var
self.set_xlabels(label_from_attrs(self.data[x_var]))
else:
# x_var is a string
self.set_xlabels(x_var)
if y_var is not None:
if y_var in self.data.coords:
self._y_var = y_var
self.set_ylabels(label_from_attrs(self.data[y_var]))
else:
self.set_ylabels(y_var)
return self
def set_xlabels(self, label=None, **kwargs):
"""Label the x axis on the bottom row of the grid."""
if label is None:
label = label_from_attrs(self.data[self._x_var])
for ax in self._bottom_axes:
ax.set_xlabel(label, **kwargs)
return self
def set_ylabels(self, label=None, **kwargs):
"""Label the y axis on the left column of the grid."""
if label is None:
label = label_from_attrs(self.data[self._y_var])
for ax in self._left_axes:
ax.set_ylabel(label, **kwargs)
return self
def set_titles(self, template="{coord} = {value}", maxchar=30, size=None, **kwargs):
"""
Draw titles either above each facet or on the grid margins.
Parameters
----------
template : string
Template for plot titles containing {coord} and {value}
maxchar : int
Truncate titles at maxchar
kwargs : keyword args
additional arguments to matplotlib.text
Returns
-------
self: FacetGrid object
"""
import matplotlib as mpl
if size is None:
size = mpl.rcParams["axes.labelsize"]
nicetitle = functools.partial(_nicetitle, maxchar=maxchar, template=template)
if self._single_group:
for d, ax in zip(self.name_dicts.flat, self.axes.flat):
# Only label the ones with data
if d is not None:
coord, value = list(d.items()).pop()
title = nicetitle(coord, value, maxchar=maxchar)
ax.set_title(title, size=size, **kwargs)
else:
# The row titles on the right edge of the grid
for index, (ax, row_name, handle) in enumerate(
zip(self.axes[:, -1], self.row_names, self.row_labels)
):
title = nicetitle(coord=self._row_var, value=row_name, maxchar=maxchar)
if not handle:
self.row_labels[index] = ax.annotate(
title,
xy=(1.02, 0.5),
xycoords="axes fraction",
rotation=270,
ha="left",
va="center",
**kwargs,
)
else:
handle.set_text(title)
# The column titles on the top row
for index, (ax, col_name, handle) in enumerate(
zip(self.axes[0, :], self.col_names, self.col_labels)
):
title = nicetitle(coord=self._col_var, value=col_name, maxchar=maxchar)
if not handle:
self.col_labels[index] = ax.set_title(title, size=size, **kwargs)
else:
handle.set_text(title)
return self
def set_ticks(self, max_xticks=_NTICKS, max_yticks=_NTICKS, fontsize=_FONTSIZE):
"""
Set and control tick behavior
Parameters
----------
max_xticks, max_yticks : int, optional
Maximum number of labeled ticks to plot on x, y axes
fontsize : string or int
Font size as used by matplotlib text
Returns
-------
self : FacetGrid object
"""
from matplotlib.ticker import MaxNLocator
# Both are necessary
x_major_locator = MaxNLocator(nbins=max_xticks)
y_major_locator = MaxNLocator(nbins=max_yticks)
for ax in self.axes.flat:
ax.xaxis.set_major_locator(x_major_locator)
ax.yaxis.set_major_locator(y_major_locator)
for tick in itertools.chain(
ax.xaxis.get_major_ticks(), ax.yaxis.get_major_ticks()
):
tick.label1.set_fontsize(fontsize)
return self
def map(self, func, *args, **kwargs):
"""
Apply a plotting function to each facet's subset of the data.
Parameters
----------
func : callable
A plotting function that takes data and keyword arguments. It
must plot to the currently active matplotlib Axes and take a
`color` keyword argument. If faceting on the `hue` dimension,
it must also take a `label` keyword argument.
args : strings
Column names in self.data that identify variables with data to
plot. The data for each variable is passed to `func` in the
order the variables are specified in the call.
kwargs : keyword arguments
All keyword arguments are passed to the plotting function.
Returns
-------
self : FacetGrid object
"""
plt = import_matplotlib_pyplot()
for ax, namedict in zip(self.axes.flat, self.name_dicts.flat):
if namedict is not None:
data = self.data.loc[namedict]
plt.sca(ax)
innerargs = [data[a].values for a in args]
maybe_mappable = func(*innerargs, **kwargs)
# TODO: better way to verify that an artist is mappable?
# https://stackoverflow.com/questions/33023036/is-it-possible-to-detect-if-a-matplotlib-artist-is-a-mappable-suitable-for-use-w#33023522
if maybe_mappable and hasattr(maybe_mappable, "autoscale_None"):
self._mappables.append(maybe_mappable)
self._finalize_grid(*args[:2])
return self
def _easy_facetgrid(
data,
plotfunc,
kind,
x=None,
y=None,
row=None,
col=None,
col_wrap=None,
sharex=True,
sharey=True,
aspect=None,
size=None,
subplot_kws=None,
ax=None,
figsize=None,
**kwargs,
):
"""
Convenience method to call xarray.plot.FacetGrid from 2d plotting methods
kwargs are the arguments to 2d plotting method
"""
if ax is not None:
raise ValueError("Can't use axes when making faceted plots.")
if aspect is None:
aspect = 1
if size is None:
size = 3
elif figsize is not None:
raise ValueError("cannot provide both `figsize` and `size` arguments")
g = FacetGrid(
data=data,
col=col,
row=row,
col_wrap=col_wrap,
sharex=sharex,
sharey=sharey,
figsize=figsize,
aspect=aspect,
size=size,
subplot_kws=subplot_kws,
)
if kind == "line":
return g.map_dataarray_line(plotfunc, x, y, **kwargs)
if kind == "dataarray":
return g.map_dataarray(plotfunc, x, y, **kwargs)
if kind == "dataset":
return g.map_dataset(plotfunc, x, y, **kwargs)
| apache-2.0 |
yunfeilu/scikit-learn | sklearn/cluster/tests/test_k_means.py | 63 | 26190 | """Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
pred_x = assert_warns(DataConversionWarning, km.predict, X)
assert_array_equal(km.labels_, pred_x)
def test_labels_assignment_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of clusters centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
@if_safe_multiprocessing_with_blas
def test_k_means_plus_plus_init_2_jobs():
if sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
# check that a warning is raised if the precompute_distances flag is not
# supported
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_fortran_aligned_data():
# Check that KMeans will work well, even if X is fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
# Check that a warning is raised, as the number of clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
# should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
# that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
assert_raises(ValueError,
MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
# Check that k_means with a bad initialization does not yield a singleton
# Starting with bad centers that are quickly ignored should not
# result in a repositioning of the centers to the center of mass that
# would lead to collapsed centers, which in turn makes the clustering
# dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
# centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
X_int = np.array(X_list, dtype=np.int32)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_list),
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_list),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_n_init():
# Check that increasing the number of init increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
# too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
def test_x_squared_norms_init_centroids():
"""Test that x_squared_norms can be None in _init_centroids"""
from sklearn.cluster.k_means_ import _init_centroids
X_norms = np.sum(X**2, axis=1)
precompute = _init_centroids(
X, 3, "k-means++", random_state=0, x_squared_norms=X_norms)
assert_array_equal(
precompute,
_init_centroids(X, 3, "k-means++", random_state=0))
| bsd-3-clause |
wlamond/scikit-learn | sklearn/metrics/classification.py | 4 | 72788 | """Metrics to assess performance on classification tasks given class predictions
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Jatin Shah <[email protected]>
# Saurabh Jha <[email protected]>
# Bernardo Stein <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import assert_all_finite
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from ..exceptions import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
# We can't have more than one value in y_type => the set is no longer needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type == "binary":
unique_values = np.union1d(y_true, y_pred)
if len(unique_values) > 2:
y_type = "multiclass"
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
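# For example, _weighted_sum(np.array([1, 0, 1]), None, normalize=True) averages
# the scores (~0.667), while normalize=False simply sums them (2).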
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the fraction of correctly classified
samples (float), else return the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None, sample_weight=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Thus in binary classification, the count of true negatives is
:math:`C_{0,0}`, false negatives is :math:`C_{1,0}`, true positives is
:math:`C_{1,1}` and false positives is :math:`C_{0,1}`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<https://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
In the binary case, we can extract true positives, etc as follows:
>>> tn, fp, fn, tp = confusion_matrix([0, 1, 0, 1], [1, 1, 1, 0]).ravel()
>>> (tn, fp, fn, tp)
(0, 2, 1, 1)
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if np.all([l not in y_true for l in labels]):
raise ValueError("At least one label specified must be in y_true")
if sample_weight is None:
sample_weight = np.ones(y_true.shape[0], dtype=np.int)
else:
sample_weight = np.asarray(sample_weight)
check_consistent_length(sample_weight, y_true, y_pred)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
# also eliminate weights of eliminated items
sample_weight = sample_weight[ind]
CM = coo_matrix((sample_weight, (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None, weights=None, sample_weight=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1]_, a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2]_.
Read more in the :ref:`User Guide <cohen_kappa>`.
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
weights : str, optional
Weighting type to calculate the score. None means no weighting;
"linear" means linear weighting; "quadratic" means quadratic weighting.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] `R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistics 34(4):555-596.
<http://www.mitpressjournals.org/doi/abs/10.1162/coli.07-034-R2#.V0J1MJMrIWo>`_
.. [3] `Wikipedia entry for the Cohen's kappa.
<https://en.wikipedia.org/wiki/Cohen%27s_kappa>`_
"""
confusion = confusion_matrix(y1, y2, labels=labels,
sample_weight=sample_weight)
n_classes = confusion.shape[0]
sum0 = np.sum(confusion, axis=0)
sum1 = np.sum(confusion, axis=1)
expected = np.outer(sum0, sum1) / np.sum(sum0)
if weights is None:
w_mat = np.ones([n_classes, n_classes], dtype=np.int)
w_mat.flat[:: n_classes + 1] = 0
elif weights == "linear" or weights == "quadratic":
w_mat = np.zeros([n_classes, n_classes], dtype=np.int)
w_mat += np.arange(n_classes)
if weights == "linear":
w_mat = np.abs(w_mat - w_mat.T)
else:
w_mat = (w_mat - w_mat.T) ** 2
else:
raise ValueError("Unknown kappa weighting type.")
k = np.sum(w_mat * confusion) / np.sum(w_mat * expected)
return 1 - k
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare the set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<https://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred, sample_weight=None):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
sample_weight : array-like of shape = [n_samples], default None
Sample weights.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<https://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
mean_yt = np.average(y_true, weights=sample_weight)
mean_yp = np.average(y_pred, weights=sample_weight)
y_true_u_cent = y_true - mean_yt
y_pred_u_cent = y_pred - mean_yp
cov_ytyp = np.average(y_true_u_cent * y_pred_u_cent, weights=sample_weight)
var_yt = np.average(y_true_u_cent ** 2, weights=sample_weight)
var_yp = np.average(y_pred_u_cent ** 2, weights=sample_weight)
mcc = cov_ytyp / np.sqrt(var_yt * var_yp)
if np.isnan(mcc):
return 0.
else:
return mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
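# With normalize=True, ``score`` is the fraction of correct predictions;
# with normalize=False it is the (weighted) count of correct predictions,
# so the loss is the (weighted) sample count minus that number.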
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
support : int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<https://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary':
if y_type == 'binary':
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
else:
raise ValueError("Target is %s but average='binary'. Please "
"choose another average setting." % y_type)
elif pos_label not in (None, 1):
warnings.warn("Note that pos_label (set to %r) is ignored when "
"average != 'binary' (got %r). You may use "
"labels=[pos_label] to specify a single positive class."
% (pos_label, average), UserWarning)
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
# Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
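# For 'samples' averaging, counts are taken per row (one value per sample);
# otherwise they are taken per column (one value per label).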
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
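# ``tp_bins`` holds the encoded label of every correctly classified sample,
# so a (weighted) bincount over it gives the per-class true-positive counts.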
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
# Finally, we have all our sufficient statistics. Divide! #
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
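# F_beta = (1 + beta^2) * P * R / (beta^2 * P + R); with beta == 1 this is
# the harmonic mean of precision and recall (the F1 score).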
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
# Average the results
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
The reported averages are a prevalence-weighted macro-average across
classes (equivalent to :func:`precision_recall_fscore_support` with
``average='weighted'``).
Note that in binary classification, recall of the positive class
is also known as "sensitivity"; recall of the negative class is
"specificity".
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if target_names is not None and len(labels) != len(target_names):
warnings.warn(
"labels size, {0}, does not match size of target_names, {1}"
.format(len(labels), len(target_names))
)
last_line_heading = 'avg / total'
if target_names is None:
target_names = [u'%s' % l for l in labels]
name_width = max(len(cn) for cn in target_names)
width = max(name_width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
head_fmt = u'{:>{width}s} ' + u' {:>9}' * len(headers)
report = head_fmt.format(u'', *headers, width=width)
report += u'\n\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
row_fmt = u'{:>{width}s} ' + u' {:>9.{digits}f}' * 3 + u' {:>9}\n'
rows = zip(target_names, p, r, f1, s)
for row in rows:
report += row_fmt.format(*row, width=width, digits=digits)
report += u'\n'
# compute averages
report += row_fmt.format(last_line_heading,
np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s),
np.sum(s),
width=width, digits=digits)
return report
def hamming_loss(y_true, y_pred, labels=None, sample_weight=None,
classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
labels : array, shape = [n_labels], optional (default=None)
Integer array of labels. If not provided, labels will be inferred
from y_true and y_pred.
.. versionadded:: 0.18
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
.. versionadded:: 0.18
classes : array, shape = [n_labels], optional
Integer array of labels.
.. deprecated:: 0.18
This parameter has been deprecated in favor of ``labels`` in
version 0.18 and will be removed in 0.20. Use ``labels`` instead.
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
In multiclass classification, the Hamming loss corresponds to the Hamming
distance between ``y_true`` and ``y_pred``, which is equivalent to the
subset ``zero_one_loss`` function.
In multilabel classification, the Hamming loss is different from the
subset zero-one loss. The zero-one loss considers the entire set of labels
for a given sample incorrect if it does not entirely match the true set of
labels. Hamming loss is more forgiving in that it penalizes only the individual
labels.
The Hamming loss is upperbounded by the subset zero-one loss. When
normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<https://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
if classes is not None:
warnings.warn("'classes' was renamed to 'labels' in version 0.18 and "
"will be removed in 0.20.", DeprecationWarning)
labels = classes
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if sample_weight is None:
weight_average = 1.
else:
weight_average = np.mean(sample_weight)
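# Multilabel case: count every individual label mismatch and normalize by
# n_samples * n_labels (scaled by the mean sample weight when weights are given).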
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred,
sample_weight=sample_weight)
return (n_differences /
(y_true.shape[0] * len(labels) * weight_average))
elif y_type in ["binary", "multiclass"]:
return _weighted_sum(y_true != y_pred, sample_weight, normalize=True)
else:
raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None,
labels=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. The log loss is only defined for two or more labels.
For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes) or (n_samples,)
Predicted probabilities, as returned by a classifier's
predict_proba method. If ``y_pred.shape = (n_samples,)``
the probabilities provided are assumed to be that of the
positive class. The labels in ``y_pred`` are assumed to be
ordered alphabetically, as done by
:class:`preprocessing.LabelBinarizer`.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
labels : array-like, optional (default=None)
If not provided, labels will be inferred from y_true. If ``labels``
is ``None`` and ``y_pred`` has shape (n_samples,) the labels are
assumed to be binary and are inferred from ``y_true``.
.. versionadded:: 0.18
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
y_pred = check_array(y_pred, ensure_2d=False)
check_consistent_length(y_pred, y_true)
lb = LabelBinarizer()
if labels is not None:
lb.fit(labels)
else:
lb.fit(y_true)
if len(lb.classes_) == 1:
if labels is None:
raise ValueError('y_true contains only one label ({0}). Please '
'provide the true labels explicitly through the '
'labels argument.'.format(lb.classes_[0]))
else:
raise ValueError('The labels array needs to contain at least two '
'labels for log_loss, '
'got {0}.'.format(lb.classes_))
transformed_labels = lb.transform(y_true)
if transformed_labels.shape[1] == 1:
transformed_labels = np.append(1 - transformed_labels,
transformed_labels, axis=1)
# Clipping
y_pred = np.clip(y_pred, eps, 1 - eps)
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if y_pred.ndim == 1:
y_pred = y_pred[:, np.newaxis]
if y_pred.shape[1] == 1:
y_pred = np.append(1 - y_pred, y_pred, axis=1)
# Check if dimensions are consistent.
transformed_labels = check_array(transformed_labels)
if len(lb.classes_) != y_pred.shape[1]:
if labels is None:
raise ValueError("y_true and y_pred contain different number of "
"classes {0}, {1}. Please provide the true "
"labels explicitly through the labels argument. "
"Classes found in "
"y_true: {2}".format(transformed_labels.shape[1],
y_pred.shape[1],
lb.classes_))
else:
raise ValueError('The number of classes in labels is different '
'from that in y_pred. Classes found in '
'labels: {0}'.format(lb.classes_))
# Renormalize
y_pred /= y_pred.sum(axis=1)[:, np.newaxis]
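# Per-sample cross-entropy: -sum_k y_k * log(p_k) over the binarized true labels.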
loss = -(transformed_labels * np.log(y_pred)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
In binary class case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
In multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
contains all the labels. The multilabel margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<https://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
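# Crammer-Singer multiclass margin: the decision value of the true class
# minus the largest decision value among the other classes; ``mask`` is
# False at each sample's true-class column, so ~mask selects the true score.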
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) > 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int or str, default=None
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
.. [1] `Wikipedia entry for the Brier score.
<https://en.wikipedia.org/wiki/Brier_score>`_
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
assert_all_finite(y_true)
assert_all_finite(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
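# Brier score: (weighted) mean squared difference between the predicted
# probability of the positive class and the actual binary outcome.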
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
| bsd-3-clause |
gfyoung/pandas | pandas/tests/arrays/categorical/test_sorting.py | 3 | 5040 | import numpy as np
import pytest
from pandas import Categorical, Index
import pandas._testing as tm
class TestCategoricalSort:
def test_argsort(self):
c = Categorical([5, 3, 1, 4, 2], ordered=True)
expected = np.array([2, 4, 1, 3, 0])
tm.assert_numpy_array_equal(
c.argsort(ascending=True), expected, check_dtype=False
)
expected = expected[::-1]
tm.assert_numpy_array_equal(
c.argsort(ascending=False), expected, check_dtype=False
)
def test_numpy_argsort(self):
c = Categorical([5, 3, 1, 4, 2], ordered=True)
expected = np.array([2, 4, 1, 3, 0])
tm.assert_numpy_array_equal(np.argsort(c), expected, check_dtype=False)
tm.assert_numpy_array_equal(
np.argsort(c, kind="mergesort"), expected, check_dtype=False
)
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(c, axis=0)
msg = "the 'order' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(c, order="C")
def test_sort_values(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, cat.categories)
cat = Categorical(
["a", "c", "b", "d"], categories=["a", "b", "c", "d"], ordered=True
)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, cat.categories)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, cat.categories)
# sort (inplace order)
cat1 = cat.copy()
orig_codes = cat1._codes
cat1.sort_values(inplace=True)
assert cat1._codes is orig_codes
exp = np.array(["a", "b", "c", "d"], dtype=object)
tm.assert_numpy_array_equal(cat1.__array__(), exp)
tm.assert_index_equal(res.categories, cat.categories)
# reverse
cat = Categorical(["a", "c", "c", "b", "d"], ordered=True)
res = cat.sort_values(ascending=False)
exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object)
exp_categories = Index(["a", "b", "c", "d"])
tm.assert_numpy_array_equal(res.__array__(), exp_val)
tm.assert_index_equal(res.categories, exp_categories)
def test_sort_values_na_position(self):
# see gh-12882
cat = Categorical([5, 2, np.nan, 2, np.nan], ordered=True)
exp_categories = Index([2, 5])
exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan])
res = cat.sort_values() # default arguments
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, exp_categories)
exp = np.array([np.nan, np.nan, 2.0, 2.0, 5.0])
res = cat.sort_values(ascending=True, na_position="first")
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, exp_categories)
exp = np.array([np.nan, np.nan, 5.0, 2.0, 2.0])
res = cat.sort_values(ascending=False, na_position="first")
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, exp_categories)
exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan])
res = cat.sort_values(ascending=True, na_position="last")
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, exp_categories)
exp = np.array([5.0, 2.0, 2.0, np.nan, np.nan])
res = cat.sort_values(ascending=False, na_position="last")
tm.assert_numpy_array_equal(res.__array__(), exp)
tm.assert_index_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position="last")
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = Index(["a", "b", "c", "d"])
tm.assert_numpy_array_equal(res.__array__(), exp_val)
tm.assert_index_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position="first")
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = Index(["a", "b", "c", "d"])
tm.assert_numpy_array_equal(res.__array__(), exp_val)
tm.assert_index_equal(res.categories, exp_categories)
| bsd-3-clause |
printedheart/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_bernoulliGBM.py | 5 | 2569 | import sys, os
sys.path.insert(1, "../../../")
import h2o, tests
import numpy as np
from sklearn import ensemble
from sklearn.metrics import roc_auc_score
def bernoulliGBM():
#Log.info("Importing prostate.csv data...\n")
prostate_train = h2o.import_file(path=h2o.locate("smalldata/logreg/prostate_train.csv"))
#Log.info("Converting CAPSULE and RACE columns to factors...\n")
prostate_train["CAPSULE"] = prostate_train["CAPSULE"].asfactor()
#Log.info("H2O Summary of prostate frame:\n")
#prostate.summary()
# Import prostate_train.csv as numpy array for scikit comparison
trainData = np.loadtxt(h2o.locate("smalldata/logreg/prostate_train.csv"), delimiter=',', skiprows=1)
trainDataResponse = trainData[:,0]
trainDataFeatures = trainData[:,1:]
ntrees = 100
learning_rate = 0.1
depth = 5
min_rows = 10
# Build H2O GBM classification model:
#Log.info(paste("H2O GBM with parameters:\ndistribution = 'bernoulli', ntrees = ", ntrees, ", max_depth = 5,
# min_rows = 10, learn_rate = 0.1\n", sep = ""))
gbm_h2o = h2o.gbm(x=prostate_train[1:], y=prostate_train["CAPSULE"], ntrees=ntrees, learn_rate=learning_rate,
max_depth=depth, min_rows=min_rows, distribution="bernoulli")
# Build scikit GBM classification model
#Log.info("scikit GBM with same parameters\n")
gbm_sci = ensemble.GradientBoostingClassifier(learning_rate=learning_rate, n_estimators=ntrees, max_depth=depth,
min_samples_leaf=min_rows, max_features=None)
gbm_sci.fit(trainDataFeatures,trainDataResponse)
#Log.info("Importing prostate_test.csv data...\n")
prostate_test = h2o.import_file(path=h2o.locate("smalldata/logreg/prostate_test.csv"))
#Log.info("Converting CAPSULE and RACE columns to factors...\n")
prostate_test["CAPSULE"] = prostate_test["CAPSULE"].asfactor()
# Import prostate_test.csv as numpy array for scikit comparison
testData = np.loadtxt(h2o.locate("smalldata/logreg/prostate_test.csv"), delimiter=',', skiprows=1)
testDataResponse = testData[:,0]
testDataFeatures = testData[:,1:]
# Score on the test data and compare results
# scikit
auc_sci = roc_auc_score(testDataResponse, gbm_sci.predict_proba(testDataFeatures)[:,1])
# h2o
gbm_perf = gbm_h2o.model_performance(prostate_test)
auc_h2o = gbm_perf.auc()
#Log.info(paste("scikit AUC:", auc_sci, "\tH2O AUC:", auc_h2o))
assert auc_h2o >= auc_sci, "h2o (auc) performance degradation, with respect to scikit"
if __name__ == "__main__":
tests.run_test(sys.argv, bernoulliGBM)
| apache-2.0 |
OSUrobotics/privacy-interfaces | filtering/probability_filters/scripts/test/test_xyz_to_rpy.py | 1 | 2816 | #!/usr/bin/env python
# TEST PLATFORM for step #3 below.
# 3) Convert <x, y, a> to <r, theta, yaw>
# 4) Choose four corner poses (each containing one of r_min, r_max, yaw_min, yaw_max)
# 5) Project our PolygonStamped onto those four poses
# 6) (Run convex hull and then) plot the rectangle (polygon)
import rospy
import numpy
import random
from math import sqrt, atan2
from tf.transformations import quaternion_about_axis, quaternion_matrix
from matplotlib import pyplot
# Premise: object is at (0, 0) and robot has random (x, y, theta) centered at (0, 0, 0)
if __name__ == '__main__':
# Locate object
obj = [0.0, 0.0, 0.0]
# Locate robots
dev = 5.0
robots_xyw = []
for i in range(1000):
robot = [random.gauss(0.0, dev),
random.gauss(0.0, dev),
random.gauss(0.0, dev)]
robots_xyw.append(robot)
# Add robots for permuted min & max bounds
x = [robot[0] for robot in robots_xyw]
y = [robot[1] for robot in robots_xyw]
w = [robot[2] for robot in robots_xyw]
ranges = [[min(x), max(x)], [min(y), max(y)], [min(w), max(w)]]
for x_i in ranges[0]:
for y_i in ranges[1]:
for w_i in ranges[2]:
robots_xyw.append([x_i, y_i, w_i])
# Do conversions
robots_rty = []
for robot in robots_xyw:
# Calculate radius
dx = obj[0] - robot[0]
dy = obj[1] - robot[1]
r = sqrt(dx**2 + dy**2)
offset = [dx, dy, 0.0] # vector
offset = [el / r for el in offset] # normalize
# Calculate theta (angle of vector from object to robot)
theta = atan2(-1 * offset[1], -1 * offset[0]) # positive is counter-clockwise
# Calculate heading vector of robot
q_robot = quaternion_about_axis(robot[2], (0, 0, 1))
R_robot = quaternion_matrix(q_robot)
heading_robot = numpy.matrix([1, 0, 0, 1]) * numpy.matrix(R_robot).I
heading_robot /= heading_robot[0, 3] # ensure homogeneity isn't messing stuff up
heading_robot = heading_robot[0, 0:3].tolist()[0] # convert from homogeneous to...not
# Calculate camera yaw (angle from gaze vector to object line-of-sight vector)
cosine = numpy.dot(heading_robot, offset)
cross = numpy.cross(heading_robot, offset)
sine = cross[2]
yaw = -1 * atan2(sine, cosine) # positive is counter-clockwise
robots_rty.append([r, theta, yaw])
robots_xywrty = [xyz + rty for xyz, rty in zip(robots_xyw, robots_rty)]
radii = [robot[3] for robot in robots_xywrty]
yaws = [robot[5] for robot in robots_xywrty]
for robot in robots_xywrty:
print robot
print min(radii), max(radii)
print min(yaws), max(yaws)
# RESULT: extremes in XYW do *not* yield the extremes in RTY
| mit |
HIPS/pgmult | setup.py | 2 | 1189 | from distutils.core import setup
import numpy as np
from Cython.Build import cythonize
setup(
name='pgmult',
version='0.1',
description=
"Learning and inference for models with multinomial observations and "
"underlying Gaussian correlation structure. Examples include correlated "
"topic model, multinomial linear dynamical systems, and multinomial "
"Gaussian processes. ",
author='Scott W. Linderman and Matthew James Johnson',
author_email='[email protected], [email protected]',
license="MIT",
url='https://github.com/HIPS/pgmult',
packages=['pgmult'],
install_requires=[
'Cython >= 0.20.1', 'numpy', 'scipy', 'matplotlib',
'pybasicbayes', 'pypolyagamma', 'gslrandom', 'pylds'],
ext_modules=cythonize('pgmult/**/*.pyx'),
include_dirs=[np.get_include(),],
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: C++',
],
keywords=[
'multinomial', 'polya', 'gamma', 'correlated topic model', 'ctm',
'lds', 'linear dynamical system', 'gaussian process', 'gp'],
platforms="ALL"
)
| mit |
tkaitchuck/nupic | external/darwin64/lib/python2.6/site-packages/matplotlib/fontconfig_pattern.py | 72 | 6429 | """
A module for parsing and generating fontconfig patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
# Author : Michael Droettboom <[email protected]>
# License : matplotlib license (PSF compatible)
# This class is defined here because it must be available in:
# - The old-style config framework (:file:`rcsetup.py`)
# - The traits-based config framework (:file:`mpltraits.py`)
# - The font manager (:file:`font_manager.py`)
# It probably logically belongs in :file:`font_manager.py`, but
# placing it in any of these places would have created cyclical
# dependency problems, or an undesired dependency on traits even
# when the traits-based config framework is not used.
import re
from matplotlib.pyparsing import Literal, ZeroOrMore, \
Optional, Regex, StringEnd, ParseException, Suppress
family_punc = r'\\\-:,'
family_unescape = re.compile(r'\\([%s])' % family_punc).sub
family_escape = re.compile(r'([%s])' % family_punc).sub
value_punc = r'\\=_:,'
value_unescape = re.compile(r'\\([%s])' % value_punc).sub
value_escape = re.compile(r'([%s])' % value_punc).sub
class FontconfigPatternParser:
"""A simple pyparsing-based parser for fontconfig-style patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
_constants = {
'thin' : ('weight', 'light'),
'extralight' : ('weight', 'light'),
'ultralight' : ('weight', 'light'),
'light' : ('weight', 'light'),
'book' : ('weight', 'book'),
'regular' : ('weight', 'regular'),
'normal' : ('weight', 'normal'),
'medium' : ('weight', 'medium'),
'demibold' : ('weight', 'demibold'),
'semibold' : ('weight', 'semibold'),
'bold' : ('weight', 'bold'),
'extrabold' : ('weight', 'extra bold'),
'black' : ('weight', 'black'),
'heavy' : ('weight', 'heavy'),
'roman' : ('slant', 'normal'),
'italic' : ('slant', 'italic'),
'oblique' : ('slant', 'oblique'),
'ultracondensed' : ('width', 'ultra-condensed'),
'extracondensed' : ('width', 'extra-condensed'),
'condensed' : ('width', 'condensed'),
'semicondensed' : ('width', 'semi-condensed'),
'expanded' : ('width', 'expanded'),
'extraexpanded' : ('width', 'extra-expanded'),
'ultraexpanded' : ('width', 'ultra-expanded')
}
def __init__(self):
family = Regex(r'([^%s]|(\\[%s]))*' %
(family_punc, family_punc)) \
.setParseAction(self._family)
size = Regex(r"([0-9]+\.?[0-9]*|\.[0-9]+)") \
.setParseAction(self._size)
name = Regex(r'[a-z]+') \
.setParseAction(self._name)
value = Regex(r'([^%s]|(\\[%s]))*' %
(value_punc, value_punc)) \
.setParseAction(self._value)
families =(family
+ ZeroOrMore(
Literal(',')
+ family)
).setParseAction(self._families)
point_sizes =(size
+ ZeroOrMore(
Literal(',')
+ size)
).setParseAction(self._point_sizes)
property =( (name
+ Suppress(Literal('='))
+ value
+ ZeroOrMore(
Suppress(Literal(','))
+ value)
)
| name
).setParseAction(self._property)
pattern =(Optional(
families)
+ Optional(
Literal('-')
+ point_sizes)
+ ZeroOrMore(
Literal(':')
+ property)
+ StringEnd()
)
self._parser = pattern
self.ParseException = ParseException
def parse(self, pattern):
"""
Parse the given fontconfig *pattern* and return a dictionary
of key/value pairs useful for initializing a
:class:`font_manager.FontProperties` object.
"""
props = self._properties = {}
try:
self._parser.parseString(pattern)
except self.ParseException, e:
raise ValueError("Could not parse font string: '%s'\n%s" % (pattern, e))
self._properties = None
return props
def _family(self, s, loc, tokens):
return [family_unescape(r'\1', str(tokens[0]))]
def _size(self, s, loc, tokens):
return [float(tokens[0])]
def _name(self, s, loc, tokens):
return [str(tokens[0])]
def _value(self, s, loc, tokens):
return [value_unescape(r'\1', str(tokens[0]))]
def _families(self, s, loc, tokens):
self._properties['family'] = [str(x) for x in tokens]
return []
def _point_sizes(self, s, loc, tokens):
self._properties['size'] = [str(x) for x in tokens]
return []
def _property(self, s, loc, tokens):
if len(tokens) == 1:
if tokens[0] in self._constants:
key, val = self._constants[tokens[0]]
self._properties.setdefault(key, []).append(val)
else:
key = tokens[0]
val = tokens[1:]
self._properties.setdefault(key, []).extend(val)
return []
parse_fontconfig_pattern = FontconfigPatternParser().parse
def generate_fontconfig_pattern(d):
"""
Given a dictionary of key/value pairs, generates a fontconfig
pattern string.
"""
props = []
families = ''
size = ''
for key in 'family style variant weight stretch file size'.split():
val = getattr(d, 'get_' + key)()
if val is not None and val != []:
if type(val) == list:
val = [value_escape(r'\\\1', str(x)) for x in val if x is not None]
if val != []:
val = ','.join(val)
props.append(":%s=%s" % (key, val))
return ''.join(props)
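# Hedged usage sketch (not part of the original module): parse_fontconfig_pattern
# turns a pattern string into a plain dict that can seed a FontProperties object.
# The pattern below is an illustrative assumption, not a matplotlib default, and
# the helper is defined but never called by this module.
def _example_parse_pattern():
    props = parse_fontconfig_pattern('serif-12:bold:italic')
    # props is expected to look roughly like
    # {'family': ['serif'], 'size': ['12.0'], 'weight': ['bold'], 'slant': ['italic']}
    return props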
| gpl-3.0 |
aewhatley/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
    # Test that partial_fit raises an error when n_components is changed via set_params.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    # Test that changing the number of input features will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
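# Hedged usage sketch (not part of the original test module): the streaming
# pattern exercised by the partial_fit tests above, written as a minimal loop.
# The data shape and batch size are arbitrary assumptions of the example, and
# the helper is not collected or run by the test suite.
def _example_streaming_ipca(batch_size=10):
    rng = np.random.RandomState(0)
    X = rng.randn(100, 5)
    ipca = IncrementalPCA(n_components=2)
    for start in range(0, X.shape[0], batch_size):
        ipca.partial_fit(X[start:start + batch_size])
    return ipca.transform(X)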
| bsd-3-clause |
michaelpacer/networkx | examples/drawing/atlas.py | 54 | 2609 | #!/usr/bin/env python
"""
Atlas of all graphs of 6 nodes or less.
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2004 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.generators.atlas import *
from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic as isomorphic
import random
def atlas6():
""" Return the atlas of all connected graphs of 6 nodes or less.
Attempt to check for isomorphisms and remove.
"""
Atlas=graph_atlas_g()[0:208] # 208
# remove isolated nodes, only connected graphs are left
U=nx.Graph() # graph for union of all graphs in atlas
for G in Atlas:
zerodegree=[n for n in G if G.degree(n)==0]
for n in zerodegree:
G.remove_node(n)
U=nx.disjoint_union(U,G)
# list of graphs of all connected components
C=nx.connected_component_subgraphs(U)
UU=nx.Graph()
# do quick isomorphic-like check, not a true isomorphism checker
nlist=[] # list of nonisomorphic graphs
for G in C:
# check against all nonisomorphic graphs so far
if not iso(G,nlist):
nlist.append(G)
UU=nx.disjoint_union(UU,G) # union the nonisomorphic graphs
return UU
def iso(G1, glist):
"""Quick and dirty nonisomorphism checker used to check isomorphisms."""
for G2 in glist:
if isomorphic(G1,G2):
return True
return False
if __name__ == '__main__':
import networkx as nx
G=atlas6()
print("graph has %d nodes with %d edges"\
%(nx.number_of_nodes(G),nx.number_of_edges(G)))
print(nx.number_connected_components(G),"connected components")
try:
from networkx import graphviz_layout
except ImportError:
raise ImportError("This example needs Graphviz and either PyGraphviz or Pydot")
import matplotlib.pyplot as plt
plt.figure(1,figsize=(8,8))
# layout graphs with positions using graphviz neato
pos=nx.graphviz_layout(G,prog="neato")
# color nodes the same in each connected subgraph
C=nx.connected_component_subgraphs(G)
for g in C:
c=[random.random()]*nx.number_of_nodes(g) # random color...
nx.draw(g,
pos,
node_size=40,
node_color=c,
vmin=0.0,
vmax=1.0,
with_labels=False
)
plt.savefig("atlas.png",dpi=75)
| bsd-3-clause |
SeonghoBaek/RealtimeCamera | openface/training/plot-loss.py | 8 | 3032 | #!/usr/bin/env python3
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('bmh')
import pandas as pd
import os
import sys
scriptDir = os.path.dirname(os.path.realpath(__file__))
plotDir = os.path.join(scriptDir, 'plots')
# workDir = os.path.join(scriptDir, 'work')
def plot(workDirs):
trainDfs = []
testDfs = []
for d in workDirs:
trainF = os.path.join(d, 'train.log')
testF = os.path.join(d, 'test.log')
trainDfs.append(pd.read_csv(trainF, sep='\t'))
testDfs.append(pd.read_csv(testF, sep='\t'))
if len(trainDfs[-1]) != len(testDfs[-1]):
print("Error: Train/test dataframe shapes "
"for '{}' don't match: {}, {}".format(
d, trainDfs[-1].shape, testDfs[-1].shape))
sys.exit(-1)
trainDf = pd.concat(trainDfs, ignore_index=True)
testDf = pd.concat(testDfs, ignore_index=True)
# print("train, test:")
# print("\n".join(["{:0.2e}, {:0.2e}".format(x, y) for (x, y) in
# zip(trainDf['avg triplet loss (train set)'].values[-5:],
# testDf['avg triplet loss (test set)'].values[-5:])]))
fig, ax = plt.subplots(1, 1)
trainDf.index += 1
trainDf['avg triplet loss (train set)'].plot(ax=ax)
plt.xlabel("Epoch")
plt.ylabel("Average Triplet Loss, Training")
plt.ylim(ymin=0)
# plt.xlim(xmin=1)
plt.grid(b=True, which='major', color='k', linestyle='-')
plt.grid(b=True, which='minor', color='k', linestyle='-', alpha=0.2)
plt.minorticks_on()
# ax.set_yscale('log')
d = os.path.join(plotDir, "train-loss.pdf")
fig.savefig(d)
print("Created {}".format(d))
fig, ax = plt.subplots(1, 1)
testDf.index += 1
testDf['lfwAcc'].plot(ax=ax)
plt.xlabel("Epoch")
plt.ylabel("LFW Accuracy")
plt.ylim(ymin=0, ymax=1)
plt.grid(b=True, which='major', color='k', linestyle='-')
plt.grid(b=True, which='minor', color='k', linestyle='-', alpha=0.2)
plt.minorticks_on()
# plt.xlim(xmin=1)
# ax.set_yscale('log')
d = os.path.join(plotDir, "lfw-accuracy.pdf")
fig.savefig(d)
print("Created {}".format(d))
if __name__ == '__main__':
os.makedirs(plotDir, exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument('workDirs', type=str, nargs='+')
args = parser.parse_args()
plot(args.workDirs)
| apache-2.0 |
jereze/scikit-learn | sklearn/datasets/svmlight_format.py | 79 | 15976 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero-valued features and hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
    This format is a text-based format, with one sample per line. It does
    not store zero-valued features and hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text-based source can be expensive. When working repeatedly
    on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
    y: ndarray of shape (n_samples,), or, in the multilabel case, a list of
    tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
    This format is a text-based format, with one sample per line. It does
    not store zero-valued features and hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_labels]
Target values. Class labels must be an integer or float, or array-like
objects of integer or float for multilabel classifications.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
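# Hedged usage sketch (not part of the original module): a dump/load round trip.
# Each dumped line has the form "<label> <index>:<value> ...", optionally with a
# "qid:<n>" entry. The file path and matrix values are arbitrary assumptions of
# the example, and the helper is defined but never called by this module.
def _example_svmlight_roundtrip(path="/tmp/example.svmlight"):
    X = np.array([[0.0, 1.5, 0.0], [2.0, 0.0, 3.5]])
    y = np.array([0, 1])
    dump_svmlight_file(X, y, path, zero_based=True)
    X_loaded, y_loaded = load_svmlight_file(path, n_features=3, zero_based=True)
    # X_loaded comes back as a scipy.sparse CSR matrix; X_loaded.toarray()
    # should match X up to floating point formatting.
    return X_loaded, y_loaded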
| bsd-3-clause |
tosolveit/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) are compared with a single decision tree
regressor. As the number of boosts is increased the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
vishalpant/Sentiment-analysis-of-streaming-tweets | test.py | 1 | 1528 | import load
import os
import Sentiment
import Filter
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
def evaluate_model(target_true,target_predicted):
print("The confusion matrix is as follows:")
print(confusion_matrix(target_true,target_predicted))
print("The classification report is as follows:")
print(classification_report(target_true,target_predicted))
print("The accuracy score is {:.2%}".format(accuracy_score(target_true,target_predicted)))
if __name__ == "__main__":
if not os.path.isfile("sentiments.pickle") or not os.path.isfile("tweets.pickle"):
datafile = str(input("Enter path of training data(Should be in .csv):"))
# replace 4th parameter with the column number of your tweets
# replace 5th parameter with the column number of your sentiments
data, target = load.load_data(datafile, ",", '"', 5, 0)
load.save_sentiments(target)
load.save_tweets(data)
else:
data = load.load_tweets()
target = load.load_sentiments()
tf_idf = Filter.filter(data)
#test data is taken to be 40% of total data and remaining 60% is training data
data_train, data_test, target_train, target_test = Sentiment.data_generate(tf_idf, 0.4, target)
classifier = Sentiment.learn(data_train, target_train)
prediction = Sentiment.predict(data_test, classifier)
evaluate_model(target_test, prediction) | mit |
james4424/nest-simulator | topology/pynest/hl_api.py | 4 | 68298 | # -*- coding: utf-8 -*-
#
# hl_api.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
**High-level API of PyNEST Topology Module**
This file defines the user-level functions of NEST's Python interface to the
Topology module. The basic approach is the same as for the PyNEST interface to
NEST:
1. Function names are the same as in SLI.
2. Nodes are identified by their GIDs.
3. GIDs are always given as tuples or lists of integer(s).
4. Commands returning GIDs return them as tuples.
5. Other arguments can be
* single items that are applied to all entries in a GID list
* a list of the same length as the given list of GID(s) where each item is
matched with the pertaining GID.
**Example**
::
layers = CreateLayer(({...}, {...}, {...}))
creates three layers and returns a tuple of three GIDs.
::
ConnectLayers(layers[:2], layers[1:], {...})
connects `layers[0]` to `layers[1]` and `layers[1]` to `layers[2]` \
using the same dictionary to specify both connections.
::
ConnectLayers(layers[:2], layers[1:], ({...}, {...}))
connects the same layers, but the `layers[0]` to `layers[1]` connection
is specified by the first dictionary, the `layers[1]` to `layers[2]`
connection by the second.
:Authors:
Kittel Austvoll,
Hans Ekkehard Plesser,
Hakon Enger
"""
import nest
def topology_func(slifunc, *args):
"""
Execute SLI function `slifunc` with arguments `args` in Topology namespace.
Parameters
----------
slifunc : str
SLI namespace expression
Other parameters
----------------
args : dict
An arbitrary number of arguments
Returns
-------
out :
Values from SLI function `slifunc`
See also
--------
nest.sli_func
"""
return nest.sli_func(slifunc, *args)
class Mask(object):
"""
Class for spatial masks.
Masks are used when creating connections in the Topology module. A mask
describes which area of the pool layer shall be searched for nodes to
connect for any given node in the driver layer. Masks are created using
the ``CreateMask`` command.
"""
_datum = None
# The constructor should not be called by the user
def __init__(self, datum):
"""Masks must be created using the CreateMask command."""
if not isinstance(datum, nest.SLIDatum) or datum.dtype != "masktype":
raise TypeError("expected mask Datum")
self._datum = datum
# Generic binary operation
def _binop(self, op, other):
if not isinstance(other, Mask):
return NotImplemented
return Mask(topology_func(op, self._datum, other._datum))
def __or__(self, other):
return self._binop("or", other)
def __and__(self, other):
return self._binop("and", other)
def __sub__(self, other):
return self._binop("sub", other)
def Inside(self, point):
"""
Test if a point is inside a mask.
Parameters
----------
point : tuple/list of float values
Coordinate of point
Returns
-------
out : bool
True if the point is inside the mask, False otherwise
"""
return topology_func("Inside", point, self._datum)
def CreateMask(masktype, specs, anchor=None):
"""
Create a spatial mask for connections.
Masks are used when creating connections in the Topology module. A mask
describes the area of the pool layer that is searched for nodes to
connect for any given node in the driver layer. Several mask types
are available. Examples are the grid region, the rectangular, circular or
doughnut region.
The command ``CreateMask`` creates a Mask object which may be combined
with other ``Mask`` objects using Boolean operators. The mask is specified
in a dictionary.
``Mask`` objects can be passed to ``ConnectLayers`` in a
connection dictionary with the key `'mask'`.
Parameters
----------
masktype : str, ['rectangular' | 'circular' | 'doughnut'] for 2D masks, \
['box' | 'spherical'] for 3D masks, ['grid'] only for grid-based layers in 2D
The mask name corresponds to the geometrical shape of the mask. There
are different types for 2- and 3-dimensional layers.
specs : dict
Dictionary specifying the parameters of the provided `masktype`,
see **Notes**.
anchor : [tuple/list of floats | dict with the keys `'column'` and \
`'row'` (for grid masks only)], optional, default: None
By providing anchor coordinates, the location of the mask relative to
the driver node can be changed. The list of coordinates has a length
of 2 or 3 dependent on the number of dimensions.
Returns
-------
out : ``Mask`` object
See also
--------
ConnectLayers: Connect two (lists of) layers pairwise according to
specified projections. ``Mask`` objects can be passed in a connection
dictionary with the key `'mask'`.
Notes
-----
-
**Mask types**
Available mask types (`masktype`) and their corresponding parameter
dictionaries:
* 2D free and grid-based layers
::
'rectangular' :
{'lower_left' : [float, float],
'upper_right': [float, float]}
#or
'circular' :
{'radius' : float}
#or
'doughnut' :
{'inner_radius' : float,
'outer_radius' : float}
* 3D free and grid-based layers
::
'box' :
{'lower_left' : [float, float, float],
'upper_right' : [float, float, float]}
#or
'spherical' :
{'radius' : float}
* 2D grid-based layers only
::
'grid' :
{'rows' : float,
'columns' : float}
By default the top-left corner of a grid mask, i.e., the grid
mask element with grid index [0, 0], is aligned with the driver
node. It can be changed by means of the 'anchor' parameter:
::
'anchor' :
{'row' : float,
'column' : float}
**Example**
::
import nest.topology as tp
# create a grid-based layer
l = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'elements' : 'iaf_neuron'})
# create a circular mask
m = tp.CreateMask('circular', {'radius': 0.2})
# connectivity specifications
conndict = {'connection_type': 'divergent',
'mask' : m}
# connect layer l with itself according to the specifications
tp.ConnectLayers(l, l, conndict)
"""
if anchor is None:
return Mask(topology_func('CreateMask', {masktype: specs}))
else:
return Mask(
topology_func('CreateMask', {masktype: specs, 'anchor': anchor}))
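# Hedged illustration (not part of the original module): Mask objects returned
# by CreateMask can be combined with the |, & and - operators before being used
# in a connection dictionary. The radii and the layers passed in are assumptions
# of this sketch, which is defined but never called by the module itself.
def _example_combined_mask(pre, post):
    inner = CreateMask('circular', {'radius': 0.5})
    outer = CreateMask('circular', {'radius': 1.5})
    ring = outer - inner  # comparable in shape to a 'doughnut' mask
    ConnectLayers(pre, post, {'connection_type': 'divergent', 'mask': ring})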
class Parameter(object):
"""
Class for parameters for distance dependency or randomization.
Parameters are spatial functions which are used when creating
connections in the Topology module. A parameter may be used as a
probability kernel when creating connections or as synaptic parameters
(such as weight and delay). Parameters are created using the
``CreateParameter`` command.
"""
_datum = None
# The constructor should not be called by the user
def __init__(self, datum):
"""Parameters must be created using the CreateParameter command."""
if not isinstance(datum,
nest.SLIDatum) or datum.dtype != "parametertype":
raise TypeError("expected parameter datum")
self._datum = datum
# Generic binary operation
def _binop(self, op, other):
if not isinstance(other, Parameter):
return NotImplemented
return Parameter(topology_func(op, self._datum, other._datum))
def __add__(self, other):
return self._binop("add", other)
def __sub__(self, other):
return self._binop("sub", other)
def __mul__(self, other):
return self._binop("mul", other)
def __div__(self, other):
return self._binop("div", other)
def __truediv__(self, other):
return self._binop("div", other)
def GetValue(self, point):
"""
Compute value of parameter at a point.
Parameters
----------
point : tuple/list of float values
coordinate of point
Returns
-------
out : value
The value of the parameter at the point
See also
--------
CreateParameter : create parameter for e.g., distance dependency
Notes
-----
-
**Example**
::
import nest.topology as tp
#linear dependent parameter
P = tp.CreateParameter('linear', {'a' : 2., 'c' : 0.})
#get out value
P.GetValue(point=[3., 4.])
"""
return topology_func("GetValue", point, self._datum)
def CreateParameter(parametertype, specs):
"""
Create a parameter for distance dependency or randomization.
Parameters are (spatial) functions which are used when creating
connections in the Topology module for distance dependency or
randomization. This command creates a Parameter object which may be
combined with other ``Parameter`` objects using arithmetic operators.
The parameter is specified in a dictionary.
A parameter may be used as a probability kernel when creating connections
or as synaptic parameters (such as weight and delay), i.e., for specifying
the parameters `'kernel'`, `'weights'` and `'delays'` in the
connection dictionary passed to ``ConnectLayers``.
Parameters
----------
parametertype : {'constant', 'linear', 'exponential', 'gaussian', \
'gaussian2D', 'uniform', 'normal', 'lognormal'}
Function types with or without distance dependency
specs : dict
Dictionary specifying the parameters of the provided
`'parametertype'`, see **Notes**.
Returns
-------
out : ``Parameter`` object
See also
--------
ConnectLayers : Connect two (lists of) layers pairwise according to
specified projections. Parameters can be used to specify the
parameters `'kernel'`, `'weights'` and `'delays'` in the
connection dictionary.
Parameters : Class for parameters for distance dependency or randomization.
Notes
-----
-
**Parameter types**
Available parameter types (`parametertype` parameter), their function and
acceptable keys for their corresponding specification dictionaries
* Constant
::
'constant' :
{'value' : float} # constant value
* With dependence on the distance `d`
::
# p(d) = c + a * d
'linear' :
{'a' : float, # slope, default: 1.0
'c' : float} # constant offset, default: 0.0
# or
# p(d) = c + a*exp(-d/tau)
'exponential' :
{'a' : float, # coefficient of exponential term, default: 1.0
'c' : float, # constant offset, default: 0.0
'tau' : float} # length scale factor, default: 1.0
# or
# p(d) = c + p_center*exp(-(d-mean)^2/(2*sigma^2))
'gaussian' :
{'p_center' : float, # value at center, default: 1.0
'mean' : float, # distance to center, default: 0.0
'sigma' : float, # width of Gaussian, default: 1.0
'c' : float} # constant offset, default: 0.0
* Bivariate Gaussian parameter:
::
# p(x,y) = c + p_center *
# exp( -( (x-mean_x)^2/sigma_x^2 + (y-mean_y)^2/sigma_y^2
# + 2*rho*(x-mean_x)*(y-mean_y)/(sigma_x*sigma_y) ) /
# (2*(1-rho^2)) )
'gaussian2D' :
{'p_center' : float, # value at center, default: 1.0
'mean_x' : float, # x-coordinate of center, default: 0.0
'mean_y' : float, # y-coordinate of center, default: 0.0
'sigma_x' : float, # width in x-direction, default: 1.0
'sigma_y' : float, # width in y-direction, default: 1.0
'rho' : float, # correlation of x and y, default: 0.0
'c' : float} # constant offset, default: 0.0
* Without distance dependency, for randomization
::
# random parameter with uniform distribution in [min,max)
'uniform' :
{'min' : float, # minimum value, default: 0.0
'max' : float} # maximum value, default: 1.0
# or
# random parameter with normal distribution, optionally truncated
# to [min,max)
'normal':
{'mean' : float, # mean value, default: 0.0
'sigma': float, # standard deviation, default: 1.0
'min' : float, # minimum value, default: -inf
'max' : float} # maximum value, default: +inf
# or
# random parameter with lognormal distribution,
# optionally truncated to [min,max)
'lognormal' :
{'mu' : float, # mean value of logarithm, default: 0.0
'sigma': float, # standard deviation of log, default: 1.0
'min' : float, # minimum value, default: -inf
'max' : float} # maximum value, default: +inf
**Example**
::
import nest.topology as tp
# create a grid-based layer
l = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'elements' : 'iaf_neuron'})
# parameter for delay with linear distance dependency
d = tp.CreateParameter('linear', {'a': 0.2,
'c': 0.2})
# connectivity specifications
conndict = {'connection_type': 'divergent',
'delays': d}
tp.ConnectLayers(l, l, conndict)
"""
return Parameter(topology_func('CreateParameter', {parametertype: specs}))
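# Hedged illustration (not part of the original module): Parameter objects can be
# combined with +, -, * and / to build composite kernels or synaptic parameters.
# The numbers and the layers passed in are assumptions of this sketch, which is
# defined but never called by the module itself.
def _example_combined_parameter(pre, post):
    gauss = CreateParameter('gaussian', {'p_center': 0.8, 'sigma': 0.3})
    floor = CreateParameter('constant', {'value': 0.1})
    kernel = gauss + floor  # distance-dependent probability with a constant floor
    ConnectLayers(pre, post, {'connection_type': 'divergent', 'kernel': kernel})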
def CreateLayer(specs):
"""
    Create one or more Topology layer(s) according to given specifications.
The Topology module organizes neuronal networks in layers. A layer is a
special type of subnet which contains information about the spatial
position of its nodes (simple or composite elements) in 2 or 3 dimensions.
If `specs` is a dictionary, a single layer is created. If it is a list
of dictionaries, one layer is created for each dictionary.
Topology distinguishes between two classes of layers:
* grid-based layers in which each element is placed at a location in a
regular grid
* free layers in which elements can be placed arbitrarily
Obligatory dictionary entries define the class of layer
(grid-based layers: 'columns' and 'rows'; free layers: 'positions')
and the 'elements'.
Parameters
----------
specs : (tuple/list of) dict(s)
Dictionary or list of dictionaries with layer specifications, see
**Notes**.
Returns
-------
out : tuple of int(s)
GID(s) of created layer(s)
See also
--------
ConnectLayers: Connect two (lists of) layers which were created with
``CreateLayer`` pairwise according to specified projections.
Other parameters
----------------
Available parameters for the layer-specifying dictionary `specs`
center : tuple/list of floats, optional, default: (0.0, 0.0)
Layers are centered about the origin by default, but the center
coordinates can also be changed.
'center' has length 2 or 3 dependent on the number of dimensions.
columns : int, obligatory for grid-based layers
Number of columns.
Needs `'rows'`; mutually exclusive with `'positions'`.
edge_wrap : bool, default: False
Periodic boundary conditions.
elements : (tuple/list of) str or str followed by int
Elements of layers are NEST network nodes such as neuron models or
devices.
For network elements with several nodes of the same type, the
number of nodes to be created must follow the model name.
For composite elements, a collection of nodes can be passed as
list or tuple.
extent : tuple of floats, optional, default in 2D: (1.0, 1.0)
Size of the layer. It has length 2 or 3 dependent on the number of
dimensions.
positions : tuple/list of coordinates (lists/tuples of floats),
obligatory for free layers
Explicit specification of the positions of all elements.
The coordinates have a length 2 or 3 dependent on the number of
dimensions.
All element positions must be within the layer’s extent.
Mutually exclusive with 'rows' and 'columns'.
rows : int, obligatory for grid-based layers
Number of rows.
Needs `'columns'`; mutually exclusive with `'positions'`.
Notes
-----
-
**Example**
::
import nest
import nest.topology as tp
# grid-based layer
gl = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'elements' : 'iaf_neuron'})
# free layer
import numpy as np
pos = [[np.random.uniform(-0.5, 0.5), np.random.uniform(-0.5,0.5)]
for i in range(50)]
fl = tp.CreateLayer({'positions' : pos,
'elements' : 'iaf_neuron'})
# extent, center and edge_wrap
el = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'extent' : [2.0, 3.0],
'center' : [1.0, 1.5],
'edge_wrap' : True,
'elements' : 'iaf_neuron'})
# composite layer with several nodes of the same type
cl = tp.CreateLayer({'rows' : 1,
'columns' : 2,
'elements' : ['iaf_cond_alpha', 10,
'poisson_generator',
'noise_generator', 2]})
# investigate the status dictionary of a layer
nest.GetStatus(gl)[0]['topology']
"""
if isinstance(specs, dict):
specs = (specs, )
elif not all(isinstance(spec, dict) for spec in specs):
raise TypeError("specs must be a dictionary or a list of dictionaries")
return topology_func('{ CreateLayer } Map', specs)
def ConnectLayers(pre, post, projections):
"""
Pairwise connect of pre- and postsynaptic (lists of) layers.
`pre` and `post` must be a tuple/list of GIDs of equal length. The GIDs
must refer to layers created with ``CreateLayers``. Layers in the `pre`
and `post` lists are connected pairwise.
* If `projections` is a single dictionary, it applies to all pre-post
pairs.
* If `projections` is a tuple/list of dictionaries, it must have the same
length as `pre` and `post` and each dictionary is matched with the proper
pre-post pair.
A minimal call of ``ConnectLayers`` expects a source layer `pre`, a
target layer `post` and a connection dictionary `projections`
containing at least the entry `'connection_type'` (either
`'convergent'` or `'divergent'`).
When connecting two layers, the driver layer is the one in which each node
is considered in turn. The pool layer is the one from which nodes are
chosen for each node in the driver layer.
Parameters
----------
pre : tuple/list of int(s)
List of GIDs of presynaptic layers (sources)
post : tuple/list of int(s)
List of GIDs of postsynaptic layers (targets)
projections : (tuple/list of) dict(s)
Dictionary or list of dictionaries specifying projection properties
Returns
-------
out : None
ConnectLayers returns `None`
See also
--------
CreateLayer : Create one or more Topology layer(s).
CreateMask : Create a ``Mask`` object. Documentation on available spatial
masks. Masks can be used to specify the key `'mask'` of the
connection dictionary.
CreateParameter : Create a ``Parameter`` object. Documentation on available
parameters for distance dependency and randomization. Parameters can
be used to specify the parameters `'kernel'`, `'weights'` and
`'delays'` of the connection dictionary.
nest.GetConnections : Retrieve connections.
Other parameters
----------------
Available keys for the layer-specifying dictionary `projections`
allow_autapses : bool, optional, default: True
An autapse is a synapse (connection) from a node onto itself.
It is used together with the `'number_of_connections'` option.
allow_multapses : bool, optional, default: True
        Node A is connected to node B by a multapse if there are multiple
        synapses (connections) from A to B.
It is used together with the `'number_of_connections'` option.
connection_type : str
The type of connections can be either `'convergent'` or
`'divergent'`. In case of convergent connections, the target
layer is considered as driver layer and the source layer as pool
layer - and vice versa for divergent connections.
delays : [float | dict | Parameter object], optional, default: 1.0
Delays can be constant, randomized or distance-dependent according
to a provided function.
Information on available functions can be found in the
documentation on the function ``CreateParameter``.
kernel : [float | dict | Parameter object], optional, default: 1.0
A kernel is a function mapping the distance (or displacement)
between a driver and a pool node to a connection probability. The
default kernel is 1.0, i.e., connections are created with
certainty.
Information on available functions can be found in the
documentation on the function ``CreateParameter``.
mask : [dict | Mask object], optional
The mask defines which pool nodes are considered as potential
targets for each driver node. Parameters of the different
available masks in 2 and 3 dimensions are also defined in
dictionaries.
If no mask is specified, all neurons from the pool layer are
possible targets for each driver node.
Information on available masks can be found in the documentation on
the function ``CreateMask``.
number_of_connections : int, optional
Prescribed number of connections for each driver node. The actual
connections being created are picked at random from all the
candidate connections.
synapse_model : str, optional
The default synapse model in NEST is used if not specified
otherwise.
weights : [float | dict | Parameter object], optional, default: 1.0
Weights can be constant, randomized or distance-dependent according
to a provided function.
Information on available functions can be found in the
documentation on the function ``CreateParameter``.
Notes
-----
* In the case of free probabilistic connections (in contrast to
prescribing the number of connections), each possible driver-pool
pair is inspected exactly once so that there will be at most one
connection between each driver-pool pair.
* Periodic boundary conditions are always applied in the pool layer.
It is irrelevant whether the driver layer has periodic boundary
conditions or not.
* By default, Topology does not accept masks that are wider than the
pool layer when using periodic boundary conditions.
Kernel, weight and delay functions always consider the shortest
distance (displacement) between driver and pool node.
**Example**
::
import nest.topology as tp
# create a layer
l = tp.CreateLayer({'rows' : 11,
'columns' : 11,
'extent' : [11.0, 11.0],
'elements' : 'iaf_neuron'})
# connectivity specifications with a mask
conndict1 = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left' : [-2.0, -1.0],
'upper_right' : [2.0, 1.0]}}}
# connect layer l with itself according to the given
# specifications
tp.ConnectLayers(l, l, conndict1)
# connection dictionary with distance-dependent kernel
# (given as Parameter object) and randomized weights
# (given as a dictionary)
gauss_kernel = tp.CreateParameter('gaussian', {'p_center' : 1.0,
'sigma' : 1.0})
conndict2 = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 2.0}},
'kernel': gauss_kernel,
'weights': {'uniform': {'min': 0.2, 'max': 0.8}}}
"""
if not nest.is_sequence_of_gids(pre):
raise TypeError("pre must be a sequence of GIDs")
if not nest.is_sequence_of_gids(pre):
raise TypeError("post must be a sequence of GIDs")
if not len(pre) == len(post):
raise nest.NESTError("pre and post must have the same length.")
# ensure projections is list of full length
projections = nest.broadcast(projections, len(pre), (dict, ),
"projections")
# Replace python classes with SLI datums
def fixdict(d):
d = d.copy()
for k, v in d.items():
if isinstance(v, dict):
d[k] = fixdict(v)
elif isinstance(v, Mask) or isinstance(v, Parameter):
d[k] = v._datum
return d
projections = [fixdict(p) for p in projections]
topology_func('3 arraystore { ConnectLayers } ScanThread', pre, post,
projections)
def GetPosition(nodes):
"""
Return the spatial locations of nodes.
Parameters
----------
nodes : tuple/list of int(s)
List of GIDs
Returns
-------
out : tuple of tuple(s)
List of positions as 2- or 3-element lists
See also
--------
Displacement : Get vector of lateral displacement between nodes.
Distance : Get lateral distance between nodes.
DumpLayerConnections : Write connectivity information to file.
DumpLayerNodes : Write layer node positions to file.
Notes
-----
    * The functions ``GetPosition``, ``Displacement`` and ``Distance`` now
      only work for nodes local to the current MPI process, if used in an
      MPI-parallel simulation.
**Example**
::
import nest
import nest.topology as tp
# create a layer
l = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'elements' : 'iaf_neuron'})
# retrieve positions of all (local) nodes belonging to the layer
gids = nest.GetNodes(l, {'local_only': True})[0]
tp.GetPosition(gids)
"""
if not nest.is_sequence_of_gids(nodes):
raise TypeError("nodes must be a sequence of GIDs")
return topology_func('{ GetPosition } Map', nodes)
def GetLayer(nodes):
"""
Return the layer to which nodes belong.
Parameters
----------
nodes : tuple/list of int(s)
List of neuron GIDs
Returns
-------
out : tuple of int(s)
List of layer GIDs
See also
--------
GetElement : Return the node(s) at the location(s) in the given layer(s).
GetPosition : Return the spatial locations of nodes.
Notes
-----
-
**Example**
::
import nest.topology as tp
# create a layer
l = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'elements' : 'iaf_neuron'})
# get layer GID of nodes in layer
tp.GetLayer(nest.GetNodes(l)[0])
"""
if not nest.is_sequence_of_gids(nodes):
raise TypeError("nodes must be a sequence of GIDs")
return topology_func('{ GetLayer } Map', nodes)
def GetElement(layers, locations):
"""
Return the node(s) at the location(s) in the given layer(s).
This function works for fixed grid layers only.
* If layers contains a single GID and locations is a single 2-element
array giving a grid location, return a list of GIDs of layer elements
at the given location.
* If layers is a list with a single GID and locations is a list of
coordinates, the function returns a list of lists with GIDs of the nodes
at all locations.
    * If layers is a list of GIDs and locations is a single 2-element array giving
a grid location, the function returns a list of lists with the GIDs of
the nodes in all layers at the given location.
* If layers and locations are lists, it returns a nested list of GIDs, one
list for each layer and each location.
Parameters
----------
layers : tuple/list of int(s)
List of layer GIDs
locations : [tuple/list of floats | tuple/list of tuples/lists of floats]
2-element list with coordinates of a single grid location,
or list of 2-element lists of coordinates for 2-dimensional layers,
i.e., on the format [column, row]
Returns
-------
out : tuple of int(s)
List of GIDs
See also
--------
GetLayer : Return the layer to which nodes belong.
FindNearestElement: Return the node(s) closest to the location(s) in the
given layer(s).
GetPosition : Return the spatial locations of nodes.
Notes
-----
-
**Example**
::
import nest.topology as tp
# create a layer
l = tp.CreateLayer({'rows' : 5,
'columns' : 4,
'elements' : 'iaf_neuron'})
# get GID of element in last row and column
tp.GetElement(l, [3, 4])
"""
if not nest.is_sequence_of_gids(layers):
raise TypeError("layers must be a sequence of GIDs")
if not len(layers) > 0:
raise nest.NESTError("layers cannot be empty")
if not (nest.is_iterable(locations) and len(locations) > 0):
raise nest.NESTError(
"locations must be coordinate array or list of coordinate arrays")
# ensure that all layers are grid-based, otherwise one ends up with an
# incomprehensible error message
try:
topology_func('{ [ /topology [ /rows /columns ] ] get ; } forall',
layers)
except Exception:
raise nest.NESTError(
"layers must contain only grid-based topology layers")
# SLI GetElement returns either single GID or list
def make_tuple(x):
if not nest.is_iterable(x):
return (x, )
else:
return x
if nest.is_iterable(locations[0]):
# layers and locations are now lists
nodes = topology_func(
'/locs Set { /lyr Set locs { lyr exch GetElement } Map } Map',
layers, locations)
node_list = tuple(
tuple(make_tuple(nodes_at_loc) for nodes_at_loc in nodes_in_lyr)
for nodes_in_lyr in nodes)
else:
# layers is list, locations is a single location
nodes = topology_func('/loc Set { loc GetElement } Map', layers,
locations)
node_list = tuple(make_tuple(nodes_in_lyr) for nodes_in_lyr in nodes)
# If only a single layer is given, un-nest list
if len(layers) == 1:
node_list = node_list[0]
return node_list
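# Illustrative return shapes for GetElement (hypothetical GIDs; assumes grid
# layers l1 and l2 created with CreateLayer, each with a single element per
# grid position):
#   GetElement(l1, [0, 0])            -> (gid,)
#   GetElement(l1, [[0, 0], [1, 1]])  -> ((gid,), (gid,))
#   GetElement(l1 + l2, [0, 0])       -> ((gid,), (gid,))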
def FindNearestElement(layers, locations, find_all=False):
"""
Return the node(s) closest to the location(s) in the given layer(s).
This function works for fixed grid layers only.
* If layers contains a single GID and locations is a single 2-element
array giving a position, return a list of GIDs of the layer elements
closest to the given position.
* If layers is a list with a single GID and locations is a list of
coordinates, the function returns a list of lists with GIDs of the nodes
closest to each position.
* If layers is a list of GIDs and locations is a single 2-element array
giving a position, the function returns a list of lists with the GIDs of
the nodes in each layer closest to the given position.
* If layers and locations are lists, it returns a nested list of GIDs, one
list for each layer and each location.
Parameters
----------
layers : tuple/list of int(s)
List of layer GIDs
locations : tuple(s)/list(s) of tuple(s)/list(s)
2-element list with coordinates of a single position, or list of
2-element lists of positions
find_all : bool, default: False
If `False` (default), return only the first node found when several
nodes share the minimal distance.
If `True`, return a list of GIDs containing all nodes with minimal
distance instead of a single GID.
Returns
-------
out : tuple of int(s)
List of node GIDs
See also
--------
FindCenterElement : Return GID(s) of node closest to center of layers.
GetElement : Return the node(s) at the location(s) in the given layer(s).
GetPosition : Return the spatial locations of nodes.
Notes
-----
-
**Example**
::
import nest.topology as tp
# create a layer
l = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'elements' : 'iaf_neuron'})
# get GID of element closest to some location
tp.FindNearestElement(l, [3.0, 4.0], True)
"""
import numpy
if not nest.is_sequence_of_gids(layers):
raise TypeError("layers must be a sequence of GIDs")
if not len(layers) > 0:
raise nest.NESTError("layers cannot be empty")
if not nest.is_iterable(locations):
raise TypeError(
"locations must be coordinate array or list of coordinate arrays")
# ensure locations is sequence, keeps code below simpler
if not nest.is_iterable(locations[0]):
locations = (locations, )
result = [] # collect one list per layer
# loop over layers
for lyr in layers:
els = nest.GetChildren((lyr, ))[0]
lyr_result = []
# loop over locations
for loc in locations:
d = Distance(numpy.array(loc), els)
if not find_all:
dx = numpy.argmin(d) # finds location of one minimum
lyr_result.append(els[dx])
else:
mingids = list(els[:1])
minval = d[0]
for idx in range(1, len(els)):
if d[idx] < minval:
mingids = [els[idx]]
minval = d[idx]
elif numpy.abs(d[idx] - minval) <= 1e-14 * minval:
mingids.append(els[idx])
lyr_result.append(tuple(mingids))
result.append(tuple(lyr_result))
# If both layers and locations are multi-element lists, result shall remain
# a nested list. Otherwise, either the top or the second level is a single
# element list and we flatten.
assert (len(result) > 0)
if len(result) == 1:
assert (len(layers) == 1)
return result[0]
elif len(result[0]) == 1:
assert (len(locations) == 1)
return tuple(el[0] for el in result)
else:
return tuple(result)
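# Illustrative return shapes for FindNearestElement with find_all=False
# (hypothetical GIDs; assumes layers l1 and l2 created with CreateLayer):
#   FindNearestElement(l1, [0.0, 0.0])                     -> (gid,)
#   FindNearestElement(l1, [[0.0, 0.0], [1.0, 1.0]])       -> (gid, gid)
#   FindNearestElement(l1 + l2, [0.0, 0.0])                -> (gid, gid)
#   FindNearestElement(l1 + l2, [[0.0, 0.0], [1.0, 1.0]])  -> ((gid, gid), (gid, gid))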
def _check_displacement_args(from_arg, to_arg, caller):
"""
Internal helper function to check arguments to Displacement
and Distance and make them lists of equal length.
"""
import numpy
if isinstance(from_arg, numpy.ndarray):
from_arg = (from_arg, )
elif not (nest.is_iterable(from_arg) and len(from_arg) > 0):
raise nest.NESTError(
"%s: from_arg must be lists of GIDs or positions" % caller)
# invariant: from_arg is list
if not nest.is_sequence_of_gids(to_arg):
raise nest.NESTError("%s: to_arg must be lists of GIDs" % caller)
# invariant: from_arg and to_arg are sequences
if len(from_arg) > 1 and len(to_arg) > 1 and not len(from_arg) == len(
to_arg):
raise nest.NESTError(
"%s: If to_arg and from_arg are lists, they must have same length."
% caller)
# invariant: from_arg and to_arg have equal length,
# or (at least) one has length 1
if len(from_arg) == 1:
from_arg = from_arg * len(to_arg) # this is a no-op if len(to_arg)==1
if len(to_arg) == 1:
to_arg = to_arg * len(from_arg) # this is a no-op if len(from_arg)==1
# invariant: from_arg and to_arg have equal length
return from_arg, to_arg
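# Illustrative sketch of the broadcasting behaviour (hypothetical values):
# a single position in from_arg is repeated to match several GIDs in to_arg,
#   _check_displacement_args([(0.0, 0.0)], [2, 3, 4], 'Distance')
#   -> ([(0.0, 0.0), (0.0, 0.0), (0.0, 0.0)], [2, 3, 4])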
def Displacement(from_arg, to_arg):
"""
Get vector of lateral displacement from node(s) `from_arg`
to node(s) `to_arg`.
Displacement is always measured in the layer to which the `to_arg` node
belongs. If a node in the `from_arg` list belongs to a different layer,
its location is projected into the `to_arg` layer. If explicit positions
are given in the `from_arg` list, they are interpreted in the `to_arg`
layer.
Displacement is the shortest displacement, taking into account
periodic boundary conditions where applicable.
* If one of `from_arg` or `to_arg` has length 1, and the other is longer,
the displacement from/to the single item to all other items is given.
* If `from_arg` and `to_arg` both have more than one element, they have
to be lists of the same length and the displacement for each pair is
returned.
Parameters
----------
from_arg : [tuple/list of int(s) | tuple/list of tuples/lists of floats]
List of GIDs or position(s)
to_arg : tuple/list of int(s)
List of GIDs
Returns
-------
out : tuple
Displacement vectors between pairs of nodes in `from_arg` and `to_arg`
See also
--------
Distance : Get lateral distances between nodes.
DumpLayerConnections : Write connectivity information to file.
GetPosition : Return the spatial locations of nodes.
Notes
-----
* The functions ``GetPosition``, ``Displacement`` and ``Distance`` now
only work for nodes local to the current MPI process when used in an
MPI-parallel simulation.
**Example**
::
import nest.topology as tp
# create a layer
l = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'elements' : 'iaf_neuron'})
# displacement between node 2 and 3
print tp.Displacement([2], [3])
# displacement between the position (0.0, 0.0) and node 2
print tp.Displacement([(0.0, 0.0)], [2])
"""
from_arg, to_arg = _check_displacement_args(from_arg, to_arg,
'Displacement')
return topology_func('{ Displacement } MapThread', [from_arg, to_arg])
def Distance(from_arg, to_arg):
"""
Get lateral distances from node(s) from_arg to node(s) to_arg.
The distance between two nodes is the length of their displacement vector.
Distance is always measured in the layer to which the `to_arg` node
belongs. If a node in the `from_arg` list belongs to a different layer,
its location is projected into the `to_arg` layer. If explicit positions
are given in the `from_arg` list, they are interpreted in the `to_arg`
layer.
Distance is the shortest distance, taking into account periodic boundary
conditions where applicable.
* If one of `from_arg` or `to_arg` has length 1, and the other is longer,
the distance between the single item and all other items is given.
* If `from_arg` and `to_arg` both have more than one element, they have
to be lists of the same length and the distance for each pair is
returned.
Parameters
----------
from_arg : [tuple/list of ints | tuple/list with tuples/lists of floats]
List of GIDs or position(s)
to_arg : tuple/list of ints
List of GIDs
Returns
-------
out : tuple
Distances between from and to
See also
--------
Displacement : Get vector of lateral displacements between nodes.
DumpLayerConnections : Write connectivity information to file.
GetPosition : Return the spatial locations of nodes.
Notes
-----
* The functions ``GetPosition``, ``Displacement`` and ``Distance`` now
only work for nodes local to the current MPI process when used in an
MPI-parallel simulation.
**Example**
::
import nest.topology as tp
# create a layer
l = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'elements' : 'iaf_neuron'})
# distance between node 2 and 3
print tp.Distance([2], [3])
# distance between the position (0.0, 0.0) and node 2
print tp.Distance([(0.0, 0.0)], [2])
"""
from_arg, to_arg = _check_displacement_args(from_arg, to_arg, 'Distance')
return topology_func('{ Distance } MapThread', [from_arg, to_arg])
def _rank_specific_filename(basename):
"""Returns file name decorated with rank."""
if nest.NumProcesses() == 1:
return basename
else:
np = nest.NumProcesses()
np_digs = len(str(np - 1)) # for pretty formatting
rk = nest.Rank()
dot = basename.find('.')
if dot < 0:
return '%s-%0*d' % (basename, np_digs, rk)
else:
return '%s-%0*d%s' % (basename[:dot], np_digs, rk, basename[dot:])
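# Illustrative behaviour (hypothetical values): with 4 MPI processes, rank 2
# turns 'positions.txt' into 'positions-2.txt'; with 12 processes, rank 7
# yields 'positions-07.txt' (ranks are zero-padded to the width of the
# largest rank).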
def DumpLayerNodes(layers, outname):
"""
Write GID and position data of layer(s) to file.
For each node in a layer, a line with the following information is
written:
::
GID x-position y-position [z-position]
If `layers` contains several GIDs, data for all layers will be written to a
single file.
Parameters
----------
layers : tuple/list of int(s)
List of GIDs of a Topology layer
outname : str
Name of file to write to (existing files are overwritten)
Returns
-------
out : None
See also
--------
DumpLayerConnections : Write connectivity information to file.
GetPosition : Return the spatial locations of nodes.
Notes
-----
* If calling this function from a distributed simulation, this function
will write to one file per MPI rank.
* File names are formed by inserting the MPI rank into the file name
before the file name suffix.
* Each file stores data for the nodes local to the corresponding MPI rank.
**Example**
::
import nest.topology as tp
# create a layer
l = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'elements' : 'iaf_neuron'})
# write layer node positions to file
tp.DumpLayerNodes(l, 'positions.txt')
"""
topology_func("""
(w) file exch { DumpLayerNodes } forall close
""",
layers, _rank_specific_filename(outname))
def DumpLayerConnections(layers, synapse_model, outname):
"""
Write connectivity information to file.
This function writes connection information to file for all outgoing
connections from the given layers with the given synapse model.
Data for all layers in the list is combined.
For each connection, one line is stored, in the following format:
::
source_gid target_gid weight delay dx dy [dz]
where (dx, dy [, dz]) is the displacement from source to target node.
If targets do not have positions (e.g., spike detectors outside any layer),
NaN is written for each displacement coordinate.
Parameters
----------
layers : tuple/list of int(s)
List of GIDs of a Topology layer
synapse_model : str
NEST synapse model
outname : str
Name of file to write to (will be overwritten if it exists)
Returns
-------
out : None
See also
--------
DumpLayerNodes : Write layer node positions to file.
GetPosition : Return the spatial locations of nodes.
nest.GetConnections : Return connection identifiers between
sources and targets
Notes
-----
* If calling this function from a distributed simulation, this function
will write to one file per MPI rank.
* File names are formed by inserting
the MPI Rank into the file name before the file name suffix.
* Each file stores data for local nodes.
**Example**
::
import nest.topology as tp
# create a layer
l = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'elements' : 'iaf_neuron'})
tp.ConnectLayers(l,l, {'connection_type': 'divergent',
'synapse_model': 'static_synapse'})
# write connectivity information to file
tp.DumpLayerConnections(l, 'static_synapse', 'connections.txt')
"""
topology_func("""
/oname Set
cvlit /synmod Set
/lyrs Set
oname (w) file lyrs
{ synmod DumpLayerConnections } forall close
""",
layers, synapse_model, _rank_specific_filename(outname))
def FindCenterElement(layers):
"""
Return GID(s) of node closest to center of layers.
Parameters
----------
layers : tuple/list of int(s)
List of layer GIDs
Returns
-------
out : tuple of int(s)
A list containing for each layer the GID of the node closest to the
center of the layer, as specified in the layer parameters. If several
nodes are equally close to the center, an arbitrary one of them is
returned.
See also
--------
FindNearestElement : Return the node(s) closest to the location(s) in the
given layer(s).
GetElement : Return the node(s) at the location(s) in the given layer(s).
GetPosition : Return the spatial locations of nodes.
Notes
-----
-
**Example**
::
import nest.topology as tp
# create a layer
l = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'elements' : 'iaf_neuron'})
# get GID of the element closest to the center of the layer
tp.FindCenterElement(l)
"""
if not nest.is_sequence_of_gids(layers):
raise TypeError("layers must be a sequence of GIDs")
# Do each layer on its own since FindNearestElement does not thread
return tuple(FindNearestElement((lyr, ),
nest.GetStatus((lyr, ), 'topology')[0][
'center'])[0]
for lyr in layers)
def GetTargetNodes(sources, tgt_layer, tgt_model=None, syn_model=None):
"""
Obtain targets of a list of sources in given target layer.
Parameters
----------
sources : tuple/list of int(s)
List of GID(s) of source neurons
tgt_layer : tuple/list of int(s)
Single-element list with GID of tgt_layer
tgt_model : [None | str], optional, default: None
Return only targets of a given neuron model.
syn_model : [None | str], optional, default: None
Return only targets connected with a given synapse model.
Returns
-------
out : tuple of list(s) of int(s)
List of GIDs of target neurons fulfilling the given criteria.
It is a list of lists, one list per source.
For each neuron in `sources`, this function finds all target elements
in `tgt_layer`. If `tgt_model` is not given (default), all targets are
returned, otherwise only targets of specific type, and similarly for
syn_model.
See also
--------
GetTargetPositions : Obtain positions of targets of a list of sources in a
given target layer.
nest.GetConnections : Return connection identifiers between
sources and targets
Notes
-----
* For distributed simulations, this function only returns targets on the
local MPI process.
**Example**
::
import nest.topology as tp
# create a layer
l = tp.CreateLayer({'rows' : 11,
'columns' : 11,
'extent' : [11.0, 11.0],
'elements' : 'iaf_neuron'})
# connectivity specifications with a mask
conndict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left' : [-2.0, -1.0],
'upper_right': [2.0, 1.0]}}}
# connect layer l with itself according to the given
# specifications
tp.ConnectLayers(l, l, conndict)
# get the GIDs of the targets of the source neuron with GID 5
tp.GetTargetNodes([5], l)
"""
if not nest.is_sequence_of_gids(sources):
raise TypeError("sources must be a sequence of GIDs")
if not nest.is_sequence_of_gids(tgt_layer):
raise TypeError("tgt_layer must be a sequence of GIDs")
if len(tgt_layer) != 1:
raise nest.NESTError("tgt_layer must be a one-element list")
# obtain local nodes in target layer, to pass to GetConnections
tgt_nodes = nest.GetLeaves(tgt_layer,
properties={
'model': tgt_model} if tgt_model else None,
local_only=True)[0]
conns = nest.GetConnections(sources, tgt_nodes, synapse_model=syn_model)
# conns is a flat list of connections.
# Re-organize into one list per source, containing only target GIDs.
src_tgt_map = dict((sgid, []) for sgid in sources)
for conn in conns:
src_tgt_map[conn[0]].append(conn[1])
# convert dict to nested list in same order as sources
return tuple(src_tgt_map[sgid] for sgid in sources)
def GetTargetPositions(sources, tgt_layer, tgt_model=None, syn_model=None):
"""
Obtain positions of targets of a list of sources in a given target layer.
Parameters
----------
sources : tuple/list of int(s)
List of GID(s) of source neurons
tgt_layer : tuple/list of int(s)
Single-element list with GID of tgt_layer
tgt_model : [None | str], optional, default: None
Return only target positions for a given neuron model.
syn_model : [None | str], optional, default: None
Return only target positions for a given synapse model.
Returns
-------
out : tuple of tuple(s) of tuple(s) of floats
Positions of target neurons fulfilling the given criteria as a nested
list, containing one list of positions per node in sources.
For each neuron in `sources`, this function finds all target elements
in `tgt_layer`. If `tgt_model` is not given (default), all targets are
returned, otherwise only targets of specific type, and similarly for
syn_model.
See also
--------
GetTargetNodes : Obtain targets of a list of sources in a given target
layer.
Notes
-----
* For distributed simulations, this function only returns targets on the
local MPI process.
**Example**
::
import nest.topology as tp
# create a layer
l = tp.CreateLayer({'rows' : 11,
'columns' : 11,
'extent' : [11.0, 11.0],
'elements' : 'iaf_neuron'})
# connectivity specifications with a mask
conndict1 = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left' : [-2.0, -1.0],
'upper_right' : [2.0, 1.0]}}}
# connect layer l with itself according to the given
# specifications
tp.ConnectLayers(l, l, conndict1)
# get the positions of the targets of the source neuron with GID 5
tp.GetTargetPositions([5], l)
"""
return tuple(GetPosition(nodes) for nodes
in GetTargetNodes(sources, tgt_layer, tgt_model, syn_model))
def _draw_extent(ax, xctr, yctr, xext, yext):
"""Draw extent and set aspect ration, limits"""
import matplotlib.pyplot as plt
# thin gray line indicating extent
llx, lly = xctr - xext / 2.0, yctr - yext / 2.0
urx, ury = llx + xext, lly + yext
ax.add_patch(
plt.Rectangle((llx, lly), xext, yext, fc='none', ec='0.5', lw=1,
zorder=1))
# set limits slightly outside extent
ax.set(aspect='equal',
xlim=(llx - 0.05 * xext, urx + 0.05 * xext),
ylim=(lly - 0.05 * yext, ury + 0.05 * yext),
xticks=tuple(), yticks=tuple())
def PlotLayer(layer, fig=None, nodecolor='b', nodesize=20):
"""
Plot all nodes in a layer.
This function plots only top-level nodes, not the content of composite
nodes.
Parameters
----------
layer : tuple/list of int(s)
GID of layer to plot, must be tuple/list of length 1
fig : [None | matplotlib.figure.Figure object], optional, default: None
Matplotlib figure to plot to. If not given, a new figure is
created.
nodecolor : [None | any matplotlib color], optional, default: 'b'
Color for nodes
nodesize : float, optional, default: 20
Marker size for nodes
Returns
-------
out : `matplotlib.figure.Figure` object
See also
--------
PlotKernel : Add indication of mask and kernel to axes.
PlotTargets : Plot all targets of a given source.
matplotlib.figure.Figure : matplotlib Figure class
Notes
-----
* Do not use this function in distributed simulations.
**Example**
::
import nest.topology as tp
import matplotlib.pyplot as plt
# create a layer
l = tp.CreateLayer({'rows' : 11,
'columns' : 11,
'extent' : [11.0, 11.0],
'elements' : 'iaf_neuron'})
# plot layer with all its nodes
tp.PlotLayer(l)
plt.show()
"""
import matplotlib.pyplot as plt
if len(layer) != 1:
raise ValueError("layer must contain exactly one GID.")
# get layer extent
ext = nest.GetStatus(layer, 'topology')[0]['extent']
if len(ext) == 2:
# 2D layer
# get layer extent and center, x and y
xext, yext = ext
xctr, yctr = nest.GetStatus(layer, 'topology')[0]['center']
# extract position information, transpose to list of x and y positions
xpos, ypos = zip(*GetPosition(nest.GetChildren(layer)[0]))
if fig is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = fig.gca()
ax.scatter(xpos, ypos, s=nodesize, facecolor=nodecolor,
edgecolor='none')
_draw_extent(ax, xctr, yctr, xext, yext)
elif len(ext) == 3:
# 3D layer
from mpl_toolkits.mplot3d import Axes3D
# extract position information, transpose to list of x,y,z positions
pos = zip(*GetPosition(nest.GetChildren(layer)[0]))
if fig is None:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
else:
ax = fig.gca()
ax.scatter3D(*pos, s=nodesize, facecolor=nodecolor, edgecolor='none')
plt.draw_if_interactive()
else:
raise nest.NESTError("unexpected dimension of layer")
return fig
def PlotTargets(src_nrn, tgt_layer, tgt_model=None, syn_type=None, fig=None,
mask=None, kernel=None,
src_color='red', src_size=50, tgt_color='blue', tgt_size=20,
mask_color='red', kernel_color='red'):
"""
Plot all targets of source neuron `src_nrn` in a target layer `tgt_layer`.
Parameters
----------
src_nrn : tuple/list of int(s)
GID of source neuron (as single-element list)
tgt_layer : tuple/list of int(s)
GID of tgt_layer (as single-element list)
tgt_model : [None | str], optional, default: None
Show only targets of a given model.
syn_type : [None | str], optional, default: None
Show only targets connected with a given synapse type
fig : [None | matplotlib.figure.Figure object], optional, default: None
Matplotlib figure to plot to. If not given, a new figure is created.
mask : [None | dict], optional, default: None
Draw topology mask with targets; see ``PlotKernel`` for details.
kernel : [None | dict], optional, default: None
Draw topology kernel with targets; see ``PlotKernel`` for details.
src_color : [None | any matplotlib color], optional, default: 'red'
Color used to mark source node position
src_size : float, optional, default: 50
Size of source marker (see scatter for details)
tgt_color : [None | any matplotlib color], optional, default: 'blue'
Color used to mark target node positions
tgt_size : float, optional, default: 20
Size of target markers (see scatter for details)
mask_color : [None | any matplotlib color], optional, default: 'red'
Color used for line marking mask
kernel_color : [None | any matplotlib color], optional, default: 'red'
Color used for lines marking kernel
Returns
-------
out : matplotlib.figure.Figure object
See also
--------
GetTargetNodes : Obtain targets of a list of sources in a given target
layer.
GetTargetPositions : Obtain positions of targets of a list of sources in a
given target layer.
PlotKernel : Add indication of mask and kernel to axes.
PlotLayer : Plot all nodes in a layer.
matplotlib.pyplot.scatter : matplotlib scatter plot.
Notes
-----
* Do not use this function in distributed simulations.
**Example**
::
import nest.topology as tp
import matplotlib.pyplot as plt
# create a layer
l = tp.CreateLayer({'rows' : 11,
'columns' : 11,
'extent' : [11.0, 11.0],
'elements' : 'iaf_neuron'})
# connectivity specifications with a mask
conndict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left' : [-2.0, -1.0],
'upper_right' : [2.0, 1.0]}}}
# connect layer l with itself according to the given
# specifications
tp.ConnectLayers(l, l, conndict)
# plot the targets of the source neuron with GID 5
tp.PlotTargets([5], l)
plt.show()
"""
import matplotlib.pyplot as plt
# get position of source
srcpos = GetPosition(src_nrn)[0]
# get layer extent and center, x and y
ext = nest.GetStatus(tgt_layer, 'topology')[0]['extent']
if len(ext) == 2:
# 2D layer
# get layer extent and center, x and y
xext, yext = ext
xctr, yctr = nest.GetStatus(tgt_layer, 'topology')[0]['center']
if fig is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = fig.gca()
# get positions, reorganize to x and y vectors
tgtpos = GetTargetPositions(src_nrn, tgt_layer, tgt_model, syn_type)
if tgtpos:
xpos, ypos = zip(*tgtpos[0])
ax.scatter(xpos, ypos, s=tgt_size, facecolor=tgt_color,
edgecolor='none')
ax.scatter(srcpos[:1], srcpos[1:], s=src_size, facecolor=src_color,
edgecolor='none',
alpha=0.4, zorder=-10)
_draw_extent(ax, xctr, yctr, xext, yext)
if mask is not None or kernel is not None:
PlotKernel(ax, src_nrn, mask, kernel, mask_color, kernel_color)
else:
# 3D layer
from mpl_toolkits.mplot3d import Axes3D
if fig is None:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
else:
ax = fig.gca()
# get positions, reorganize to x,y,z vectors
tgtpos = GetTargetPositions(src_nrn, tgt_layer, tgt_model, syn_type)
if tgtpos:
xpos, ypos, zpos = zip(*tgtpos[0])
ax.scatter3D(xpos, ypos, zpos, s=tgt_size, facecolor=tgt_color,
edgecolor='none')
ax.scatter3D(srcpos[:1], srcpos[1:2], srcpos[2:], s=src_size,
facecolor=src_color, edgecolor='none',
alpha=0.4, zorder=-10)
plt.draw_if_interactive()
return fig
def PlotKernel(ax, src_nrn, mask, kern=None, mask_color='red',
kernel_color='red'):
"""
Add indication of mask and kernel to axes.
Adds a solid line for the mask. For a doughnut mask, both the inner and
outer boundary are drawn. If kern is Gaussian, dashed lines mark 1, 2 and
3 sigma.
This function ignores periodic boundary conditions.
Usually, this function is invoked by ``PlotTargets``.
Parameters
----------
ax : matplotlib.axes.AxesSubplot,
subplot reference returned by PlotTargets
src_nrn : tuple/list of int(s)
GID of source neuron (as single-element list); mask and kernel are
plotted relative to it
mask : dict
Mask used in creating connections.
kern : [None | dict], optional, default: None
Kernel used in creating connections
mask_color : [None | any matplotlib color], optional, default: 'red'
Color used for line marking mask
kernel_color : [None | any matplotlib color], optional, default: 'red'
Color used for lines marking kernel
Returns
-------
out : None
See also
--------
CreateMask : Create a ``Mask`` object. Documentation on available spatial
masks.
CreateParameter : Create a ``Parameter`` object. Documentation on available
parameters for distance dependency and randomization.
PlotLayer : Plot all nodes in a layer.
Notes
-----
* Do not use this function in distributed simulations.
**Example**
::
import nest.topology as tp
import matplotlib.pyplot as plt
# create a layer
l = tp.CreateLayer({'rows' : 11,
'columns' : 11,
'extent' : [11.0, 11.0],
'elements' : 'iaf_neuron'})
# connectivity specifications
mask_dict = {'rectangular': {'lower_left' : [-2.0, -1.0],
'upper_right' : [2.0, 1.0]}}
kernel_dict = {'gaussian': {'p_center' : 1.0,
'sigma' : 1.0}}
conndict = {'connection_type': 'divergent',
'mask' : mask_dict,
'kernel' : kernel_dict}
# connect layer l with itself according to the given
# specifications
tp.ConnectLayers(l, l, conndict)
# set up figure
fig, ax = plt.subplots()
# plot layer nodes
tp.PlotLayer(l, fig)
# choose center element of the layer as source node
ctr_elem = tp.FindCenterElement(l)
# plot mask and kernel of the center element
tp.PlotKernel(ax, ctr_elem, mask=mask_dict, kern=kernel_dict)
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# minimal check that ax is a matplotlib Axes instance
if ax and not isinstance(ax, matplotlib.axes.Axes):
raise ValueError('ax must be matplotlib.axes.Axes instance.')
srcpos = np.array(GetPosition(src_nrn)[0])
if 'anchor' in mask:
offs = np.array(mask['anchor'])
else:
offs = np.array([0., 0.])
if 'circular' in mask:
r = mask['circular']['radius']
ax.add_patch(plt.Circle(srcpos + offs, radius=r, zorder=-1000,
fc='none', ec=mask_color, lw=3))
elif 'doughnut' in mask:
r_in = mask['doughnut']['inner_radius']
r_out = mask['doughnut']['outer_radius']
ax.add_patch(plt.Circle(srcpos + offs, radius=r_in, zorder=-1000,
fc='none', ec=mask_color, lw=3))
ax.add_patch(plt.Circle(srcpos + offs, radius=r_out, zorder=-1000,
fc='none', ec=mask_color, lw=3))
elif 'rectangular' in mask:
ll = mask['rectangular']['lower_left']
ur = mask['rectangular']['upper_right']
ax.add_patch(
plt.Rectangle(srcpos + ll + offs, ur[0] - ll[0], ur[1] - ll[1],
zorder=-1000, fc='none', ec=mask_color, lw=3))
else:
raise ValueError(
'Mask type cannot be plotted with this version of PyTopology.')
if kern is not None and isinstance(kern, dict):
if 'gaussian' in kern:
sigma = kern['gaussian']['sigma']
for r in range(3):
ax.add_patch(plt.Circle(srcpos + offs, radius=(r + 1) * sigma,
zorder=-1000,
fc='none', ec=kernel_color, lw=3,
ls='dashed'))
else:
raise ValueError('Kernel type cannot be plotted with this ' +
'version of PyTopology')
plt.draw()
| gpl-2.0 |
jkbradley/spark | python/pyspark/sql/tests/test_dataframe.py | 2 | 37690 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pydoc
import time
import unittest
from pyspark.sql import SparkSession, Row
from pyspark.sql.types import *
from pyspark.sql.utils import AnalysisException, IllegalArgumentException
from pyspark.testing.sqlutils import ReusedSQLTestCase, SQLTestUtils, have_pyarrow, have_pandas, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
class DataFrameTests(ReusedSQLTestCase):
def test_range(self):
self.assertEqual(self.spark.range(1, 1).count(), 0)
self.assertEqual(self.spark.range(1, 0, -1).count(), 1)
self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2)
self.assertEqual(self.spark.range(-2).count(), 0)
self.assertEqual(self.spark.range(3).count(), 3)
def test_duplicated_column_names(self):
df = self.spark.createDataFrame([(1, 2)], ["c", "c"])
row = df.select('*').first()
self.assertEqual(1, row[0])
self.assertEqual(2, row[1])
self.assertEqual("Row(c=1, c=2)", str(row))
# Cannot access columns
self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())
self.assertRaises(AnalysisException, lambda: df.select(df.c).first())
self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first())
def test_freqItems(self):
vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)]
df = self.sc.parallelize(vals).toDF()
items = df.stat.freqItems(("a", "b"), 0.4).collect()[0]
self.assertTrue(1 in items[0])
self.assertTrue(-2.0 in items[1])
def test_help_command(self):
# Regression test for SPARK-5464
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(df)
pydoc.render_doc(df.foo)
pydoc.render_doc(df.take(1))
def test_dropna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# shouldn't drop a non-null row
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, 80.1)], schema).dropna().count(),
1)
# dropping rows with a single null value
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna().count(),
0)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='any').count(),
0)
# if how = 'all', only drop rows if all values are null
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='all').count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(None, None, None)], schema).dropna(how='all').count(),
0)
# how and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
0)
# threshold
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(thresh=2).count(),
0)
# threshold and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
0)
# thresh should take precedence over how
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(
how='any', thresh=2, subset=['name', 'age']).count(),
1)
def test_fillna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True),
StructField("spy", BooleanType(), True)])
# fillna shouldn't change non-null values
row = self.spark.createDataFrame([(u'Alice', 10, 80.1, True)], schema).fillna(50).first()
self.assertEqual(row.age, 10)
# fillna with int
row = self.spark.createDataFrame([(u'Alice', None, None, None)], schema).fillna(50).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.0)
# fillna with double
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(50.1).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.1)
# fillna with bool
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(True).first()
self.assertEqual(row.age, None)
self.assertEqual(row.spy, True)
# fillna with string
row = self.spark.createDataFrame([(None, None, None, None)], schema).fillna("hello").first()
self.assertEqual(row.name, u"hello")
self.assertEqual(row.age, None)
# fillna with subset specified for numeric cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(50, subset=['name', 'age']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, 50)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for string cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna("haha", subset=['name', 'age']).first()
self.assertEqual(row.name, "haha")
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for bool cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(True, subset=['name', 'spy']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, True)
# fillna with dictionary for boolean types
row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({"a": True}).first()
self.assertEqual(row.a, True)
def test_repartitionByRange_dataframe(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
df1 = self.spark.createDataFrame(
[(u'Bob', 27, 66.0), (u'Alice', 10, 10.0), (u'Bob', 10, 66.0)], schema)
df2 = self.spark.createDataFrame(
[(u'Alice', 10, 10.0), (u'Bob', 10, 66.0), (u'Bob', 27, 66.0)], schema)
# test repartitionByRange(numPartitions, *cols)
df3 = df1.repartitionByRange(2, "name", "age")
self.assertEqual(df3.rdd.getNumPartitions(), 2)
self.assertEqual(df3.rdd.first(), df2.rdd.first())
self.assertEqual(df3.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(numPartitions, *cols)
df4 = df1.repartitionByRange(3, "name", "age")
self.assertEqual(df4.rdd.getNumPartitions(), 3)
self.assertEqual(df4.rdd.first(), df2.rdd.first())
self.assertEqual(df4.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(*cols)
df5 = df1.repartitionByRange("name", "age")
self.assertEqual(df5.rdd.first(), df2.rdd.first())
self.assertEqual(df5.rdd.take(3), df2.rdd.take(3))
def test_replace(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# replace with int
row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 20.0)
# replace with double
row = self.spark.createDataFrame(
[(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()
self.assertEqual(row.age, 82)
self.assertEqual(row.height, 82.1)
# replace with string
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()
self.assertEqual(row.name, u"Ann")
self.assertEqual(row.age, 10)
# replace with subset specified by a string of a column name w/ actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()
self.assertEqual(row.age, 20)
# replace with subset specified by a string of a column name w/o actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()
self.assertEqual(row.age, 10)
# replace with subset specified with one column replaced, another column not in subset
# stays unchanged.
row = self.spark.createDataFrame(
[(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 10.0)
# replace with subset specified but no column will be replaced
row = self.spark.createDataFrame(
[(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 10)
self.assertEqual(row.height, None)
# replace with lists
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first()
self.assertTupleEqual(row, (u'Ann', 10, 80.1))
# replace with dict
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}).first()
self.assertTupleEqual(row, (u'Alice', 11, 80.1))
# test backward compatibility with dummy value
dummy_value = 1
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# test dict with mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: -10, 80.1: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', -10, 90.5))
# replace with tuples
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# replace multiple columns
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.0))
# test for mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
# replace with boolean
row = (self
.spark.createDataFrame([(u'Alice', 10, 80.0)], schema)
.selectExpr("name = 'Bob'", 'age <= 15')
.replace(False, True).first())
self.assertTupleEqual(row, (True, True))
# replace string with None and then drop None rows
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(u'Alice', None).dropna()
self.assertEqual(row.count(), 0)
# replace with number and None
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace([10, 80], [20, None]).first()
self.assertTupleEqual(row, (u'Alice', 20, None))
# should fail if subset is not list, tuple or None
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first()
# should fail if to_replace and value have different length
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", "Bob"], ["Eve"]).first()
# should fail if when received unexpected type
with self.assertRaises(ValueError):
from datetime import datetime
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first()
# should fail if provided mixed type replacements
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", 10], ["Eve", 20]).first()
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({u"Alice": u"Bob", 10: 20}).first()
with self.assertRaisesRegexp(
TypeError,
'value argument is required when to_replace is not a dictionary.'):
self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(["Alice", "Bob"]).first()
def test_with_column_with_existing_name(self):
keys = self.df.withColumn("key", self.df.key).select("key").collect()
self.assertEqual([r.key for r in keys], list(range(100)))
# regression test for SPARK-10417
def test_column_iterator(self):
def foo():
for x in self.df.key:
break
self.assertRaises(TypeError, foo)
def test_generic_hints(self):
from pyspark.sql import DataFrame
df1 = self.spark.range(10e10).toDF("id")
df2 = self.spark.range(10e10).toDF("id")
self.assertIsInstance(df1.hint("broadcast"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", []), DataFrame)
# Dummy rules
self.assertIsInstance(df1.hint("broadcast", "foo", "bar"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", ["foo", "bar"]), DataFrame)
plan = df1.join(df2.hint("broadcast"), "id")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan.toString().count("BroadcastHashJoin"))
# add tests for SPARK-23647 (test more types for hint)
def test_extended_hint_types(self):
from pyspark.sql import DataFrame
df = self.spark.range(10e10).toDF("id")
such_a_nice_list = ["itworks1", "itworks2", "itworks3"]
hinted_df = df.hint("my awesome hint", 1.2345, "what", such_a_nice_list)
logical_plan = hinted_df._jdf.queryExecution().logical()
self.assertEqual(1, logical_plan.toString().count("1.2345"))
self.assertEqual(1, logical_plan.toString().count("what"))
self.assertEqual(3, logical_plan.toString().count("itworks"))
def test_sample(self):
self.assertRaisesRegexp(
TypeError,
"should be a bool, float and number",
lambda: self.spark.range(1).sample())
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample("a"))
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample(seed="abc"))
self.assertRaises(
IllegalArgumentException,
lambda: self.spark.range(1).sample(-1.0))
def test_toDF_with_schema_string(self):
data = [Row(key=i, value=str(i)) for i in range(100)]
rdd = self.sc.parallelize(data, 5)
df = rdd.toDF("key: int, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>")
self.assertEqual(df.collect(), data)
# different but compatible field types can be used.
df = rdd.toDF("key: string, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>")
self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)])
# field names can differ.
df = rdd.toDF(" a: int, b: string ")
self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>")
self.assertEqual(df.collect(), data)
# number of fields must match.
self.assertRaisesRegexp(Exception, "Length of object",
lambda: rdd.toDF("key: int").collect())
# field types mismatch will cause exception at runtime.
self.assertRaisesRegexp(Exception, "FloatType can not accept",
lambda: rdd.toDF("key: float, value: string").collect())
# flat schema values will be wrapped into row.
df = rdd.map(lambda row: row.key).toDF("int")
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
# users can use DataType directly instead of data type string.
df = rdd.map(lambda row: row.key).toDF(IntegerType())
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
def test_join_without_on(self):
df1 = self.spark.range(1).toDF("a")
df2 = self.spark.range(1).toDF("b")
with self.sql_conf({"spark.sql.crossJoin.enabled": False}):
self.assertRaises(AnalysisException, lambda: df1.join(df2, how="inner").collect())
with self.sql_conf({"spark.sql.crossJoin.enabled": True}):
actual = df1.join(df2, how="inner").collect()
expected = [Row(a=0, b=0)]
self.assertEqual(actual, expected)
# Regression test for invalid join methods when on is None, Spark-14761
def test_invalid_join_method(self):
df1 = self.spark.createDataFrame([("Alice", 5), ("Bob", 8)], ["name", "age"])
df2 = self.spark.createDataFrame([("Alice", 80), ("Bob", 90)], ["name", "height"])
self.assertRaises(IllegalArgumentException, lambda: df1.join(df2, how="invalid-join-type"))
# Cartesian products require cross join syntax
def test_require_cross(self):
df1 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
with self.sql_conf({"spark.sql.crossJoin.enabled": False}):
# joins without conditions require cross join syntax
self.assertRaises(AnalysisException, lambda: df1.join(df2).collect())
# works with crossJoin
self.assertEqual(1, df1.crossJoin(df2).count())
def test_cache(self):
spark = self.spark
with self.tempView("tab1", "tab2"):
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab1")
self.assertTrue(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab2")
spark.catalog.uncacheTable("tab1")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertTrue(spark.catalog.isCached("tab2"))
spark.catalog.clearCache()
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.isCached("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.cacheTable("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.uncacheTable("does_not_exist"))
def _to_pandas(self):
from datetime import datetime, date
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", BooleanType()).add("d", FloatType())\
.add("dt", DateType()).add("ts", TimestampType())
data = [
(1, "foo", True, 3.0, date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1)),
(2, "foo", True, 5.0, None, None),
(3, "bar", False, -1.0, date(2012, 3, 3), datetime(2012, 3, 3, 3, 3, 3)),
(4, "bar", False, 6.0, date(2100, 4, 4), datetime(2100, 4, 4, 4, 4, 4)),
]
df = self.spark.createDataFrame(data, schema)
return df.toPandas()
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas(self):
import numpy as np
pdf = self._to_pandas()
types = pdf.dtypes
self.assertEquals(types[0], np.int32)
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.bool)
self.assertEquals(types[3], np.float32)
self.assertEquals(types[4], np.object) # datetime.date
self.assertEquals(types[5], 'datetime64[ns]')
@unittest.skipIf(have_pandas, "Required Pandas was found.")
def test_to_pandas_required_pandas_not_found(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(ImportError, 'Pandas >= .* must be installed'):
self._to_pandas()
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas_avoid_astype(self):
import numpy as np
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", IntegerType())
data = [(1, "foo", 16777220), (None, "bar", None)]
df = self.spark.createDataFrame(data, schema)
types = df.toPandas().dtypes
self.assertEquals(types[0], np.float64) # doesn't convert to np.int32 due to NaN value.
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.float64)
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas_from_empty_dataframe(self):
# SPARK-29188 test that toPandas() on an empty dataframe has the correct dtypes
import numpy as np
sql = """
SELECT CAST(1 AS TINYINT) AS tinyint,
CAST(1 AS SMALLINT) AS smallint,
CAST(1 AS INT) AS int,
CAST(1 AS BIGINT) AS bigint,
CAST(0 AS FLOAT) AS float,
CAST(0 AS DOUBLE) AS double,
CAST(1 AS BOOLEAN) AS boolean,
CAST('foo' AS STRING) AS string,
CAST('2019-01-01' AS TIMESTAMP) AS timestamp
"""
dtypes_when_nonempty_df = self.spark.sql(sql).toPandas().dtypes
dtypes_when_empty_df = self.spark.sql(sql).filter("False").toPandas().dtypes
self.assertTrue(np.all(dtypes_when_empty_df == dtypes_when_nonempty_df))
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas_from_null_dataframe(self):
# SPARK-29188 test that toPandas() on a dataframe with only nulls has correct dtypes
import numpy as np
sql = """
SELECT CAST(NULL AS TINYINT) AS tinyint,
CAST(NULL AS SMALLINT) AS smallint,
CAST(NULL AS INT) AS int,
CAST(NULL AS BIGINT) AS bigint,
CAST(NULL AS FLOAT) AS float,
CAST(NULL AS DOUBLE) AS double,
CAST(NULL AS BOOLEAN) AS boolean,
CAST(NULL AS STRING) AS string,
CAST(NULL AS TIMESTAMP) AS timestamp
"""
pdf = self.spark.sql(sql).toPandas()
types = pdf.dtypes
self.assertEqual(types[0], np.float64)
self.assertEqual(types[1], np.float64)
self.assertEqual(types[2], np.float64)
self.assertEqual(types[3], np.float64)
self.assertEqual(types[4], np.float32)
self.assertEqual(types[5], np.float64)
self.assertEqual(types[6], np.object)
self.assertEqual(types[7], np.object)
self.assertTrue(np.can_cast(np.datetime64, types[8]))
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_to_pandas_from_mixed_dataframe(self):
# SPARK-29188 test that toPandas() on a dataframe with some nulls has correct dtypes
import numpy as np
sql = """
SELECT CAST(col1 AS TINYINT) AS tinyint,
CAST(col2 AS SMALLINT) AS smallint,
CAST(col3 AS INT) AS int,
CAST(col4 AS BIGINT) AS bigint,
CAST(col5 AS FLOAT) AS float,
CAST(col6 AS DOUBLE) AS double,
CAST(col7 AS BOOLEAN) AS boolean,
CAST(col8 AS STRING) AS string,
CAST(col9 AS TIMESTAMP) AS timestamp
FROM VALUES (1, 1, 1, 1, 1, 1, 1, 1, 1),
(NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)
"""
pdf_with_some_nulls = self.spark.sql(sql).toPandas()
pdf_with_only_nulls = self.spark.sql(sql).filter('tinyint is null').toPandas()
self.assertTrue(np.all(pdf_with_only_nulls.dtypes == pdf_with_some_nulls.dtypes))
def test_create_dataframe_from_array_of_long(self):
import array
data = [Row(longarray=array.array('l', [-9223372036854775808, 0, 9223372036854775807]))]
df = self.spark.createDataFrame(data)
self.assertEqual(df.first(), Row(longarray=[-9223372036854775808, 0, 9223372036854775807]))
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_create_dataframe_from_pandas_with_timestamp(self):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]}, columns=["d", "ts"])
# test types are inferred correctly without specifying schema
df = self.spark.createDataFrame(pdf)
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
# test with schema will accept pdf as input
df = self.spark.createDataFrame(pdf, schema="d date, ts timestamp")
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
@unittest.skipIf(have_pandas, "Required Pandas was found.")
def test_create_dataframe_required_pandas_not_found(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(
ImportError,
"(Pandas >= .* must be installed|No module named '?pandas'?)"):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]})
self.spark.createDataFrame(pdf)
# Regression test for SPARK-23360
@unittest.skipIf(not have_pandas, pandas_requirement_message)
def test_create_dataframe_from_pandas_with_dst(self):
import pandas as pd
from pandas.util.testing import assert_frame_equal
from datetime import datetime
pdf = pd.DataFrame({'time': [datetime(2015, 10, 31, 22, 30)]})
df = self.spark.createDataFrame(pdf)
assert_frame_equal(pdf, df.toPandas())
orig_env_tz = os.environ.get('TZ', None)
try:
tz = 'America/Los_Angeles'
os.environ['TZ'] = tz
time.tzset()
with self.sql_conf({'spark.sql.session.timeZone': tz}):
df = self.spark.createDataFrame(pdf)
assert_frame_equal(pdf, df.toPandas())
finally:
del os.environ['TZ']
if orig_env_tz is not None:
os.environ['TZ'] = orig_env_tz
time.tzset()
def test_repr_behaviors(self):
import re
pattern = re.compile(r'^ *\|', re.MULTILINE)
df = self.spark.createDataFrame([(1, "1"), (22222, "22222")], ("key", "value"))
# test when eager evaluation is enabled and _repr_html_ will not be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}):
expected1 = """+-----+-----+
|| key|value|
|+-----+-----+
|| 1| 1|
||22222|22222|
|+-----+-----+
|"""
self.assertEquals(re.sub(pattern, '', expected1), df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
expected2 = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
||222| 222|
|+---+-----+
|"""
self.assertEquals(re.sub(pattern, '', expected2), df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
expected3 = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
|+---+-----+
|only showing top 1 row
|"""
self.assertEquals(re.sub(pattern, '', expected3), df.__repr__())
# test when eager evaluation is enabled and _repr_html_ will be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}):
expected1 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|<tr><td>22222</td><td>22222</td></tr>
|</table>
|"""
self.assertEquals(re.sub(pattern, '', expected1), df._repr_html_())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
expected2 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|<tr><td>222</td><td>222</td></tr>
|</table>
|"""
self.assertEquals(re.sub(pattern, '', expected2), df._repr_html_())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
expected3 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|</table>
|only showing top 1 row
|"""
self.assertEquals(re.sub(pattern, '', expected3), df._repr_html_())
# test when eager evaluation is disabled and _repr_html_ will be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": False}):
expected = "DataFrame[key: bigint, value: string]"
self.assertEquals(None, df._repr_html_())
self.assertEquals(expected, df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
self.assertEquals(None, df._repr_html_())
self.assertEquals(expected, df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
self.assertEquals(None, df._repr_html_())
self.assertEquals(expected, df.__repr__())
def test_to_local_iterator(self):
df = self.spark.range(8, numPartitions=4)
expected = df.collect()
it = df.toLocalIterator()
self.assertEqual(expected, list(it))
# Test DataFrame with empty partition
df = self.spark.range(3, numPartitions=4)
it = df.toLocalIterator()
expected = df.collect()
self.assertEqual(expected, list(it))
def test_to_local_iterator_prefetch(self):
df = self.spark.range(8, numPartitions=4)
expected = df.collect()
it = df.toLocalIterator(prefetchPartitions=True)
self.assertEqual(expected, list(it))
def test_to_local_iterator_not_fully_consumed(self):
# SPARK-23961: toLocalIterator throws exception when not fully consumed
# Create a DataFrame large enough so that write to socket will eventually block
df = self.spark.range(1 << 20, numPartitions=2)
it = df.toLocalIterator()
self.assertEqual(df.take(1)[0], next(it))
with QuietTest(self.sc):
it = None # remove iterator from scope, socket is closed when cleaned up
# Make sure normal df operations still work
result = []
for i, row in enumerate(df.toLocalIterator()):
result.append(row)
if i == 7:
break
self.assertEqual(df.take(8), result)
class QueryExecutionListenerTests(unittest.TestCase, SQLTestUtils):
# These tests are separate because it uses 'spark.sql.queryExecutionListeners' which is
# static and immutable. This can't be set or unset, for example, via `spark.conf`.
@classmethod
def setUpClass(cls):
import glob
from pyspark.find_spark_home import _find_spark_home
SPARK_HOME = _find_spark_home()
filename_pattern = (
"sql/core/target/scala-*/test-classes/org/apache/spark/sql/"
"TestQueryExecutionListener.class")
cls.has_listener = bool(glob.glob(os.path.join(SPARK_HOME, filename_pattern)))
if cls.has_listener:
# Note that 'spark.sql.queryExecutionListeners' is a static immutable configuration.
cls.spark = SparkSession.builder \
.master("local[4]") \
.appName(cls.__name__) \
.config(
"spark.sql.queryExecutionListeners",
"org.apache.spark.sql.TestQueryExecutionListener") \
.getOrCreate()
def setUp(self):
if not self.has_listener:
raise self.skipTest(
"'org.apache.spark.sql.TestQueryExecutionListener' is not "
"available. Will skip the related tests.")
@classmethod
def tearDownClass(cls):
if hasattr(cls, "spark"):
cls.spark.stop()
def tearDown(self):
self.spark._jvm.OnSuccessCall.clear()
def test_query_execution_listener_on_collect(self):
self.assertFalse(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should not be called before 'collect'")
self.spark.sql("SELECT * FROM range(1)").collect()
self.spark.sparkContext._jsc.sc().listenerBus().waitUntilEmpty(10000)
self.assertTrue(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should be called after 'collect'")
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
def test_query_execution_listener_on_collect_with_arrow(self):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": True}):
self.assertFalse(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should not be "
"called before 'toPandas'")
self.spark.sql("SELECT * FROM range(1)").toPandas()
self.spark.sparkContext._jsc.sc().listenerBus().waitUntilEmpty(10000)
self.assertTrue(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should be called after 'toPandas'")
if __name__ == "__main__":
from pyspark.sql.tests.test_dataframe import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
iamshang1/Projects | Advanced_ML/Text_Classification/tf_han.py | 1 | 14525 | '''
hierarchical attention network for document classification
https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf
'''
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import numpy as np
import tensorflow as tf
from tensorflow.contrib.rnn import LSTMCell, GRUCell
import sys
import time
class hierarchical_attention_network(object):
'''
hierarchical attention network for document classification
https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf
parameters:
- embedding_matrix: numpy array
numpy array of word embeddings
each row should represent a word embedding
NOTE: the word index 0 is dropped, so the first row is ignored
- num classes: int
number of output classes
- max_sents: int
maximum number of sentences per document
- max_words: int
maximum number of words per sentence
- rnn_type: string (default: "gru")
rnn cells to use, can be "gru" or "lstm"
- rnn_units: int (default: 200)
number of rnn units to use for embedding layers
- attention_context: int (default: 300)
number of dimensions to use for attention context layer
- dropout_keep: float (default: 1.0)
dropout keep rate for final softmax layer
methods:
- train(data,labels,epochs=30,savebest=False,filepath=None)
train network on given data
- predict(data)
return the one-hot-encoded predicted labels for given data
- score(data,labels,bootstrap=False,bs_samples=100)
return the accuracy of predicted labels on given data
- save(filepath)
save the model weights to a file
- load(filepath)
load model weights from a file
'''
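# A minimal usage sketch (hypothetical array names and shapes, assuming a
# pre-built embedding matrix and zero-padded integer word ids):
#
#   embedding_matrix = np.load('embeddings.npy')   # (vocab_size, embed_dim)
#   nn = hierarchical_attention_network(embedding_matrix, num_classes=5,
#                                       max_sents=50, max_words=30)
#   nn.train(X_train, y_train, epochs=5, validation_data=(X_test, y_test))
#   y_pred = nn.predict(X_test)
#   accuracy = nn.score(X_test, y_test)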
def __init__(self,embedding_matrix,num_classes,max_sents,max_words,rnn_type="gru",
rnn_units=200,attention_context=300,dropout_keep=1.0):
self.rnn_units = rnn_units
if rnn_type == "gru":
self.rnn_cell = GRUCell
elif rnn_type == "lstm":
self.rnn_cell = LSTMCell
else:
raise Exception("rnn_type parameter must be set to gru or lstm")
self.dropout_keep = dropout_keep
self.vocab = embedding_matrix
self.embedding_size = embedding_matrix.shape[1]
self.ms = max_sents
self.mw = max_words
#shared variables
with tf.variable_scope('words'):
self.word_atten_W = tf.Variable(self._ortho_weight(2*rnn_units,attention_context),name='word_atten_W')
self.word_atten_b = tf.Variable(np.asarray(np.zeros(attention_context),dtype=np.float32),name='word_atten_b')
self.word_softmax = tf.Variable(self._ortho_weight(attention_context,1),name='word_softmax')
with tf.variable_scope('sentence'):
self.sent_atten_W = tf.Variable(self._ortho_weight(2*rnn_units,attention_context),name='sent_atten_W')
self.sent_atten_b = tf.Variable(np.asarray(np.zeros(attention_context),dtype=np.float32),name='sent_atten_b')
self.sent_softmax = tf.Variable(self._ortho_weight(attention_context,1),name='sent_softmax')
with tf.variable_scope('classify'):
self.W_softmax = tf.Variable(self._ortho_weight(rnn_units*2,num_classes),name='W_softmax')
self.b_softmax = tf.Variable(np.asarray(np.zeros(num_classes),dtype=np.float32),name='b_softmax')
self.embeddings = tf.constant(self.vocab,tf.float32)
self.dropout = tf.placeholder(tf.float32)
#doc input and mask
self.doc_input = tf.placeholder(tf.int32, shape=[max_sents,max_words])
self.words_per_line = tf.reduce_sum(tf.sign(self.doc_input),1)
self.max_lines = tf.reduce_sum(tf.sign(self.words_per_line))
self.max_words = tf.reduce_max(self.words_per_line)
self.doc_input_reduced = self.doc_input[:self.max_lines,:self.max_words]
self.num_words = self.words_per_line[:self.max_lines]
#word rnn layer
self.word_embeds = tf.gather(tf.get_variable('embeddings',initializer=self.embeddings,dtype=tf.float32),self.doc_input_reduced)
with tf.variable_scope('words'):
[word_outputs_fw,word_outputs_bw],_ = \
tf.nn.bidirectional_dynamic_rnn(
tf.contrib.rnn.DropoutWrapper(self.rnn_cell(self.rnn_units),state_keep_prob=self.dropout),
tf.contrib.rnn.DropoutWrapper(self.rnn_cell(self.rnn_units),state_keep_prob=self.dropout),
self.word_embeds,sequence_length=self.num_words,dtype=tf.float32)
word_outputs = tf.concat((word_outputs_fw, word_outputs_bw),2)
#word attention
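# The computation below follows Yang et al. (2016): u_it = tanh(W_w h_it + b_w),
# alpha_it = softmax(u_it . u_w) over the words of each sentence (masked to the
# true sentence length), and the sentence vector is sum_t alpha_it * h_it.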
seq_mask = tf.reshape(tf.sequence_mask(self.num_words,self.max_words),[-1])
u = tf.nn.tanh(tf.matmul(tf.reshape(word_outputs,[-1,self.rnn_units*2]),self.word_atten_W)+self.word_atten_b)
exps = tf.exp(tf.matmul(u,self.word_softmax))
exps = tf.where(seq_mask,exps,tf.ones_like(exps)*0.000000001)
alpha = tf.reshape(exps,[-1,self.max_words,1])
alpha /= tf.reshape(tf.reduce_sum(alpha,1),[-1,1,1])
self.sent_embeds = tf.reduce_sum(word_outputs*alpha,1)
self.sent_embeds = tf.expand_dims(self.sent_embeds,0)
#sentence rnn layer
with tf.variable_scope('sentence'):
[self.sent_outputs_fw,self.sent_outputs_bw],_ = \
tf.nn.bidirectional_dynamic_rnn(
tf.contrib.rnn.DropoutWrapper(self.rnn_cell(self.rnn_units),state_keep_prob=self.dropout),
tf.contrib.rnn.DropoutWrapper(self.rnn_cell(self.rnn_units),state_keep_prob=self.dropout),
self.sent_embeds,sequence_length=tf.expand_dims(self.max_lines,0),dtype=tf.float32)
self.sent_outputs = tf.concat((tf.squeeze(self.sent_outputs_fw,[0]),tf.squeeze(self.sent_outputs_bw,[0])),1)
#sentence attention
self.sent_u = tf.nn.tanh(tf.matmul(self.sent_outputs,self.sent_atten_W) + self.sent_atten_b)
self.sent_exp = tf.exp(tf.matmul(self.sent_u,self.sent_softmax))
self.sent_atten = self.sent_exp/tf.reduce_sum(self.sent_exp)
self.doc_embed = tf.transpose(tf.matmul(tf.transpose(self.sent_outputs),self.sent_atten))
#classification functions
self.output = tf.matmul(self.doc_embed,self.W_softmax)+self.b_softmax
self.prediction = tf.nn.softmax(self.output)
#loss, accuracy, and training functions
self.labels = tf.placeholder(tf.float32, shape=[num_classes])
self.labels_rs = tf.expand_dims(self.labels,0)
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.output,labels=self.labels_rs))
self.optimizer = tf.train.AdamOptimizer(0.00001,0.9,0.99).minimize(self.loss)
#init op
self.init_op = tf.global_variables_initializer()
self.saver = tf.train.Saver()
self.sess = tf.Session()
self.sess.run(self.init_op)
def _ortho_weight(self,fan_in,fan_out):
'''
generate orthogonal weight matrix
'''
bound = np.sqrt(2./(fan_in+fan_out))
W = np.random.randn(fan_in,fan_out)*bound
u, s, v = np.linalg.svd(W,full_matrices=False)
if u.shape[0] != u.shape[1]:
W = u
else:
W = v
return W.astype(np.float32)
def _list_to_numpy(self,inputval):
'''
convert variable length lists of input values to zero padded numpy array
'''
if type(inputval) == list:
retval = np.zeros((self.ms,self.mw))
for i,line in enumerate(inputval):
for j, word in enumerate(line):
retval[i,j] = word
return retval
elif type(inputval) == np.ndarray:
return inputval
else:
raise Exception("invalid input type")
def train(self,data,labels,epochs=30,validation_data=None,savebest=False,filepath=None):
'''
train network on given data
parameters:
- data: numpy array
3d numpy array (doc x sentence x word ids) of input data
- labels: numpy array
2d numpy array of one-hot-encoded labels
- epochs: int (default: 30)
number of epochs to train for
- validation_data: tuple (optional)
tuple of numpy arrays (X,y) representing validation data
- savebest: boolean (default: False)
set to True to save the best model based on validation score per epoch
- filepath: string (optional)
path to save model if savebest is set to True
outputs:
None
'''
if savebest==True and filepath==None:
raise Exception("Please enter a path to save the network")
if validation_data:
validation_size = len(validation_data[0])
else:
validation_size = len(data)
print('training network on %i documents, validating on %i documents' \
% (len(data), validation_size))
#track best model for saving
prevbest = 0
for i in range(epochs):
correct = 0.
start = time.time()
#train
for doc in range(len(data)):
inputval = self._list_to_numpy(data[doc])
feed_dict = {self.doc_input:inputval,self.labels:labels[doc],self.dropout:self.dropout_keep}
pred,cost,_ = self.sess.run([self.prediction,self.loss,self.optimizer],feed_dict=feed_dict)
if np.argmax(pred) == np.argmax(labels[doc]):
correct += 1
sys.stdout.write("epoch %i, sample %i of %i, loss: %f \r"\
% (i+1,doc+1,len(data),cost))
sys.stdout.flush()
if validation_data and (doc+1) % 50000 == 0:
score = self.score(validation_data[0],validation_data[1])
print("iteration %i validation accuracy: %.4f%%" % (doc+1, score*100))
print()
#print("training time: %.2f" % (time.time()-start))
trainscore = correct/len(data)
print("epoch %i training accuracy: %.4f%%" % (i+1, trainscore*100))
#validate
if validation_data:
score = self.score(validation_data[0],validation_data[1])
print("epoch %i validation accuracy: %.4f%%" % (i+1, score*100))
#save if performance better than previous best
if savebest and score >= prevbest:
prevbest = score
self.save(filepath)
def predict(self,data):
'''
return the one-hot-encoded predicted labels for given data
parameters:
- data: numpy array
3d numpy array (doc x sentence x word ids) of input data
outputs:
numpy array of one-hot-encoded predicted labels for input data
'''
labels = []
for doc in range(len(data)):
inputval = self._list_to_numpy(data[doc])
feed_dict = {self.doc_input:inputval,self.dropout:1.0}
prob = self.sess.run(self.prediction,feed_dict=feed_dict)
prob = np.squeeze(prob,0)
one_hot = np.zeros_like(prob)
one_hot[np.argmax(prob)] = 1
labels.append(one_hot)
labels = np.array(labels)
return labels
def score(self,data,labels):
'''
return the accuracy of predicted labels on given data
parameters:
- data: numpy array
3d numpy array (doc x sentence x word ids) of input data
- labels: numpy array
2d numpy array of one-hot-encoded labels
outputs:
float representing accuracy of predicted labels on given data
'''
correct = 0.
for doc in range(len(data)):
inputval = self._list_to_numpy(data[doc])
feed_dict = {self.doc_input:inputval,self.dropout:1.0}
prob = self.sess.run(self.prediction,feed_dict=feed_dict)
if np.argmax(prob) == np.argmax(labels[doc]):
correct +=1
accuracy = correct/len(labels)
return accuracy
def save(self,filename):
'''
save the model weights to a file
parameters:
- filepath: string
path to save model weights
outputs:
None
'''
self.saver.save(self.sess,filename)
def load(self,filename):
'''
load model weights from a file
parameters:
- filepath: string
path from which to load model weights
outputs:
None
'''
self.saver.restore(self.sess,filename)
if __name__ == "__main__":
from sklearn.preprocessing import LabelEncoder, LabelBinarizer
from sklearn.model_selection import train_test_split
import pickle
import os
#load saved files
print "loading data"
vocab = np.load('embeddings.npy')
with open('data.pkl', 'rb') as f:
data = pickle.load(f)
num_docs = len(data)
#convert data to numpy arrays
print "converting data to arrays"
max_sents = 0
max_words = 0
docs = []
labels = []
for i in range(num_docs):
sys.stdout.write("processing record %i of %i \r" % (i+1,num_docs))
sys.stdout.flush()
doc = data[i]['idx']
docs.append(doc)
labels.append(data[i]['label'])
if len(doc) > max_sents:
max_sents = len(doc)
if len(max(doc,key=len)) > max_words:
max_words = len(max(doc,key=len))
del data
print
#label encoder
le = LabelEncoder()
y = le.fit_transform(labels)
classes = len(le.classes_)
lb = LabelBinarizer()
y_bin = lb.fit_transform(y)
del labels
#test train split
X_train,X_test,y_train,y_test = train_test_split(docs,y_bin,test_size=0.1,
random_state=1234,stratify=y)
#train nn
print "building hierarchical attention network"
nn = hierarchical_attention_network(vocab,classes,max_sents,max_words)
nn.train(X_train,y_train,epochs=5,validation_data=(X_test,y_test))
| mit |
thorwhalen/ut | ml/text/topic_analysis.py | 1 | 6849 |
__author__ = 'thor'
from wordcloud import WordCloud
import colorsys
import seaborn as sns
from numpy import sqrt, linspace, ceil, where, arange, array, any, floor, ceil, ndarray
from pandas import Series
import matplotlib.pyplot as plt
class TopicExplorer(object):
def __init__(self, url_vectorizer, topic_model, topic_weight_normalization=None,
word_preprocessor=None,
wordcloud_params={'ranks_only': True, 'width': 300, 'height': 300, 'margin': 1,
'background_color': "black"},
replace_empty_feature_with='EMPTY',
word_art_params={}):
self.url_vectorizer = url_vectorizer
self.feature_names = array(self.url_vectorizer.get_feature_names())  # as array so the boolean mask below works
if word_preprocessor is None:
self.word_preprocessor = lambda x: x
else:
self.word_preprocessor = word_preprocessor
# some features might have empty names: Replace them with replace_empty_feature_with
if replace_empty_feature_with is not None:
lidx = array(self.feature_names) == ''
if any(lidx):
self.feature_names[lidx] = replace_empty_feature_with
self.topic_model = topic_model
self.wordcloud_params = wordcloud_params
self.word_art_params = word_art_params
self.n_topics = len(self.topic_model.components_)
topic_components = self.topic_model.components_
if topic_weight_normalization is not None:
if isinstance(topic_weight_normalization, str):
if topic_weight_normalization == 'tf_normal':
def topic_weight_normalization(topic_components):
topic_components /= topic_components.sum(axis=1)[:, None]
topic_components *= 1 / sqrt((topic_components ** 2).sum(axis=0))
return topic_components
else:
ValueError("Unknown topic_weight_normalization name")
if callable(topic_weight_normalization):
topic_components = topic_weight_normalization(topic_components)
self.topic_word_weights = list()
for topic_idx, topic in enumerate(topic_components):
topic_ww = dict()
for i in topic.argsort():
topic_ww[self.feature_names[i]] = topic_components[topic_idx, i]
self.topic_word_weights.append(Series(topic_ww).sort_values(ascending=False, inplace=False))
self.topic_color = ["hsl(0, 100%, 100%)"]
h_list = list(map(int, linspace(0, 360, len(self.topic_model.components_))))[:-1]
for h in h_list:
self.topic_color.append("hsl({}, 100%, 50%)".format(h))
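# A rough usage sketch (hypothetical objects; any sklearn-style vectorizer and
# topic model exposing `components_` and `transform` should fit this interface):
#
#   vectorizer = TfidfVectorizer().fit(raw_docs)
#   lda = LatentDirichletAllocation(n_components=10).fit(vectorizer.transform(raw_docs))
#   te = TopicExplorer(vectorizer, lda)
#   te.topic_word_art()                   # one word cloud per topic
#   weights = te.topic_weights(raw_docs)  # document-topic weight matrix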
def topic_weights(self, text_collection):
if isinstance(text_collection, str):
# a bare string would be vectorized character by character, so wrap it
text_collection = [text_collection]
return self.topic_model.transform(self.url_vectorizer.transform(text_collection))
def topic_word_art(self, topic_idx=None, n_words=20, save_file=None, color_func=None,
random_state=1, fig_row_size=16, **kwargs):
if topic_idx is None:
ncols = int(floor(sqrt(self.n_topics)))
nrows = int(ceil(self.n_topics / float(ncols)))
ncols_to_nrows_ratio = ncols / nrows
plt.figure(figsize=(fig_row_size, ncols_to_nrows_ratio * fig_row_size))
for i in range(self.n_topics):
plt.subplot(nrows, ncols, i + 1)
self.topic_word_art(topic_idx=i, n_words=n_words, save_file=save_file,
color_func=color_func, random_state=random_state, **kwargs)
plt.gcf().subplots_adjust(wspace=.1, hspace=.1)
# elif isinstance(topic_idx, (list, tuple, ndarray)) and len(topic_idx) == self.n_topics:
# ncols = int(floor(sqrt(self.n_topics)))
# nrows = int(ceil(self.n_topics / float(ncols)))
# ncols_to_nrows_ratio = ncols / nrows
# plt.figure(figsize=(fig_row_size, ncols_to_nrows_ratio * fig_row_size))
# for i in range(self.n_topics):
# plt.subplot(nrows, ncols, i + 1)
# self.topic_word_art(topic_idx=i, n_words=n_words, save_file=save_file,
# color_func=color_func, random_state=random_state,
# width=int(self.wordcloud_params['width'] * topic_idx[i]),
# height=int(self.wordcloud_params['height'] * topic_idx[i]))
# plt.gcf().subplots_adjust(wspace=.1, hspace=.1)
else:
kwargs = dict(self.wordcloud_params, **kwargs)
if color_func is None:
color_func = self.word_art_params.get('color_func', self.topic_color[topic_idx])
if isinstance(color_func, tuple):
color_func = "rgb({}, {}, {})".format(*list(map(int, color_func)))
if isinstance(color_func, str):
color = color_func
def color_func(word, font_size, position, orientation, random_state=None, **kwargs):
return color
elif not callable(color_func):
TypeError("Unrecognized hsl_color type ()".format(type(color_func)))
# kwargs = dict(self.word_art_params, **kwargs)
wc = WordCloud(random_state=random_state, **kwargs)
wc.fit_words([(self.word_preprocessor(k), v)
for k, v in self.topic_word_weights[topic_idx].iloc[:n_words].to_dict().items()])
# wc.recolor(color_func=kwargs['color_func'], random_state=random_state)
plt.imshow(wc.recolor(color_func=color_func, random_state=random_state))
plt.grid(False)
plt.xticks([])
plt.yticks([])
def plot_topic_trajectory(self, urls):
_topic_weights = self.topic_weights(urls)
_topic_weights = (_topic_weights.T / _topic_weights.max(axis=1))
sns.heatmap(_topic_weights, cbar=False, linewidths=1)
plt.ylabel('Topic')
plt.xlabel('Page view')
ax = plt.gca()
start, end = ax.get_xlim()
if _topic_weights.shape[1] > 20:
ax.xaxis.set_ticks(arange(start, end, 10))
ax.xaxis.set_ticklabels(arange(start, end, 10).astype(int))
return ax
def plot_topic_trajectory_of_tcid(self, tcid, data):
d = data[data.tc_id == tcid].sort_values(by='timestamp', ascending=True)
urls = d.data_url_test
ax = self.plot_topic_trajectory(urls)
conversion_idx = where(array(d.data_env_template == 'funnel_confirmation'))[0]
if len(conversion_idx):
min_y, max_y = plt.ylim()
for idx in conversion_idx:
plt.plot((idx + 0.5, idx + 0.5), (min_y, max_y), 'b-')
| mit |
kkk669/mxnet | docs/mxdoc.py | 13 | 12762 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A sphnix-doc plugin to build mxnet docs"""
import subprocess
import re
import os
import json
import sys
from recommonmark import transform
import pypandoc
# import StringIO from io for python3 compatibility
from io import StringIO
import contextlib
# white list to evaluate the code block output, such as ['tutorials/gluon']
_EVAL_WHILTELIST = []
# start or end of a code block
_CODE_MARK = re.compile('^([ ]*)```([\w]*)')
# language names and the according file extensions and comment symbol
_LANGS = {'python' : ('py', '#'),
'r' : ('R','#'),
'scala' : ('scala', '#'),
'julia' : ('jl', '#'),
'perl' : ('pl', '#'),
'cpp' : ('cc', '//'),
'bash' : ('sh', '#')}
_LANG_SELECTION_MARK = 'INSERT SELECTION BUTTONS'
_SRC_DOWNLOAD_MARK = 'INSERT SOURCE DOWNLOAD BUTTONS'
def _run_cmd(cmds):
"""Run commands, raise exception if failed"""
if not isinstance(cmds, str):
cmds = "".join(cmds)
print("Execute \"%s\"" % cmds)
try:
subprocess.check_call(cmds, shell=True)
except subprocess.CalledProcessError as err:
print(err)
raise err
def generate_doxygen(app):
"""Run the doxygen make commands"""
_run_cmd("cd %s/.. && make doxygen" % app.builder.srcdir)
_run_cmd("cp -rf doxygen/html %s/doxygen" % app.builder.outdir)
def build_mxnet(app):
"""Build mxnet .so lib"""
_run_cmd("cd %s/.. && cp make/config.mk config.mk && make -j$(nproc) DEBUG=1" %
app.builder.srcdir)
def build_r_docs(app):
"""build r pdf"""
r_root = app.builder.srcdir + '/../R-package'
root_path = app.builder.srcdir + '/..'
pdf_path = root_path + '/docs/api/r/mxnet-r-reference-manual.pdf'
_run_cmd('cd ' + r_root +
'; R -e "roxygen2::roxygenize()"; R CMD Rd2pdf . --no-preview -o ' + pdf_path)
dest_path = app.builder.outdir + '/api/r/'
_run_cmd('mkdir -p ' + dest_path + '; mv ' + pdf_path + ' ' + dest_path)
def build_scala_docs(app):
"""build scala doc and then move the outdir"""
scala_path = app.builder.srcdir + '/../scala-package/core/src/main/scala/ml/dmlc/mxnet'
# scaladoc fails on some apis, so exit 0 to pass the check
_run_cmd('cd ' + scala_path + '; scaladoc `find . | grep .*scala`; exit 0')
dest_path = app.builder.outdir + '/api/scala/docs'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
scaladocs = ['index', 'index.html', 'ml', 'lib', 'index.js', 'package.html']
for doc_file in scaladocs:
_run_cmd('cd ' + scala_path + ' && mv -f ' + doc_file + ' ' + dest_path)
def _convert_md_table_to_rst(table):
"""Convert a markdown table to rst format"""
if len(table) < 3:
return ''
out = '```eval_rst\n.. list-table::\n :header-rows: 1\n\n'
for i,l in enumerate(table):
cols = l.split('|')[1:-1]
if i == 0:
ncol = len(cols)
else:
if len(cols) != ncol:
return ''
if i == 1:
for c in cols:
if len(c) != 0 and '---' not in c:
return ''
else:
for j,c in enumerate(cols):
out += ' * - ' if j == 0 else ' - '
out += pypandoc.convert_text(
c, 'rst', format='md').replace('\n', ' ').replace('\r', '') + '\n'
out += '```\n'
return out
def convert_table(app, docname, source):
"""Find tables in a markdown and then convert them into the rst format"""
num_tables = 0
for i,j in enumerate(source):
table = []
output = ''
in_table = False
for l in j.split('\n'):
r = l.strip()
if r.startswith('|'):
table.append(r)
in_table = True
else:
if in_table is True:
converted = _convert_md_table_to_rst(table)
if converted == '':
print("Failed to convert the markdown table")
print(table)
else:
num_tables += 1
output += converted
in_table = False
table = []
output += l + '\n'
source[i] = output
if num_tables > 0:
print('Converted %d tables in %s' % (num_tables, docname))
def _parse_code_lines(lines):
"""A iterator that returns if a line is within a code block
Returns
-------
iterator of (str, bool, str, int)
- line: the line
- in_code: if this line is in a code block
- lang: the code block language
- indent: the code indent
"""
in_code = False
lang = None
indent = None
for l in lines:
m = _CODE_MARK.match(l)
if m is not None:
if not in_code:
if m.groups()[1].lower() in _LANGS:
lang = m.groups()[1].lower()
indent = len(m.groups()[0])
in_code = True
yield (l, in_code, lang, indent)
else:
yield (l, in_code, lang, indent)
lang = None
indent = None
in_code = False
else:
yield (l, in_code, lang, indent)
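# For example, given the lines ['```python', 'x = 1', '```'], the middle line is
# reported with in_code=True and lang='python', while lines outside any fence
# come back with in_code=False.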
def _get_lang_selection_btn(langs):
active = True
btngroup = '<div class="text-center">\n<div class="btn-group opt-group" role="group">'
for l in langs:
btngroup += '<button type="button" class="btn btn-default opt %s">%s</button>\n' % (
'active' if active else '', l[0].upper()+l[1:].lower())
active = False
btngroup += '</div>\n</div> <script type="text/javascript" src="../../_static/js/options.js"></script>'
return btngroup
def _get_blocks(lines):
"""split lines into code and non-code blocks
Returns
-------
iterator of (bool, str, list of str)
- if it is a code block
- source language
- lines of source
"""
cur_block = []
pre_lang = None
pre_in_code = None
for (l, in_code, cur_lang, _) in _parse_code_lines(lines):
if in_code != pre_in_code:
if pre_in_code and len(cur_block) >= 2:
cur_block = cur_block[1:-1] # remove ```
# remove empty lines at head
while len(cur_block) > 0:
if len(cur_block[0]) == 0:
cur_block.pop(0)
else:
break
# remove empty lines at tail
while len(cur_block) > 0:
if len(cur_block[-1]) == 0:
cur_block.pop()
else:
break
if len(cur_block):
yield (pre_in_code, pre_lang, cur_block)
cur_block = []
cur_block.append(l)
pre_lang = cur_lang
pre_in_code = in_code
if len(cur_block):
yield (pre_in_code, pre_lang, cur_block)
def _get_mk_code_block(src, lang):
"""Return a markdown code block
E.g.
```python
import mxnet
```
"""
if lang is None:
lang = ''
return '```'+lang+'\n'+src.rstrip()+'\n'+'```\n'
@contextlib.contextmanager
def _string_io():
oldout = sys.stdout
olderr = sys.stderr
strio = StringIO()
sys.stdout = strio
sys.stderr = strio
yield strio
sys.stdout = oldout
sys.stderr = olderr
def _get_python_block_output(src, global_dict, local_dict):
"""Evaluate python source codes
Returns
(bool, str):
- True if success
- output
"""
src = '\n'.join([l for l in src.split('\n')
if not l.startswith('%') and not 'plt.show()' in l])
ret_status = True
err = ''
with _string_io() as s:
try:
exec(src, global_dict, global_dict)
except Exception as e:
err = str(e)
ret_status = False
return (ret_status, s.getvalue()+err)
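# For instance, _get_python_block_output("print(1+1)", {}, {}) runs the snippet
# through exec() with stdout captured and returns (True, "2\n"); a snippet that
# raises returns (False, <captured output plus the exception message>).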
def _get_jupyter_notebook(lang, lines):
cells = []
for in_code, blk_lang, lines in _get_blocks(lines):
if blk_lang != lang:
in_code = False
src = '\n'.join(lines)
cell = {
"cell_type": "code" if in_code else "markdown",
"metadata": {},
"source": src
}
if in_code:
cell.update({
"outputs": [],
"execution_count": None,
})
cells.append(cell)
ipynb = {"nbformat" : 4,
"nbformat_minor" : 2,
"metadata" : {"language":lang, "display_name":'', "name":''},
"cells" : cells}
return ipynb
def _get_source(lang, lines):
cmt = _LANGS[lang][1] + ' '
out = []
for in_code, _, lines in _get_blocks(lines):
if in_code:
out.append('')
for l in lines:
if in_code:
if '%matplotlib' not in l:
out.append(l)
else:
if ('<div>' in l or '</div>' in l or
'<script>' in l or '</script>' in l or
'<!--' in l or '-->' in l or
'%matplotlib' in l ):
continue
out.append(cmt+l)
if in_code:
out.append('')
return out
def _get_src_download_btn(out_prefix, langs, lines):
btn = '<div class="btn-group" role="group">\n'
for lang in langs:
ipynb = out_prefix
if lang == 'python':
ipynb += '.ipynb'
else:
ipynb += '_' + lang + '.ipynb'
with open(ipynb, 'w') as f:
json.dump(_get_jupyter_notebook(lang, lines), f)
f = ipynb.split('/')[-1]
btn += '<div class="download-btn"><a href="%s" download="%s">' \
'<span class="glyphicon glyphicon-download-alt"></span> %s</a></div>' % (f, f, f)
btn += '</div>\n'
return btn
def add_buttons(app, docname, source):
out_prefix = app.builder.outdir + '/' + docname
dirname = os.path.dirname(out_prefix)
if not os.path.exists(dirname):
os.makedirs(dirname)
for i,j in enumerate(source):
local_dict = {}
global_dict = {}
lines = j.split('\n')
langs = set([l for (_, _, l, _) in _parse_code_lines(lines)
if l is not None and l in _LANGS])
# first convert
for k,l in enumerate(lines):
if _SRC_DOWNLOAD_MARK in l:
lines[k] = _get_src_download_btn(
out_prefix, langs, lines)
# # then add lang buttons
# for k,l in enumerate(lines):
# if _LANG_SELECTION_MARK in l:
# lines[k] = _get_lang_selection_btn(langs)
output = ''
for in_code, lang, lines in _get_blocks(lines):
src = '\n'.join(lines)+'\n'
if in_code:
output += _get_mk_code_block(src, lang)
if lang == 'python' and any([w in docname for w in _EVAL_WHILTELIST]):
status, blk_out = _get_python_block_output(src, global_dict, local_dict)
if len(blk_out):
output += '<div class=\"cell-results-header\">Output:</div>\n\n'
output += _get_mk_code_block(blk_out, 'results')
else:
output += src
source[i] = output
# source[i] = '\n'.join(lines)
def setup(app):
app.connect("builder-inited", build_mxnet)
app.connect("builder-inited", generate_doxygen)
app.connect("builder-inited", build_scala_docs)
# skipped to build r, it requires to install latex, which is kinds of too heavy
# app.connect("builder-inited", build_r_docs)
app.connect('source-read', convert_table)
app.connect('source-read', add_buttons)
app.add_config_value('recommonmark_config', {
'url_resolver': lambda url: 'http://mxnet.io/' + url,
'enable_eval_rst': True,
}, True)
app.add_transform(transform.AutoStructify)
| apache-2.0 |
Clyde-fare/scikit-learn | examples/decomposition/plot_image_denoising.py | 181 | 5819 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of the Lena image, first using online :ref:`DictionaryLearning` and then
various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the results of :ref:`omp` with two
non-zero coefficients are a bit less biased than when keeping only one
(the edges look less prominent). It is in addition closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import lena
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
###############################################################################
# Load Lena image and extract patches
lena = lena() / 256.0
# downsample for higher speed
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena /= 4.0
height, width = lena.shape
# Distort the right half of the image
print('Distorting image...')
distorted = lena.copy()
distorted[:, height // 2:] += 0.075 * np.random.randn(width, height // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :height // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from Lena patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray, interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, lena, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, height // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = lena.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(
patches, (width, height // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], lena,
title + ' (time: %.1fs)' % dt)
plt.show()
| bsd-3-clause |
gfyoung/pandas | pandas/tests/extension/test_sparse.py | 1 | 15154 | """
This file contains a minimal set of tests for compliance with the extension
array interface test suite, and should contain no other tests.
The test suite for the full functionality of the array is located in
`pandas/tests/arrays/`.
The tests in this file are inherited from the BaseExtensionTests, and only
minimal tweaks should be applied to get the tests passing (by overwriting a
parent method).
Additional tests should either be added to one of the BaseExtensionTests
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
from pandas.core.dtypes.common import is_object_dtype
import pandas as pd
from pandas import SparseDtype
import pandas._testing as tm
from pandas.arrays import SparseArray
from pandas.tests.extension import base
def make_data(fill_value):
if np.isnan(fill_value):
data = np.random.uniform(size=100)
else:
data = np.random.randint(1, 100, size=100)
if data[0] == data[1]:
data[0] += 1
data[2::3] = fill_value
return data
@pytest.fixture
def dtype():
return SparseDtype()
@pytest.fixture(params=[0, np.nan])
def data(request):
"""Length-100 PeriodArray for semantics test."""
res = SparseArray(make_data(request.param), fill_value=request.param)
return res
@pytest.fixture
def data_for_twos(request):
return SparseArray(np.ones(100) * 2)
@pytest.fixture(params=[0, np.nan])
def data_missing(request):
"""Length 2 array with [NA, Valid]"""
return SparseArray([np.nan, 1], fill_value=request.param)
@pytest.fixture(params=[0, np.nan])
def data_repeated(request):
"""Return different versions of data for count times"""
def gen(count):
for _ in range(count):
yield SparseArray(make_data(request.param), fill_value=request.param)
yield gen
@pytest.fixture(params=[0, np.nan])
def data_for_sorting(request):
return SparseArray([2, 3, 1], fill_value=request.param)
@pytest.fixture(params=[0, np.nan])
def data_missing_for_sorting(request):
return SparseArray([2, np.nan, 1], fill_value=request.param)
@pytest.fixture
def na_value():
return np.nan
@pytest.fixture
def na_cmp():
return lambda left, right: pd.isna(left) and pd.isna(right)
@pytest.fixture(params=[0, np.nan])
def data_for_grouping(request):
return SparseArray([1, 1, np.nan, np.nan, 2, 2, 1, 3], fill_value=request.param)
class BaseSparseTests:
def _check_unsupported(self, data):
if data.dtype == SparseDtype(int, 0):
pytest.skip("Can't store nan in int array.")
@pytest.mark.xfail(reason="SparseArray does not support setitem")
def test_ravel(self, data):
super().test_ravel(data)
class TestDtype(BaseSparseTests, base.BaseDtypeTests):
def test_array_type_with_arg(self, data, dtype):
assert dtype.construct_array_type() is SparseArray
class TestInterface(BaseSparseTests, base.BaseInterfaceTests):
def test_no_values_attribute(self, data):
pytest.skip("We have values")
def test_copy(self, data):
# __setitem__ does not work, so we only have a smoke-test
data.copy()
def test_view(self, data):
# __setitem__ does not work, so we only have a smoke-test
data.view()
class TestConstructors(BaseSparseTests, base.BaseConstructorsTests):
pass
class TestReshaping(BaseSparseTests, base.BaseReshapingTests):
def test_concat_mixed_dtypes(self, data):
# https://github.com/pandas-dev/pandas/issues/20762
# This should be the same, aside from concat([sparse, float])
df1 = pd.DataFrame({"A": data[:3]})
df2 = pd.DataFrame({"A": [1, 2, 3]})
df3 = pd.DataFrame({"A": ["a", "b", "c"]}).astype("category")
dfs = [df1, df2, df3]
# dataframes
result = pd.concat(dfs)
expected = pd.concat(
[x.apply(lambda s: np.asarray(s).astype(object)) for x in dfs]
)
self.assert_frame_equal(result, expected)
def test_concat_columns(self, data, na_value):
self._check_unsupported(data)
super().test_concat_columns(data, na_value)
def test_concat_extension_arrays_copy_false(self, data, na_value):
self._check_unsupported(data)
super().test_concat_extension_arrays_copy_false(data, na_value)
def test_align(self, data, na_value):
self._check_unsupported(data)
super().test_align(data, na_value)
def test_align_frame(self, data, na_value):
self._check_unsupported(data)
super().test_align_frame(data, na_value)
def test_align_series_frame(self, data, na_value):
self._check_unsupported(data)
super().test_align_series_frame(data, na_value)
def test_merge(self, data, na_value):
self._check_unsupported(data)
super().test_merge(data, na_value)
@pytest.mark.xfail(reason="SparseArray does not support setitem")
def test_transpose(self, data):
super().test_transpose(data)
class TestGetitem(BaseSparseTests, base.BaseGetitemTests):
def test_get(self, data):
s = pd.Series(data, index=[2 * i for i in range(len(data))])
if np.isnan(s.values.fill_value):
assert np.isnan(s.get(4)) and np.isnan(s.iloc[2])
else:
assert s.get(4) == s.iloc[2]
assert s.get(2) == s.iloc[1]
def test_reindex(self, data, na_value):
self._check_unsupported(data)
super().test_reindex(data, na_value)
# Skipping TestSetitem, since we don't implement it.
class TestMissing(BaseSparseTests, base.BaseMissingTests):
def test_isna(self, data_missing):
expected_dtype = SparseDtype(bool, pd.isna(data_missing.dtype.fill_value))
expected = SparseArray([True, False], dtype=expected_dtype)
result = pd.isna(data_missing)
self.assert_equal(result, expected)
result = pd.Series(data_missing).isna()
expected = pd.Series(expected)
self.assert_series_equal(result, expected)
# GH 21189
result = pd.Series(data_missing).drop([0, 1]).isna()
expected = pd.Series([], dtype=expected_dtype)
self.assert_series_equal(result, expected)
def test_fillna_limit_pad(self, data_missing):
with tm.assert_produces_warning(PerformanceWarning):
super().test_fillna_limit_pad(data_missing)
def test_fillna_limit_backfill(self, data_missing):
with tm.assert_produces_warning(PerformanceWarning):
super().test_fillna_limit_backfill(data_missing)
def test_fillna_series_method(self, data_missing):
with tm.assert_produces_warning(PerformanceWarning):
super().test_fillna_limit_backfill(data_missing)
@pytest.mark.skip(reason="Unsupported")
def test_fillna_series(self):
# this one looks doable.
pass
def test_fillna_frame(self, data_missing):
# Have to override to specify that fill_value will change.
fill_value = data_missing[1]
result = pd.DataFrame({"A": data_missing, "B": [1, 2]}).fillna(fill_value)
if pd.isna(data_missing.fill_value):
dtype = SparseDtype(data_missing.dtype, fill_value)
else:
dtype = data_missing.dtype
expected = pd.DataFrame(
{
"A": data_missing._from_sequence([fill_value, fill_value], dtype=dtype),
"B": [1, 2],
}
)
self.assert_frame_equal(result, expected)
class TestMethods(BaseSparseTests, base.BaseMethodsTests):
def test_combine_le(self, data_repeated):
# We return a Series[SparseArray].__le__ returns a
# Series[Sparse[bool]]
# rather than Series[bool]
orig_data1, orig_data2 = data_repeated(2)
s1 = pd.Series(orig_data1)
s2 = pd.Series(orig_data2)
result = s1.combine(s2, lambda x1, x2: x1 <= x2)
expected = pd.Series(
SparseArray(
[a <= b for (a, b) in zip(list(orig_data1), list(orig_data2))],
fill_value=False,
)
)
self.assert_series_equal(result, expected)
val = s1.iloc[0]
result = s1.combine(val, lambda x1, x2: x1 <= x2)
expected = pd.Series(
SparseArray([a <= val for a in list(orig_data1)], fill_value=False)
)
self.assert_series_equal(result, expected)
def test_fillna_copy_frame(self, data_missing):
arr = data_missing.take([1, 1])
df = pd.DataFrame({"A": arr})
filled_val = df.iloc[0, 0]
result = df.fillna(filled_val)
assert df.values.base is not result.values.base
assert df.A._values.to_dense() is arr.to_dense()
def test_fillna_copy_series(self, data_missing):
arr = data_missing.take([1, 1])
ser = pd.Series(arr)
filled_val = ser[0]
result = ser.fillna(filled_val)
assert ser._values is not result._values
assert ser._values.to_dense() is arr.to_dense()
@pytest.mark.skip(reason="Not Applicable")
def test_fillna_length_mismatch(self, data_missing):
pass
def test_where_series(self, data, na_value):
assert data[0] != data[1]
cls = type(data)
a, b = data[:2]
ser = pd.Series(cls._from_sequence([a, a, b, b], dtype=data.dtype))
cond = np.array([True, True, False, False])
result = ser.where(cond)
new_dtype = SparseDtype("float", 0.0)
expected = pd.Series(
cls._from_sequence([a, a, na_value, na_value], dtype=new_dtype)
)
self.assert_series_equal(result, expected)
other = cls._from_sequence([a, b, a, b], dtype=data.dtype)
cond = np.array([True, False, True, True])
result = ser.where(cond, other)
expected = pd.Series(cls._from_sequence([a, b, b, b], dtype=data.dtype))
self.assert_series_equal(result, expected)
def test_combine_first(self, data):
if data.dtype.subtype == "int":
# Right now this is upcasted to float, just like combine_first
# for Series[int]
pytest.skip("TODO(SparseArray.__setitem__ will preserve dtype.")
super().test_combine_first(data)
def test_searchsorted(self, data_for_sorting, as_series):
with tm.assert_produces_warning(PerformanceWarning):
super().test_searchsorted(data_for_sorting, as_series)
def test_shift_0_periods(self, data):
# GH#33856 shifting with periods=0 should return a copy, not same obj
result = data.shift(0)
data._sparse_values[0] = data._sparse_values[1]
assert result._sparse_values[0] != result._sparse_values[1]
@pytest.mark.parametrize("method", ["argmax", "argmin"])
def test_argmin_argmax_all_na(self, method, data, na_value):
# overriding because Sparse[int64, 0] cannot handle na_value
self._check_unsupported(data)
super().test_argmin_argmax_all_na(method, data, na_value)
@pytest.mark.parametrize("box", [pd.array, pd.Series, pd.DataFrame])
def test_equals(self, data, na_value, as_series, box):
self._check_unsupported(data)
super().test_equals(data, na_value, as_series, box)
class TestCasting(BaseSparseTests, base.BaseCastingTests):
def test_astype_object_series(self, all_data):
# Unlike the base class, we do not expect the resulting Block
# to be ObjectBlock
ser = pd.Series(all_data, name="A")
result = ser.astype(object)
assert is_object_dtype(result._data.blocks[0].dtype)
def test_astype_object_frame(self, all_data):
# Unlike the base class, we do not expect the resulting Block
# to be ObjectBlock
df = pd.DataFrame({"A": all_data})
result = df.astype(object)
assert is_object_dtype(result._data.blocks[0].dtype)
# FIXME: these currently fail; dont leave commented-out
# check that we can compare the dtypes
# comp = result.dtypes.equals(df.dtypes)
# assert not comp.any()
def test_astype_str(self, data):
result = pd.Series(data[:5]).astype(str)
expected_dtype = SparseDtype(str, str(data.fill_value))
expected = pd.Series([str(x) for x in data[:5]], dtype=expected_dtype)
self.assert_series_equal(result, expected)
@pytest.mark.xfail(raises=TypeError, reason="no sparse StringDtype")
def test_astype_string(self, data):
super().test_astype_string(data)
class TestArithmeticOps(BaseSparseTests, base.BaseArithmeticOpsTests):
series_scalar_exc = None
frame_scalar_exc = None
divmod_exc = None
series_array_exc = None
def _skip_if_different_combine(self, data):
if data.fill_value == 0:
# arith ops call on dtype.fill_value so that the sparsity
# is maintained. Combine can't be called on a dtype in
# general, so we can't make the expected. This is tested elsewhere
raise pytest.skip("Incorrected expected from Series.combine")
def test_error(self, data, all_arithmetic_operators):
pass
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
self._skip_if_different_combine(data)
super().test_arith_series_with_scalar(data, all_arithmetic_operators)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
self._skip_if_different_combine(data)
super().test_arith_series_with_array(data, all_arithmetic_operators)
class TestComparisonOps(BaseSparseTests, base.BaseComparisonOpsTests):
def _compare_other(self, s, data, op_name, other):
op = self.get_op_from_name(op_name)
# array
result = pd.Series(op(data, other))
# hard to test the fill value, since we don't know what expected
# is in general.
# Rely on tests in `tests/sparse` to validate that.
assert isinstance(result.dtype, SparseDtype)
assert result.dtype.subtype == np.dtype("bool")
with np.errstate(all="ignore"):
expected = pd.Series(
SparseArray(
op(np.asarray(data), np.asarray(other)),
fill_value=result.values.fill_value,
)
)
tm.assert_series_equal(result, expected)
# series
s = pd.Series(data)
result = op(s, other)
tm.assert_series_equal(result, expected)
class TestPrinting(BaseSparseTests, base.BasePrintingTests):
@pytest.mark.xfail(reason="Different repr", strict=True)
def test_array_repr(self, data, size):
super().test_array_repr(data, size)
class TestParsing(BaseSparseTests, base.BaseParsingTests):
@pytest.mark.parametrize("engine", ["c", "python"])
def test_EA_types(self, engine, data):
expected_msg = r".*must implement _from_sequence_of_strings.*"
with pytest.raises(NotImplementedError, match=expected_msg):
super().test_EA_types(engine, data)
| bsd-3-clause |
grahesh/Stock-Market-Event-Analysis | qstkutil/tsutil.py | 1 | 29904 | '''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on Jan 1, 2011
@author:Drew Bratcher
@contact: [email protected]
@summary: Contains tutorial for backtester and report.
'''
import math
import datetime as dt
import numpy as np
from qstkutil import qsdateutil
from math import sqrt
from copy import deepcopy
import random as rand
from qstkutil import DataAccess as da
from qstkutil import qsdateutil as du
# getPeriodicRets below relies on the (legacy) pandas DateRange/DateOffset API
from pandas import DateRange, DateOffset
def daily(lfFunds):
"""
@summary Computes daily returns centered around 0
@param funds: A time series containing daily fund values
@return an array of daily returns
"""
nds = np.asarray(deepcopy(lfFunds))
s= np.shape(nds)
if len(s)==1:
nds=np.expand_dims(nds,1)
returnize0(nds)
return(nds)
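# Example: fund values [100.0, 110.0, 99.0] give daily returns of
# 0.0, 0.10 and -0.10 (the first entry is always 0).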
def daily1(lfFunds):
"""
@summary Computes daily returns centered around 1
@param funds: A time series containing daily fund values
@return an array of daily returns
"""
nds = np.asarray(deepcopy(lfFunds))
s= np.shape(nds)
if len(s)==1:
nds=np.expand_dims(nds,1)
returnize1(nds)
return(nds)
def monthly(funds):
"""
@summary Computes monthly returns centered around 0
@param funds: A time series containing daily fund values
@return an array of monthly returns
"""
funds2 = []
last_last_month = -1
years = qsdateutil.getYears(funds)
for year in years:
months = qsdateutil.getMonths(funds, year)
for month in months:
last_this_month = qsdateutil.getLastDay(funds, year, month)
if last_last_month == -1 :
last_last_month=qsdateutil.getFirstDay(funds, year, month)
if type(funds).__name__=='TimeSeries':
funds2.append(funds[last_this_month]/funds[last_last_month]-1)
else:
funds2.append(funds.xs(last_this_month)/funds.xs(last_last_month)-1)
last_last_month = last_this_month
return(funds2)
def average_monthly(funds):
"""
@summary Computes average monthly returns centered around 0
@param funds: A time series containing daily fund values
@return an array of average monthly returns
"""
rets = daily(funds)
ret_i = 0
years = qsdateutil.getYears(funds)
averages = []
for year in years:
months = qsdateutil.getMonths(funds, year)
for month in months:
avg = 0
count = 0
days = qsdateutil.getDays(funds, year, month)
for day in days:
avg += rets[ret_i]
ret_i += 1
count += 1
averages.append(float(avg) / count)
return(averages)
def fillforward(nds):
"""
@summary Removes NaNs from a 2D array by scanning forward in the
1st dimension. If a cell is NaN, the value above it is carried forward.
@param nds: the array to fill forward
@return the array is revised in place
"""
for col in range(nds.shape[1]):
for row in range(1, nds.shape[0]):
if math.isnan(nds[row, col]):
nds[row, col] = nds[row-1, col]
def fillbackward(nds):
"""
@summary Removes NaNs from a 2D array by scanning backward in the
1st dimension. If a cell is NaN, the value above it is carried backward.
@param nds: the array to fill backward
@return the array is revised in place
"""
for col in range(nds.shape[1]):
for row in range(nds.shape[0] - 2, -1, -1):
if math.isnan(nds[row, col]):
nds[row, col] = nds[row+1, col]
def returnize0(nds):
"""
@summary Computes stepwise (usually daily) returns relative to 0, where
0 implies no change in value.
@return the array is revised in place
"""
s= np.shape(nds)
if len(s)==1:
nds=np.expand_dims(nds,1)
nds[1:, :] = (nds[1:, :] / nds[0:-1]) - 1
nds[0, :] = np.zeros(nds.shape[1])
def returnize1(nds):
"""
@summary Computes stepwise (usually daily) returns relative to 1, where
1 implies no change in value.
@param nds: the array to fill backward
@return the array is revised in place
"""
s= np.shape(nds)
if len(s)==1:
nds=np.expand_dims(nds,1)
nds[1:, :] = (nds[1:, :]/nds[0:-1])
nds[0, :] = np.ones(nds.shape[1])
def priceize1(nds):
"""
@summary Computes stepwise (usually daily) returns relative to 1, where
1 implies no change in value.
@param nds: the array to fill backward
@return the array is revised in place
"""
nds[0, :] = 100
for i in range(1, nds.shape[0]):
nds[i, :] = nds[i-1, :] * nds[i, :]
def logreturnize(nds):
"""
@summary Computes stepwise (usually daily) logarithmic returns.
@param nds: the array to fill backward
@return the array is revised in place
"""
returnize1(nds)
nds = np.log(nds)
return nds
def get_winning_days( rets):
"""
@summary Returns the percentage of winning days of the returns.
@param rets: 1d numpy array or fund list of daily returns (centered on 0)
@return Percentage of winning days
"""
negative_rets = []
for i in rets:
if(i<0):
negative_rets.append(i)
return 100 * (1 - float(len(negative_rets)) / float(len(rets)))
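# Example: rets = [0.01, -0.02, 0.03, 0.005] has one losing day out of four,
# so get_winning_days(rets) returns 75.0.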
def get_max_draw_down(ts_vals):
"""
@summary Returns the max draw down of the returns.
@param ts_vals: 1d numpy array or fund list
@return Max draw down
"""
MDD = 0
DD = 0
peak = -99999
for value in ts_vals:
if (value > peak):
peak = value
else:
DD = (peak - value) / peak
if (DD > MDD):
MDD = DD
return -1*MDD
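# Example: ts_vals = [100.0, 120.0, 90.0, 110.0] peaks at 120 and bottoms at 90,
# so get_max_draw_down(ts_vals) returns -(120 - 90) / 120 = -0.25.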
def get_sortino_ratio( rets, risk_free=0.00 ):
"""
@summary Returns the daily Sortino ratio of the returns.
@param rets: 1d numpy array or fund list of daily returns (centered on 0)
@param risk_free: risk free return, default is 0%
@return Sortino Ratio, computed off daily returns
"""
rets = np.asarray(rets)
f_mean = np.mean( rets, axis=0 )
negative_rets = rets[rets < 0]
f_dev = np.std( negative_rets, axis=0 )
f_sortino = (f_mean*252 - risk_free) / (f_dev * np.sqrt(252))
return f_sortino
def get_sharpe_ratio( rets, risk_free=0.00 ):
"""
@summary Returns the daily Sharpe ratio of the returns.
@param rets: 1d numpy array or fund list of daily returns (centered on 0)
@param risk_free: risk free returns, default is 0%
@return Annualized rate of return, not converted to percent
"""
f_dev = np.std( rets, axis=0 )
f_mean = np.mean( rets, axis=0 )
f_sharpe = (f_mean *252 - risk_free) / ( f_dev * np.sqrt(252) )
return f_sharpe
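# Example: daily returns with mean 0.0005 and standard deviation 0.01 give an
# annualized Sharpe of roughly (0.0005 * 252) / (0.01 * sqrt(252)) ~= 0.79.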
def get_ror_annual( rets ):
"""
@summary Returns the rate of return annualized. Assumes len(rets) is number of days.
@param rets: 1d numpy array or list of daily gross returns (relative to 1)
@return Annualized rate of return, not converted to percent
"""
f_inv = 1.0
for f_ret in rets:
f_inv = f_inv * f_ret
f_ror_ytd = f_inv - 1.0
#print ' RorYTD =', f_inv, 'Over days:', len(rets)
return ( (1.0 + f_ror_ytd)**( 1.0/(len(rets)/252.0) ) ) - 1.0
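# Example: 126 daily gross returns whose product is 1.05 annualize to
# 1.05 ** (252 / 126.0) - 1 = 0.1025, i.e. about 10.25% per year.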
def getPeriodicRets( dmPrice, sOffset ):
"""
@summary Reindexes a DataMatrix price array and returns the new periodic returns.
@param dmPrice: DataMatrix of stock prices
@param sOffset: Offset string to use, choose from _offsetMap in pandas/core/datetools.py
e.g. 'EOM', 'WEEKDAY', 'W@FRI', 'A@JAN'. Or use a pandas DateOffset.
"""
# Could possibly use DataMatrix.asfreq here """
# Use pandas DateRange to create the dates we want, use 4:00 """
drNewRange = DateRange(dmPrice.index[0], dmPrice.index[-1], timeRule=sOffset)
drNewRange += DateOffset(hours=16)
dmPrice = dmPrice.reindex( drNewRange, method='ffill' )
returnize1( dmPrice.values )
# Do not leave return of 1.0 for first time period: not accurate """
return dmPrice[1:]
def getReindexedRets( rets, l_period ):
"""
@summary Reindexes returns using the cumulative product. E.g. if returns are 1.5 and 1.5, a period of 2 will
produce a 2-day return of 2.25. Note, these must be returns centered around 1.
@param rets: Daily returns of the various stocks (using returnize1)
@param l_period: New target period.
@note: Note that this function does not track actual weeks or months, it only approximates with trading days.
You can use 5 for week, or 21 for month, etc.
"""
naCumData = np.cumprod(rets, axis=0)
lNewRows =(rets.shape[0]-1) / (l_period)
# We compress data into height / l_period + 1 new rows """
for i in range( lNewRows ):
lCurInd = -1 - i*l_period
# Just hold new data in same array"""
# new return is cumprod on day x / cumprod on day x-l_period """
start=naCumData[lCurInd - l_period, :]
naCumData[-1 - i, :] = naCumData[lCurInd, :] / start
# Select new returns from end of cumulative array """
return naCumData[-lNewRows:, ]
def getOptPort(rets, f_target, l_period=1, naLower=None, naUpper=None, lNagDebug=0):
"""
@summary Returns the Markowitz optimum portfolio for a specific return.
@param rets: Daily returns of the various stocks (using returnize1)
@param f_target: Target return, i.e. 0.04 = 4% per period
@param l_period: Period to compress the returns to, e.g. 7 = weekly
@param naLower: List of floats which corresponds to lower portfolio% for each stock
@param naUpper: List of floats which corresponds to upper portfolio% for each stock
@return tuple: (weights of portfolio, min possible return, max possible return)
"""
# Attempt to import library """
try:
pass
import nagint as nag
except ImportError:
print 'Could not import NAG library'
print 'make sure nagint.so is in your python path'
return ([], 0, 0)
# Get number of stocks """
lStocks = rets.shape[1]
# If period != 1 we need to restructure the data """
if( l_period != 1 ):
rets = getReindexedRets( rets, l_period)
# Calculate means and covariance """
naAvgRets = np.average( rets, axis=0 )
naCov = np.cov( rets, rowvar=False )
# Special case for None == f_target"""
# simply return average returns and cov """
if( f_target is None ):
return naAvgRets, np.std(rets, axis=0)
# Calculate upper and lower limits of variables as well as constraints """
if( naUpper is None ):
naUpper = np.ones( lStocks ) # max portfolio % is 1
if( naLower is None ):
naLower = np.zeros( lStocks ) # min is 0, set negative for shorting
# Two extra constraints for linear conditions"""
# result = desired return, and sum of weights = 1 """
naUpper = np.append( naUpper, [f_target, 1.0] )
naLower = np.append( naLower, [f_target, 1.0] )
# Initial estimate of portfolio """
naInitial = np.array([1.0/lStocks]*lStocks)
# Set up constraints matrix"""
# composed of expected returns in row one, unity row in row two """
naConstraints = np.vstack( (naAvgRets, np.ones(lStocks)) )
# Get portfolio weights, last entry in array is actually variance """
try:
naReturn = nag.optPort( naConstraints, naLower, naUpper, \
naCov, naInitial, lNagDebug )
except RuntimeError:
        print('NAG Runtime error with target: %.02lf' % f_target)
return ( naInitial, sqrt( naCov[0][0] ) )
#return semi-junk to not mess up the rest of the plot
# Calculate stdev of entire portfolio to return"""
# what NAG returns is slightly different """
fPortDev = np.std( np.dot(rets, naReturn[0,0:-1]) )
# Show difference between above stdev and sqrt NAG covariance"""
# possibly not taking correlation into account """
#print fPortDev / sqrt(naReturn[0, -1])
# Return weights and stdDev of portfolio."""
# note again the last value of naReturn is NAG's reported variance """
return (naReturn[0, 0:-1], fPortDev)
def OptPort( naData, fTarget, naLower=None, naUpper=None, naExpected=None, s_type = "long"):
"""
@summary Returns the Markowitz optimum portfolio for a specific return.
@param naData: Daily returns of the various stocks (using returnize1)
@param fTarget: Target return, i.e. 0.04 = 4% per period
@param naLower: List of floats which corresponds to lower portfolio% for each stock
@param naUpper: List of floats which corresponds to upper portfolio% for each stock
    @return tuple: (portfolio weights, standard deviation of the portfolio, error flag)
"""
''' Attempt to import library '''
try:
from cvxopt import matrix
from cvxopt.blas import dot
from cvxopt.solvers import qp, options
except ImportError:
        print('Could not import CVX library')
return ([],0, True)
''' Get number of stocks '''
length = naData.shape[1]
b_error = False
naLower = deepcopy(naLower)
naUpper = deepcopy(naUpper)
naExpected = deepcopy(naExpected)
# Assuming AvgReturns as the expected returns if parameter is not specified
    if naExpected is None:
naExpected = np.average( naData, axis=0 )
na_signs = np.sign(naExpected)
indices, = np.where(na_signs == 0)
na_signs[indices] = 1
if s_type == "long":
na_signs = np.ones(len(na_signs))
elif s_type == "short":
na_signs = np.ones(len(na_signs))*(-1)
naData = na_signs*naData
naExpected = na_signs*naExpected
# Covariance matrix of the Data Set
naCov=np.cov(naData, rowvar=False)
# If length is one, just return 100% single symbol
if length == 1:
return (list(na_signs), np.std(naData, axis=0)[0], False)
if length == 0:
return ([], [0], False)
# If we have 0/1 "free" equity we can't optimize
# We just use limits since we are stuck with 0 degrees of freedom
''' Special case for None == fTarget, simply return average returns and cov '''
if( fTarget is None ):
return (naExpected, np.std(naData, axis=0), b_error)
# Upper bound of the Weights of a equity, If not specified, assumed to be 1.
if(naUpper is None):
naUpper= np.ones(length)
# Lower bound of the Weights of a equity, If not specified assumed to be 0 (No shorting case)
if(naLower is None):
naLower= np.zeros(length)
if sum(naLower) == 1:
fPortDev = np.std(np.dot(naData, naLower))
return (naLower, fPortDev, False)
if sum(naUpper) == 1:
fPortDev = np.std(np.dot(naData, naUpper))
return (naUpper, fPortDev, False)
naFree = naUpper != naLower
if naFree.sum() <= 1:
lnaPortfolios = naUpper.copy()
# If there is 1 free we need to modify it to make the total
# Add up to 1
if naFree.sum() == 1:
f_rest = naUpper[~naFree].sum()
lnaPortfolios[naFree] = 1.0 - f_rest
lnaPortfolios = na_signs * lnaPortfolios
fPortDev = np.std(np.dot(naData, lnaPortfolios))
return (lnaPortfolios, fPortDev, False)
# Double the covariance of the diagonal elements for calculating risk.
for i in range(length):
naCov[i][i]=2*naCov[i][i]
# Note, returns are modified to all be long from here on out
(fMin, fMax) = getRetRange(False, naLower, naUpper, naExpected, "long")
#print (fTarget, fMin, fMax)
    if fTarget < fMin or fTarget > fMax:
        print("Target not possible", fTarget, fMin, fMax)
b_error = True
naLower = naLower*(-1)
# Setting up the parameters for the CVXOPT Library, it takes inputs in Matrix format.
'''
The Risk minimization problem is a standard Quadratic Programming problem according to the Markowitz Theory.
'''
S=matrix(naCov)
#pbar=matrix(naExpected)
naLower.shape=(length,1)
naUpper.shape=(length,1)
naExpected.shape = (1,length)
zeo=matrix(0.0,(length,1))
I = np.eye(length)
minusI=-1*I
G=matrix(np.vstack((I, minusI)))
h=matrix(np.vstack((naUpper, naLower)))
ones=matrix(1.0,(1,length))
A=matrix(np.vstack((naExpected, ones)))
b=matrix([float(fTarget),1.0])
# Optional Settings for CVXOPT
options['show_progress'] = False
options['abstol']=1e-25
options['reltol']=1e-24
options['feastol']=1e-25
# Optimization Calls
# Optimal Portfolio
try:
lnaPortfolios = qp(S, -zeo, G, h, A, b)['x']
except:
b_error = True
    if b_error:
        print("Optimization not Possible")
na_port = naLower*-1
if sum(na_port) < 1:
if sum(naUpper) == 1:
na_port = naUpper
else:
i=0
while(sum(na_port)<1 and i<25):
naOrder = naUpper - na_port
i = i+1
indices = np.where(naOrder > 0)
na_port[indices]= na_port[indices] + (1-sum(na_port))/len(indices[0])
naOrder = naUpper - na_port
indices = np.where(naOrder < 0)
na_port[indices]= naUpper[indices]
lnaPortfolios = matrix(na_port)
lnaPortfolios = (na_signs.reshape(-1,1) * lnaPortfolios).reshape(-1)
# Expected Return of the Portfolio
# lfReturn = dot(pbar, lnaPortfolios)
# Risk of the portfolio
fPortDev = np.std(np.dot(naData, lnaPortfolios))
return (lnaPortfolios, fPortDev, b_error)
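# --- Editor's illustrative sketch (not part of the original QSTK module) ---
# Hypothetical call: long-only minimum-variance weights for a 1% per-period
# target return, given a (days x stocks) array of daily returns. Requires the
# cvxopt solver imported inside OptPort; the target must lie inside the
# feasible range or the error flag is set.
def _sketch_opt_port(na_daily_rets):
    na_weights, f_std, b_err = OptPort(na_daily_rets, 0.01)
    return None if b_err else na_weights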
def getRetRange( rets, naLower, naUpper, naExpected = "False", s_type = "long"):
"""
@summary Returns the range of possible returns with upper and lower bounds on the portfolio participation
    @param rets: Daily returns of the various stocks; only used to estimate naExpected when it is not supplied
@param naLower: List of lower percentages by stock
@param naUpper: List of upper percentages by stock
@return tuple containing (fMin, fMax)
"""
# Calculate theoretical minimum and maximum theoretical returns """
fMin = 0
fMax = 0
rets = deepcopy(rets)
if naExpected == "False":
naExpected = np.average( rets, axis=0 )
na_signs = np.sign(naExpected)
indices, = np.where(na_signs == 0)
na_signs[indices] = 1
if s_type == "long":
na_signs = np.ones(len(na_signs))
elif s_type == "short":
na_signs = np.ones(len(na_signs))*(-1)
rets = na_signs*rets
naExpected = na_signs*naExpected
naSortInd = naExpected.argsort()
# First add the lower bounds on portfolio participation """
for i, fRet in enumerate(naExpected):
fMin = fMin + fRet*naLower[i]
fMax = fMax + fRet*naLower[i]
# Now calculate minimum returns"""
# allocate the max possible in worst performing equities """
# Subtract min since we have already counted it """
naUpperAdd = naUpper - naLower
fTotalPercent = np.sum(naLower[:])
for i, lInd in enumerate(naSortInd):
fRetAdd = naUpperAdd[lInd] * naExpected[lInd]
fTotalPercent = fTotalPercent + naUpperAdd[lInd]
fMin = fMin + fRetAdd
# Check if this additional percent puts us over the limit """
if fTotalPercent > 1.0:
fMin = fMin - naExpected[lInd] * (fTotalPercent - 1.0)
break
# Repeat for max, just reverse the sort, i.e. high to low """
naUpperAdd = naUpper - naLower
fTotalPercent = np.sum(naLower[:])
for i, lInd in enumerate(naSortInd[::-1]):
fRetAdd = naUpperAdd[lInd] * naExpected[lInd]
fTotalPercent = fTotalPercent + naUpperAdd[lInd]
fMax = fMax + fRetAdd
# Check if this additional percent puts us over the limit """
if fTotalPercent > 1.0:
fMax = fMax - naExpected[lInd] * (fTotalPercent - 1.0)
break
return (fMin, fMax)
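# --- Editor's illustrative sketch (not part of the original QSTK module) ---
# Worked example: two stocks with mean daily returns of 1% and 3% and per-stock
# bounds of 0%..100% give a feasible portfolio return range of [1%, 3%].
def _sketch_ret_range():
    na_rets = np.array([[0.01, 0.03],
                        [0.01, 0.03]])   # hypothetical returns; column means are 1% and 3%
    return getRetRange(na_rets, np.zeros(2), np.ones(2))   # -> (0.01, 0.03)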
def _create_dict(df_rets, lnaPortfolios):
allocations = {}
for i, sym in enumerate(df_rets.columns):
allocations[sym] = lnaPortfolios[i]
return allocations
def optimizePortfolio(df_rets, list_min, list_max, list_price_target,
target_risk, direction="long"):
naLower = np.array(list_min)
naUpper = np.array(list_max)
naExpected = np.array(list_price_target)
b_same_flag = np.all( naExpected == naExpected[0])
if b_same_flag and (naExpected[0] == 0):
naExpected = naExpected + 0.1
if b_same_flag:
na_randomness = np.ones(naExpected.shape)
target_risk = 0
for i in range(len(na_randomness)):
if i%2 ==0:
na_randomness[i] = -1
naExpected = naExpected + naExpected*0.0000001*na_randomness
(fMin, fMax) = getRetRange( df_rets.values, naLower, naUpper,
naExpected, direction)
    # Try to avoid intractable endpoints due to rounding errors
fMin += abs(fMin) * 0.00000000001
fMax -= abs(fMax) * 0.00000000001
if target_risk == 1:
(naPortWeights, fPortDev, b_error) = OptPort( df_rets.values, fMax, naLower, naUpper, naExpected, direction)
allocations = _create_dict(df_rets, naPortWeights)
return {'allocations': allocations, 'std_dev': fPortDev, 'expected_return': fMax, 'error': b_error}
fStep = (fMax - fMin) / 50.0
lfReturn = [fMin + x * fStep for x in range(51)]
lfStd = []
lnaPortfolios = []
for fTarget in lfReturn:
(naWeights, fStd, b_error) = OptPort( df_rets.values, fTarget, naLower, naUpper, naExpected, direction)
if b_error == False:
lfStd.append(fStd)
lnaPortfolios.append( naWeights )
else:
# Return error on ANY failed optimization
allocations = _create_dict(df_rets, np.zeros(df_rets.shape[1]))
return {'allocations': allocations, 'std_dev': 0.0,
'expected_return': fMax, 'error': True}
if len(lfStd) == 0:
(naPortWeights, fPortDev, b_error) = OptPort( df_rets.values, fMax, naLower, naUpper, naExpected, direction)
allocations = _create_dict(df_rets, naPortWeights)
return {'allocations': allocations, 'std_dev': fPortDev, 'expected_return': fMax, 'error': True}
f_return = lfReturn[lfStd.index(min(lfStd))]
if target_risk == 0:
naPortWeights=lnaPortfolios[lfStd.index(min(lfStd))]
allocations = _create_dict(df_rets, naPortWeights)
return {'allocations': allocations, 'std_dev': min(lfStd), 'expected_return': f_return, 'error': False}
# Otherwise try to hit custom target between 0-1 min-max risk
fTarget = f_return + ((fMax - f_return) * target_risk)
(naPortWeights, fPortDev, b_error) = OptPort( df_rets.values, fTarget, naLower, naUpper, naExpected, direction)
allocations = _create_dict(df_rets, naPortWeights)
return {'allocations': allocations, 'std_dev': fPortDev, 'expected_return': fTarget, 'error': b_error}
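# --- Editor's illustrative sketch (not part of the original QSTK module) ---
# Hypothetical call with a pandas DataFrame of daily returns (one column per
# symbol): ask for the minimum-risk long-only allocation (target_risk=0) using
# the historical mean return of each column as its price target.
def _sketch_optimize_portfolio(df_rets):
    l_n = len(df_rets.columns)
    return optimizePortfolio(df_rets,
                             list_min=[0.0] * l_n,                 # no shorting
                             list_max=[1.0] * l_n,                 # at most 100% per symbol
                             list_price_target=list(df_rets.mean()),
                             target_risk=0)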
def getFrontier( rets, lRes=100, fUpper=0.2, fLower=0.00):
"""
@summary Generates an efficient frontier based on average returns.
@param rets: Array of returns to use
@param lRes: Resolution of the curve, default=100
@param fUpper: Upper bound on portfolio percentage
@param fLower: Lower bound on portfolio percentage
@return tuple containing (lf_ret, lfStd, lnaPortfolios)
lf_ret: List of returns provided by each point
lfStd: list of standard deviations provided by each point
lnaPortfolios: list of numpy arrays containing weights for each portfolio
"""
# Limit/enforce percent participation """
naUpper = np.ones(rets.shape[1]) * fUpper
naLower = np.ones(rets.shape[1]) * fLower
(fMin, fMax) = getRetRange( rets, naLower, naUpper )
    # Try to avoid intractable endpoints due to rounding errors
fMin *= 1.0000001
fMax *= 0.9999999
# Calculate target returns from min and max """
lf_ret = []
for i in range(lRes):
lf_ret.append( (fMax - fMin) * i / (lRes - 1) + fMin )
lfStd = []
lnaPortfolios = []
# Call the function lRes times for the given range, use 1 for period """
for f_target in lf_ret:
(naWeights, fStd) = getOptPort( rets, f_target, 1, \
naUpper=naUpper, naLower=naLower )
lfStd.append(fStd)
lnaPortfolios.append( naWeights )
# plot frontier """
#plt.plot( lfStd, lf_ret )
plt.plot( np.std( rets, axis=0 ), np.average( rets, axis=0 ), \
'g+', markersize=10 )
#plt.show()"""
return (lf_ret, lfStd, lnaPortfolios)
def stockFilter( dmPrice, dmVolume, fNonNan=0.95, fPriceVolume=100*1000 ):
"""
@summary Returns the list of stocks filtered based on various criteria.
@param dmPrice: DataMatrix of stock prices
@param dmVolume: DataMatrix of stock volumes
@param fNonNan: Optional non-nan percent, default is .95
@param fPriceVolume: Optional price*volume, default is 100,000
@return list of stocks which meet the criteria
"""
lsRetStocks = list( dmPrice.columns )
for sStock in dmPrice.columns:
fValid = 0.0
        print(sStock)
# loop through all dates """
for dtDate in dmPrice.index:
# Count null (nan/inf/etc) values """
fPrice = dmPrice[sStock][dtDate]
if( not isnull(fPrice) ):
fValid = fValid + 1
# else test price volume """
fVol = dmVolume[sStock][dtDate]
if( not isnull(fVol) and fVol * fPrice < fPriceVolume ):
lsRetStocks.remove( sStock )
break
# Remove if too many nan values """
if( fValid / len(dmPrice.index) < fNonNan and sStock in lsRetStocks ):
lsRetStocks.remove( sStock )
return lsRetStocks
def getRandPort( lNum, dtStart=None, dtEnd=None, lsStocks=None,\
dmPrice=None, dmVolume=None, bFilter=True, fNonNan=0.95,\
fPriceVolume=100*1000, lSeed=None ):
"""
@summary Returns a random portfolio based on certain criteria.
@param lNum: Number of stocks to be included
@param dtStart: Start date for portfolio
@param dtEnd: End date for portfolio
@param lsStocks: Optional list of ticker symbols, if not provided all symbols will be used
@param bFilter: If False, stocks are not filtered by price or volume data, simply return random Portfolio.
@param dmPrice: Optional price data, if not provided, data access will be queried
@param dmVolume: Optional volume data, if not provided, data access will be queried
@param fNonNan: Optional non-nan percent for filter, default is .95
@param fPriceVolume: Optional price*volume for filter, default is 100,000
@warning: Does not work for all sets of optional inputs, e.g. if you don't include dtStart, dtEnd, you need
to include dmPrice/dmVolume
@return list of stocks which meet the criteria
"""
if( lsStocks is None ):
if( dmPrice is None and dmVolume is None ):
norObj = da.DataAccess('Norgate')
lsStocks = norObj.get_all_symbols()
elif( not dmPrice is None ):
lsStocks = list(dmPrice.columns)
else:
lsStocks = list(dmVolume.columns)
if( dmPrice is None and dmVolume is None and bFilter == True ):
norObj = da.DataAccess('Norgate')
ldtTimestamps = du.getNYSEdays( dtStart, dtEnd, dt.timedelta(hours=16) )
# if dmPrice and dmVol are provided then we don't query it every time """
bPullPrice = False
bPullVol = False
if( dmPrice is None ):
bPullPrice = True
if( dmVolume is None ):
bPullVol = True
# Default seed (none) uses system clock """
rand.seed(lSeed)
lsRetStocks = []
# Loop until we have enough randomly selected stocks """
    llRemainingIndexes = list(range(len(lsStocks)))  # list so .pop() below works
lsValid = None
while( len(lsRetStocks) != lNum ):
lsCheckStocks = []
for i in range( lNum - len(lsRetStocks) ):
lRemaining = len(llRemainingIndexes)
if( lRemaining == 0 ):
                print('Error in getRandPort: ran out of stocks')
return lsRetStocks
# Pick a stock and remove it from the list of remaining stocks """
lPicked = rand.randint(0, lRemaining-1)
lsCheckStocks.append( lsStocks[ llRemainingIndexes.pop(lPicked) ] )
# If bFilter is false"""
# simply return our first list of stocks, don't check prive/vol """
if( not bFilter ):
return sorted(lsCheckStocks)
# Get data if needed """
if( bPullPrice ):
dmPrice = norObj.get_data( ldtTimestamps, lsCheckStocks, 'close' )
# Get data if needed """
if( bPullVol ):
dmVolume = norObj.get_data(ldtTimestamps, lsCheckStocks, 'volume' )
# Only query this once if data is provided"""
# else query every time with new data """
if( lsValid is None or bPullVol or bPullPrice ):
lsValid = stockFilter(dmPrice, dmVolume, fNonNan, fPriceVolume)
for sAdd in lsValid:
if sAdd in lsCheckStocks:
lsRetStocks.append( sAdd )
return sorted(lsRetStocks)
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-0.18.1/sklearn/utils/tests/test_testing.py | 13 | 7883 | import warnings
import unittest
import sys
from sklearn.utils.testing import (
assert_raises,
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message,
ignore_warnings)
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LinearDiscriminantAnalysis()
tree = DecisionTreeClassifier()
# Linear Discriminant Analysis doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
def test_ignore_warning():
    # This checks that the ignore_warnings decorator and context manager are
    # working as expected
def _warning_function():
warnings.warn("deprecation warning", DeprecationWarning)
def _multiple_warning_function():
warnings.warn("deprecation warning", DeprecationWarning)
warnings.warn("deprecation warning")
# Check the function directly
assert_no_warnings(ignore_warnings(_warning_function))
assert_no_warnings(ignore_warnings(_warning_function,
category=DeprecationWarning))
assert_warns(DeprecationWarning, ignore_warnings(_warning_function,
category=UserWarning))
assert_warns(UserWarning,
ignore_warnings(_multiple_warning_function,
category=DeprecationWarning))
assert_warns(DeprecationWarning,
ignore_warnings(_multiple_warning_function,
category=UserWarning))
assert_no_warnings(ignore_warnings(_warning_function,
category=(DeprecationWarning,
UserWarning)))
# Check the decorator
@ignore_warnings
def decorator_no_warning():
_warning_function()
_multiple_warning_function()
@ignore_warnings(category=(DeprecationWarning, UserWarning))
def decorator_no_warning_multiple():
_multiple_warning_function()
@ignore_warnings(category=DeprecationWarning)
def decorator_no_deprecation_warning():
_warning_function()
@ignore_warnings(category=UserWarning)
def decorator_no_user_warning():
_warning_function()
@ignore_warnings(category=DeprecationWarning)
def decorator_no_deprecation_multiple_warning():
_multiple_warning_function()
@ignore_warnings(category=UserWarning)
def decorator_no_user_multiple_warning():
_multiple_warning_function()
assert_no_warnings(decorator_no_warning)
assert_no_warnings(decorator_no_warning_multiple)
assert_no_warnings(decorator_no_deprecation_warning)
assert_warns(DeprecationWarning, decorator_no_user_warning)
assert_warns(UserWarning, decorator_no_deprecation_multiple_warning)
assert_warns(DeprecationWarning, decorator_no_user_multiple_warning)
# Check the context manager
def context_manager_no_warning():
with ignore_warnings():
_warning_function()
def context_manager_no_warning_multiple():
with ignore_warnings(category=(DeprecationWarning, UserWarning)):
_multiple_warning_function()
def context_manager_no_deprecation_warning():
with ignore_warnings(category=DeprecationWarning):
_warning_function()
def context_manager_no_user_warning():
with ignore_warnings(category=UserWarning):
_warning_function()
def context_manager_no_deprecation_multiple_warning():
with ignore_warnings(category=DeprecationWarning):
_multiple_warning_function()
def context_manager_no_user_multiple_warning():
with ignore_warnings(category=UserWarning):
_multiple_warning_function()
assert_no_warnings(context_manager_no_warning)
assert_no_warnings(context_manager_no_warning_multiple)
assert_no_warnings(context_manager_no_deprecation_warning)
assert_warns(DeprecationWarning, context_manager_no_user_warning)
assert_warns(UserWarning, context_manager_no_deprecation_multiple_warning)
assert_warns(DeprecationWarning, context_manager_no_user_multiple_warning)
# This class is inspired from numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
# `clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
nesterione/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
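# Editor's note (illustrative sketch, not part of the original test): the
# scipy.sparse.rand helper mentioned above could build a random sparse matrix
# directly, although with a different value distribution than the construction
# used here, e.g.:
#   X_alt = sp.rand(n_samples, n_features, density=0.1, format='csr', random_state=42)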
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
FluidityStokes/fluidity | examples/tides_in_the_Mediterranean_Sea/Med-tides-probe.py | 2 | 6073 | #!/usr/bin/env python3
import vtktools
import math
from numpy import array
u=vtktools.vtu("tidesmedsea-flat.vtu")
g = open("Med-GEBCO-5m-gauges-fes2004-O1-102", "w")
pts=vtktools.arr([
#[-5.3500, 36.1333, -2.00],
#[-4.4500, 36.7000, 0.00],
#[-3.9167, 35.2500, 0.00],
#[-2.4500, 36.8333, 0.00],
#[-0.5833, 38.3333, 0.00],
[2.6333, 39.5833, 0.00],
#[3.1000, 42.4833, 0.00],
#[5.3500, 43.3000, 0.00],
#[6.9167, 36.8833, 0.00],
[8.0167, 43.8667, 0.00],
[8.3000, 39.1500, 0.00],
[8.9000, 44.4000, 0.00],
[9.1667, 39.2000, 0.00],
#[9.8500, 44.0667, 0.00],
#[10.1167, 33.8833, 0.00],
[10.3000, 43.5333, 0.00],
#[10.7667, 34.7333, 0.00],
#[11.1167, 33.5000, 0.00],
#[11.7833, 42.1000, 0.00],
[12.0000, 36.7833, 0.00],
#[12.3333, 45.4167, 0.00],
[12.5000, 35.5000, 0.00],
[12.5833, 37.6333, 0.00],
[12.8167, 36.1667, 0.00],
[13.2000, 32.9000, 0.00],
#[13.3333, 38.1333, 0.00],
#[13.5000, 43.6167, 0.00],
[13.5000, 37.2500, 0.00],
[13.7500, 45.6500, 0.00],
[13.9333, 40.7333, 0.00],
#[14.2667, 40.8667, 0.00],
[14.4000, 42.5167, 0.00],
[14.5167, 35.9000, 0.00],
#[14.5333, 45.3000, 0.00],
[14.9667, 38.4833, 0.00],
#[15.1000, 38.5000, 0.00],
[15.1500, 36.6833, 0.00],
#[15.2500, 38.2167, 0.00],
#[15.6500, 38.1167, 0.00],
[15.7667, 43.0333, 0.00],
[16.1833, 41.8833, 0.00],
#[16.4333, 43.5000, 0.00],
#[17.2167, 40.4667, 0.00],
#[17.9333, 40.6333, 0.00],
#[18.5000, 40.1500, 0.00],
[19.1000, 42.0667, 0.00],
#[20.7000, 38.8333, 0.00],
[21.3167, 37.6333, 0.00],
#[22.1333, 37.0167, 0.00],
#[23.0333, 40.6167, 0.00],
[23.8000, 32.1833, 0.00],
#[24.0500, 35.5000, 0.00],
[24.9167, 37.4333, 0.00],
#[25.1333, 35.3333, 0.00],
[25.3833, 40.8500, 0.00],
[25.7000, 31.7667, 0.00],
[26.1500, 38.3833, 0.00],
[26.8833, 37.0833, 0.00],
[28.2333, 36.4333, 0.00],
[29.8667, 31.2000, 0.00],
[32.3167, 31.2667, 0.00],
#[33.3167, 35.3333, 0.00],
#[33.9500, 35.1167, 0.00]
])
M2amp = u.ProbeData(pts, "M2amp")
(ilen, jlen) = M2amp.shape
S2amp = u.ProbeData(pts, "S2amp")
K1amp = u.ProbeData(pts, "K1amp")
O1amp = u.ProbeData(pts, "O1amp")
for i in range(ilen):
g.write("%f\n" % M2amp[i][0])
ampcm=M2amp*100
M2_tideGauge_amp = array([
[29.8],
[18.0],
[18.0],
[9.0],
[2.0],
[3.0],
[4.6],
[7.0],
[5.6],
[8.3],
[6.5],
[8.6],
[7.6],
[9.4],
[51.1],
[8.5],
[41.6],
[21.9],
[10.9],
[1.6],
[23.4],
[6.6],
[4.3],
[4.8],
[11.1],
[10.6],
[6.6],
[4.5],
[26.3],
[12.0],
[11.1],
[6.4],
[6.0],
[10.6],
[12.0],
#[6.4],
[6.7],
[12.0],
[6.2],
[6.8],
[7.9],
[8.0],
[6.5],
[8.7],
[7.0],
[9.2],
[4.0],
[3.3],
[2.2],
[9.0],
[1.4],
[1.0],
[2.0],
[1.5],
[7.1],
[2.9],
[4.4],
[2.1],
[4.4],
[7.2],
[11.2],
[10.1],
[11.0]
])
from math import sqrt
ampdiff=ampcm-M2_tideGauge_amp
ampdiff2=ampdiff**2
a = sum(sum(ampdiff2))/62
RMS=sqrt(a)
print("RMS difference of M2 Amp (cm):",RMS)
S2_tide_guage_data_amp = array([
[10.7],
[7],
[7],
[4],
[1],
[1],
[1.8],
[2],
[2.2],
[3.4],
[2.6],
[3.2],
[2.8],
[3.4],
[36.4],
[3.4],
[26.7],
[15.3],
[4.1],
[1.9],
[14.1],
[4.2],
[1.8],
[3.1],
[5.4],
[4.1],
[3.6],
[3.2],
[15.2],
[5],
[4.4],
[4.5],
[4],
[5.5],
[4.5],
#[3.4],
[3.5],
[4.7],
[3.1],
[4.4],
[5.1],
[5.6],
[3.7],
[5.2],
[4],
[5.6],
[2.2],
[1.6],
[1.1],
[6.1],
[1.3],
[0.8],
[1],
[1.1],
[5],
[2.9],
[2.9],
[1.3],
[2.7],
[4.1],
[6.9],
[6.4],
[7.3]
])
S2ampcm=S2amp*100
S2ampdiff=S2ampcm-S2_tide_guage_data_amp
S2ampdiff2=S2ampdiff**2
S2a = sum(sum(S2ampdiff2))/62
S2RMS=sqrt(S2a)
print("RMS difference of S2 Amp (cm):",S2RMS)
K1_tide_guage_data_amp= array([
[2],
[3],
[4],
[3],
[4],
[4],
[3.2],
[3],
[2.3],
[3.6],
[3.2],
[3.3],
[3.2],
[3.7],
[2.5],
[4],
[1.8],
[2],
[2.7],
[2],
[17.9],
[0.9],
[3.5],
[0.5],
[2],
[3.2],
[13],
[1.8],
[19.7],
[3],
[2.8],
[9.7],
[1],
[13.8],
[3.1],
#[1.5],
[1.9],
[3.3],
[1.3],
[6.8],
[4.2],
[8.8],
[1.8],
[4.6],
[2.3],
[4.8],
[1.4],
[1.3],
[1.2],
[2.6],
[0.6],
[1.4],
[1.9],
[1.8],
[0.3],
[1.2],
[2.3],
[2],
[1.8],
[1.7],
[2.1],
[2.4],
[2.1]
])
K1ampcm=K1amp*100
K1ampdiff=K1ampcm-K1_tide_guage_data_amp
K1ampdiff2=K1ampdiff**2
K1a = sum(sum(K1ampdiff2))/62
K1RMS=sqrt(K1a)
print("RMS difference of K1 Amp (cm):",K1RMS)
O1_tide_guage_data_amp= array([
[0.9],
[2],
[1],
[2],
[2],
[2],
[1.9],
[2],
[2],
[1.6],
[1.9],
[1.4],
[1.8],
[1.4],
[0.5],
[1.8],
[0.8],
[0.9],
[1.2],
[1.4],
[5.6],
[0.7],
[1.6],
[0.9],
[0.6],
[1.2],
[4.2],
[1.4],
[6.1],
[1],
[1],
[3.4],
[1],
[4.1],
[1.1],
#[1.1],
[0.9],
[1.1],
[0.9],
[2.5],
[1.5],
[2.7],
[0.8],
[1.5],
[1],
[1.4],
[0.6],
[0.5],
[0.5],
[1.3],
[0.5],
[0.6],
[1],
[0.9],
[1.3],
[0.8],
[1.3],
[1.1],
[1.1],
[1.3],
[1.7],
[1.8],
[1.8]
])
O1ampcm=O1amp*100
O1ampdiff=O1ampcm-O1_tide_guage_data_amp
O1ampdiff2=O1ampdiff**2
O1a = sum(sum(O1ampdiff2))/62
O1RMS=sqrt(O1a)
print("RMS difference of O1 Amp (cm):",O1RMS)
import fluidity_tools
from matplotlib import pylab
pylab.plot(ampcm,M2_tideGauge_amp)
pylab.xlabel("Fluidity")
pylab.ylabel("Tide Gauge")
pylab.show()
import matplotlib
matplotlib.pyplot.scatter(M2_tideGauge_amp,ampcm,s=20, c='b', marker='o')
#pylab.xlabel("Tide Gauge M2 Amplitude (cm)")
#pylab.ylabel("Fluidity M2 Amplitude (cm)")
#x=([0,70])
#y=([0,70])
#matplotlib.pyplot.plot(y,x, label="y=x")
#pylab.ylim(ymax=70.0,ymin=0.0)
#pylab.xlim(xmax=70.0,xmin=0.0)
#pylab.show()
#matplotlib.pyplot.scatter(S2_tide_guage_data_amp,S2ampcm,s=20, c='b', marker='o')
#pylab.xlabel("Tide Gauge S2 Amplitude (cm)")
#pylab.ylabel("Fluidity S2 Amplitude (cm)")
#x=([0,70])
#y=([0,70])
#matplotlib.pyplot.plot(y,x, label="y=x")
#pylab.ylim(ymax=70.0,ymin=0.0)
#pylab.xlim(xmax=70.0,xmin=0.0)
#pylab.show()
#matplotlib.pyplot.scatter(K1_tide_guage_data_amp,K1ampcm,s=20, c='b', marker='o')
#pylab.xlabel("Tide Gauge K1 Amplitude (cm)")
#pylab.ylabel("Fluidity K1 Amplitude (cm)")
#x=([0,20])
#y=([0,20])
#matplotlib.pyplot.plot(y,x, label="y=x")
#pylab.ylim(ymax=20.0,ymin=0.0)
#pylab.xlim(xmax=20.0,xmin=0.0)
#pylab.show()
#matplotlib.pyplot.scatter(O1_tide_guage_data_amp,O1ampcm,s=20, c='b', marker='o')
#pylab.xlabel("Tide Gauge O1 Amplitude (cm)")
#pylab.ylabel("Fluidity O1 Amplitude (cm)")
#x=([0,20])
#y=([0,20])
#matplotlib.pyplot.plot(y,x, label="y=x")
#pylab.ylim(ymax=20.0,ymin=0.0)
#pylab.xlim(xmax=20.0,xmin=0.0)
#pylab.show()
| lgpl-2.1 |
kirangonella/BuildingMachineLearningSystemsWithPython | ch08/chapter.py | 21 | 6372 | import numpy as np # NOT IN BOOK
from matplotlib import pyplot as plt # NOT IN BOOK
def load():
import numpy as np
from scipy import sparse
data = np.loadtxt('data/ml-100k/u.data')
ij = data[:, :2]
ij -= 1 # original data is in 1-based system
values = data[:, 2]
reviews = sparse.csc_matrix((values, ij.T)).astype(float)
return reviews.toarray()
reviews = load()
U,M = np.where(reviews)
import random
test_idxs = np.array(random.sample(range(len(U)), len(U)//10))
train = reviews.copy()
train[U[test_idxs], M[test_idxs]] = 0
test = np.zeros_like(reviews)
test[U[test_idxs], M[test_idxs]] = reviews[U[test_idxs], M[test_idxs]]
class NormalizePositive(object):
def __init__(self, axis=0):
self.axis = axis
def fit(self, features, y=None):
if self.axis == 1:
features = features.T
# count features that are greater than zero in axis 0:
binary = (features > 0)
count0 = binary.sum(axis=0)
# to avoid division by zero, set zero counts to one:
count0[count0 == 0] = 1.
# computing the mean is easy:
self.mean = features.sum(axis=0)/count0
# only consider differences where binary is True:
diff = (features - self.mean) * binary
diff **= 2
# regularize the estimate of std by adding 0.1
self.std = np.sqrt(0.1 + diff.sum(axis=0)/count0)
return self
def transform(self, features):
if self.axis == 1:
features = features.T
binary = (features > 0)
features = features - self.mean
features /= self.std
features *= binary
if self.axis == 1:
features = features.T
return features
def inverse_transform(self, features, copy=True):
if copy:
features = features.copy()
if self.axis == 1:
features = features.T
features *= self.std
features += self.mean
if self.axis == 1:
features = features.T
return features
def fit_transform(self, features):
return self.fit(features).transform(features)
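# Editor's note (sketch, not from the book's source): with axis=1 the class
# normalizes each user's ratings using only that user's rated (non-zero) entries,
#   z[u, m] = (r[u, m] - mean_u) / sqrt(0.1 + var_u)   for r[u, m] > 0, else 0,
# and inverse_transform undoes the scaling so predictions return to the rating scale.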
norm = NormalizePositive(axis=1)
binary = (train > 0)
train = norm.fit_transform(train)
# plot just 200x200 area for space reasons
plt.imshow(binary[:200, :200], interpolation='nearest')
from scipy.spatial import distance
# compute all pair-wise distances:
dists = distance.pdist(binary, 'correlation')
# Convert to square form, so that dists[i,j]
# is distance between binary[i] and binary[j]:
dists = distance.squareform(dists)
neighbors = dists.argsort(axis=1)
# We are going to fill this matrix with results
filled = train.copy()
for u in range(filled.shape[0]):
# n_u is neighbors of user
n_u = neighbors[u, 1:]
for m in range(filled.shape[1]):
# get relevant reviews in order!
revs = [train[neigh, m]
for neigh in n_u
if binary [neigh, m]]
if len(revs):
# n is the number of reviews for this movie
n = len(revs)
# take half of the reviews plus one into consideration:
n //= 2
n += 1
revs = revs[:n]
filled[u,m] = np.mean(revs)
predicted = norm.inverse_transform(filled)
from sklearn import metrics
r2 = metrics.r2_score(test[test > 0], predicted[test > 0])
print('R2 score (binary neighbors): {:.1%}'.format(r2))
reviews = reviews.T
# use same code as before
r2 = metrics.r2_score(test[test > 0], predicted[test > 0])
print('R2 score (binary movie neighbors): {:.1%}'.format(r2))
from sklearn.linear_model import ElasticNetCV # NOT IN BOOK
reg = ElasticNetCV(alphas=[
0.0125, 0.025, 0.05, .125, .25, .5, 1., 2., 4.])
filled = train.copy()
# iterate over all users:
for u in range(train.shape[0]):
curtrain = np.delete(train, u, axis=0)
bu = binary[u]
reg.fit(curtrain[:,bu].T, train[u, bu])
filled[u, ~bu] = reg.predict(curtrain[:,~bu].T)
predicted = norm.inverse_transform(filled)
r2 = metrics.r2_score(test[test > 0], predicted[test > 0])
print('R2 score (user regression): {:.1%}'.format(r2))
# SHOPPING BASKET ANALYSIS
# This is the slow version of the code, which will take a long time to
# complete.
from collections import defaultdict
from itertools import chain
# File is downloaded as a compressed file
import gzip
# file format is a line per transaction
# of the form '12 34 342 5...'
dataset = [[int(tok) for tok in line.strip().split()]
for line in gzip.open('data/retail.dat.gz')]
dataset = [set(d) for d in dataset]
# count how often each product was purchased:
counts = defaultdict(int)
for elem in chain(*dataset):
counts[elem] += 1
minsupport = 80
valid = set(k for k,v in counts.items() if (v >= minsupport))
itemsets = [frozenset([v]) for v in valid]
freqsets = []
for i in range(16):
nextsets = []
tested = set()
for it in itemsets:
for v in valid:
if v not in it:
# Create a new candidate set by adding v to it
c = (it | frozenset([v]))
# check If we have tested it already
if c in tested:
continue
tested.add(c)
# Count support by looping over dataset
# This step is slow.
# Check `apriori.py` for a better implementation.
support_c = sum(1 for d in dataset if d.issuperset(c))
if support_c > minsupport:
nextsets.append(c)
freqsets.extend(nextsets)
itemsets = nextsets
if not len(itemsets):
break
print("Finished!")
minlift = 5.0
nr_transactions = float(len(dataset))
for itemset in freqsets:
for item in itemset:
consequent = frozenset([item])
antecedent = itemset-consequent
base = 0.0
# acount: antecedent count
acount = 0.0
# ccount : consequent count
ccount = 0.0
for d in dataset:
if item in d: base += 1
if d.issuperset(itemset): ccount += 1
if d.issuperset(antecedent): acount += 1
base /= nr_transactions
p_y_given_x = ccount/acount
lift = p_y_given_x / base
if lift > minlift:
print('Rule {0} -> {1} has lift {2}'
.format(antecedent, consequent,lift))
| mit |
ShawnMurd/MetPy | src/metpy/plots/skewt.py | 1 | 36666 | # Copyright (c) 2014,2015,2016,2017,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Make Skew-T Log-P based plots.
Contain tools for making Skew-T Log-P plots, including the base plotting class,
`SkewT`, as well as a class for making a `Hodograph`.
"""
from contextlib import ExitStack
import warnings
import matplotlib
from matplotlib.axes import Axes
import matplotlib.axis as maxis
from matplotlib.collections import LineCollection
import matplotlib.colors as mcolors
from matplotlib.patches import Circle
from matplotlib.projections import register_projection
import matplotlib.spines as mspines
from matplotlib.ticker import MultipleLocator, NullFormatter, ScalarFormatter
import matplotlib.transforms as transforms
import numpy as np
from ._util import colored_line
from ..calc import dewpoint, dry_lapse, el, lcl, moist_lapse, vapor_pressure
from ..calc.tools import _delete_masked_points
from ..interpolate import interpolate_1d
from ..package_tools import Exporter
from ..units import concatenate, units
exporter = Exporter(globals())
class SkewTTransform(transforms.Affine2D):
"""Perform Skew transform for Skew-T plotting.
This works in pixel space, so is designed to be applied after the normal plotting
transformations.
"""
def __init__(self, bbox, rot):
"""Initialize skew transform.
This needs a reference to the parent bounding box to do the appropriate math and
to register it as a child so that the transform is invalidated and regenerated if
the bounding box changes.
"""
super().__init__()
self._bbox = bbox
self.set_children(bbox)
self.invalidate()
# We're not trying to support changing the rotation, so go ahead and convert to
# the right factor for skewing here and just save that.
self._rot_factor = np.tan(np.deg2rad(rot))
def get_matrix(self):
"""Return transformation matrix."""
if self._invalid:
# The following matrix is equivalent to the following:
# x0, y0 = self._bbox.xmin, self._bbox.ymin
# self.translate(-x0, -y0).skew_deg(self._rot, 0).translate(x0, y0)
# Setting it this way is just more efficient.
self._mtx = np.array([[1.0, self._rot_factor, -self._rot_factor * self._bbox.ymin],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]])
# Need to clear both the invalid flag *and* reset the inverse, which is cached
# by the parent class.
self._invalid = 0
self._inverted = None
return self._mtx
class SkewXTick(maxis.XTick):
r"""Make x-axis ticks for Skew-T plots.
This class adds to the standard :class:`matplotlib.axis.XTick` dynamic checking
for whether a top or bottom tick is actually within the data limits at that part
and draw as appropriate. It also performs similar checking for gridlines.
"""
# Taken from matplotlib's SkewT example to update for matplotlib 3.1's changes to
# state management for ticks. See matplotlib/matplotlib#10088
def draw(self, renderer):
"""Draw the tick."""
# When adding the callbacks with `stack.callback`, we fetch the current
# visibility state of the artist with `get_visible`; the ExitStack will
# restore these states (`set_visible`) at the end of the block (after
# the draw).
with ExitStack() as stack:
for artist in [self.gridline, self.tick1line, self.tick2line,
self.label1, self.label2]:
stack.callback(artist.set_visible, artist.get_visible())
self.tick1line.set_visible(self.tick1line.get_visible() and self.lower_in_bounds)
self.label1.set_visible(self.label1.get_visible() and self.lower_in_bounds)
self.tick2line.set_visible(self.tick2line.get_visible() and self.upper_in_bounds)
self.label2.set_visible(self.label2.get_visible() and self.upper_in_bounds)
self.gridline.set_visible(self.gridline.get_visible() and self.grid_in_bounds)
super().draw(renderer)
@property
def lower_in_bounds(self):
"""Whether the lower part of the tick is in bounds."""
return transforms.interval_contains(self.axes.lower_xlim, self.get_loc())
@property
def upper_in_bounds(self):
"""Whether the upper part of the tick is in bounds."""
return transforms.interval_contains(self.axes.upper_xlim, self.get_loc())
@property
def grid_in_bounds(self):
"""Whether any of the tick grid line is in bounds."""
return transforms.interval_contains(self.axes.xaxis.get_view_interval(),
self.get_loc())
class SkewXAxis(maxis.XAxis):
r"""Make an x-axis that works properly for Skew-T plots.
This class exists to force the use of our custom :class:`SkewXTick` as well
as provide a custom value for interval that combines the extents of the
upper and lower x-limits from the axes.
"""
def _get_tick(self, major):
# Warning stuff can go away when we only support Matplotlib >=3.3
with warnings.catch_warnings():
warnings.simplefilter('ignore', getattr(
matplotlib, 'MatplotlibDeprecationWarning', DeprecationWarning))
return SkewXTick(self.axes, None, label=None, major=major)
# Needed to properly handle tight bbox
def _get_tick_bboxes(self, ticks, renderer):
"""Return lists of bboxes for ticks' label1's and label2's."""
return ([tick.label1.get_window_extent(renderer)
for tick in ticks if tick.label1.get_visible() and tick.lower_in_bounds],
[tick.label2.get_window_extent(renderer)
for tick in ticks if tick.label2.get_visible() and tick.upper_in_bounds])
def get_view_interval(self):
"""Get the view interval."""
return self.axes.upper_xlim[0], self.axes.lower_xlim[1]
class SkewSpine(mspines.Spine):
r"""Make an x-axis spine that works properly for Skew-T plots.
This class exists to use the separate x-limits from the axes to properly
locate the spine.
"""
def _adjust_location(self):
pts = self._path.vertices
if self.spine_type == 'top':
pts[:, 0] = self.axes.upper_xlim
else:
pts[:, 0] = self.axes.lower_xlim
class SkewXAxes(Axes):
r"""Make a set of axes for Skew-T plots.
This class handles registration of the skew-xaxes as a projection as well as setting up
the appropriate transformations. It also makes sure we use our instances for spines
and x-axis: :class:`SkewSpine` and :class:`SkewXAxis`. It provides properties to
facilitate finding the x-limits for the bottom and top of the plot as well.
"""
# The projection must specify a name. This will be used be the
# user to select the projection, i.e. ``subplot(111,
# projection='skewx')``.
name = 'skewx'
def __init__(self, *args, **kwargs):
r"""Initialize `SkewXAxes`.
Parameters
----------
args : Arbitrary positional arguments
Passed to :class:`matplotlib.axes.Axes`
        rotation : float or int, optional
            The rotation of the x-axis against the y-axis, in degrees. Defaults to 30.
kwargs : Arbitrary keyword arguments
Passed to :class:`matplotlib.axes.Axes`
"""
# This needs to be popped and set before moving on
self.rot = kwargs.pop('rotation', 30)
super().__init__(*args, **kwargs)
def _init_axis(self):
# Taken from Axes and modified to use our modified X-axis
self.xaxis = SkewXAxis(self)
self.spines['top'].register_axis(self.xaxis)
self.spines['bottom'].register_axis(self.xaxis)
self.yaxis = maxis.YAxis(self)
self.spines['left'].register_axis(self.yaxis)
self.spines['right'].register_axis(self.yaxis)
def _gen_axes_spines(self, locations=None, offset=0.0, units='inches'):
# pylint: disable=unused-argument
spines = {'top': SkewSpine.linear_spine(self, 'top'),
'bottom': mspines.Spine.linear_spine(self, 'bottom'),
'left': mspines.Spine.linear_spine(self, 'left'),
'right': mspines.Spine.linear_spine(self, 'right')}
return spines
def _set_lim_and_transforms(self):
"""Set limits and transforms.
This is called once when the plot is created to set up all the
transforms for the data, text and grids.
"""
# Get the standard transform setup from the Axes base class
super()._set_lim_and_transforms()
# This transformation handles the skewing
skew_trans = SkewTTransform(self.bbox, self.rot)
# Create the full transform from Data to Pixels
self.transData += skew_trans
# Blended transforms like this need to have the skewing applied using
# both axes, in axes coords like before.
self._xaxis_transform += skew_trans
@property
def lower_xlim(self):
"""Get the data limits for the x-axis along the bottom of the axes."""
return self.axes.viewLim.intervalx
@property
def upper_xlim(self):
"""Get the data limits for the x-axis along the top of the axes."""
return self.transData.inverted().transform([[self.bbox.xmin, self.bbox.ymax],
self.bbox.max])[:, 0]
# Now register the projection with matplotlib so the user can select it.
register_projection(SkewXAxes)
@exporter.export
class SkewT(object):
r"""Make Skew-T log-P plots of data.
This class simplifies the process of creating Skew-T log-P plots in
using matplotlib. It handles requesting the appropriate skewed projection,
and provides simplified wrappers to make it easy to plot data, add wind
barbs, and add other lines to the plots (e.g. dry adiabats)
Attributes
----------
ax : `matplotlib.axes.Axes`
The underlying Axes instance, which can be used for calling additional
plot functions (e.g. `axvline`)
"""
def __init__(self, fig=None, rotation=30, subplot=None, rect=None, aspect=80.5):
r"""Create SkewT - logP plots.
Parameters
----------
fig : matplotlib.figure.Figure, optional
Source figure to use for plotting. If none is given, a new
:class:`matplotlib.figure.Figure` instance will be created.
rotation : float or int, optional
Controls the rotation of temperature relative to horizontal. Given
in degrees counterclockwise from x-axis. Defaults to 30 degrees.
subplot : tuple[int, int, int] or `matplotlib.gridspec.SubplotSpec` instance, optional
Controls the size/position of the created subplot. This allows creating
the skewT as part of a collection of subplots. If subplot is a tuple, it
should conform to the specification used for
:meth:`matplotlib.figure.Figure.add_subplot`. The
:class:`matplotlib.gridspec.SubplotSpec`
can be created by using :class:`matplotlib.gridspec.GridSpec`.
rect : tuple[float, float, float, float], optional
Rectangle (left, bottom, width, height) in which to place the axes. This
allows the user to place the axes at an arbitrary point on the figure.
aspect : float, int, or 'auto', optional
Aspect ratio (i.e. ratio of y-scale to x-scale) to maintain in the plot.
Defaults to 80.5. Passing the string ``'auto'`` tells matplotlib to handle
the aspect ratio automatically (this is not recommended for SkewT).
"""
if fig is None:
import matplotlib.pyplot as plt
figsize = plt.rcParams.get('figure.figsize', (7, 7))
fig = plt.figure(figsize=figsize)
self._fig = fig
if rect and subplot:
raise ValueError("Specify only one of `rect' and `subplot', but not both")
elif rect:
self.ax = fig.add_axes(rect, projection='skewx', rotation=rotation)
else:
if subplot is not None:
# Handle being passed a tuple for the subplot, or a GridSpec instance
try:
len(subplot)
except TypeError:
subplot = (subplot,)
else:
subplot = (1, 1, 1)
self.ax = fig.add_subplot(*subplot, projection='skewx', rotation=rotation)
# Set the yaxis as inverted with log scaling
self.ax.set_yscale('log')
# Override default ticking for log scaling
self.ax.yaxis.set_major_formatter(ScalarFormatter())
self.ax.yaxis.set_major_locator(MultipleLocator(100))
self.ax.yaxis.set_minor_formatter(NullFormatter())
# Needed to make sure matplotlib doesn't freak out and create a bunch of ticks
# Also takes care of inverting the y-axis
self.ax.set_ylim(1050, 100)
self.ax.yaxis.set_units(units.hPa)
# Try to make sane default temperature plotting ticks
self.ax.xaxis.set_major_locator(MultipleLocator(10))
self.ax.xaxis.set_units(units.degC)
self.ax.set_xlim(-40, 50)
self.ax.grid(True)
self.mixing_lines = None
self.dry_adiabats = None
self.moist_adiabats = None
# Maintain a reasonable ratio of data limits. Only works on Matplotlib >= 3.2
if matplotlib.__version__[:3] > '3.1':
self.ax.set_aspect(aspect, adjustable='box')
def plot(self, pressure, t, *args, **kwargs):
r"""Plot data.
Simple wrapper around plot so that pressure is the first (independent)
input. This is essentially a wrapper around `plot`.
Parameters
----------
pressure : array_like
pressure values
t : array_like
temperature values, can also be used for things like dew point
args
Other positional arguments to pass to :func:`~matplotlib.pyplot.plot`
kwargs
Other keyword arguments to pass to :func:`~matplotlib.pyplot.plot`
Returns
-------
list[matplotlib.lines.Line2D]
lines plotted
See Also
--------
:func:`matplotlib.pyplot.plot`
"""
# Skew-T logP plotting
t, pressure = _delete_masked_points(t, pressure)
return self.ax.plot(t, pressure, *args, **kwargs)
def plot_barbs(self, pressure, u, v, c=None, xloc=1.0, x_clip_radius=0.1,
y_clip_radius=0.08, **kwargs):
r"""Plot wind barbs.
Adds wind barbs to the skew-T plot. This is a wrapper around the
`barbs` command that adds to appropriate transform to place the
barbs in a vertical line, located as a function of pressure.
Parameters
----------
pressure : array_like
pressure values
u : array_like
U (East-West) component of wind
v : array_like
V (North-South) component of wind
        c : array_like, optional
            An optional array used to map colors to the barbs.
xloc : float, optional
Position for the barbs, in normalized axes coordinates, where 0.0
denotes far left and 1.0 denotes far right. Defaults to far right.
x_clip_radius : float, optional
Space, in normalized axes coordinates, to leave before clipping
wind barbs in the x-direction. Defaults to 0.1.
y_clip_radius : float, optional
Space, in normalized axes coordinates, to leave above/below plot
before clipping wind barbs in the y-direction. Defaults to 0.08.
plot_units: `pint.unit`
Units to plot in (performing conversion if necessary). Defaults to given units.
kwargs
Other keyword arguments to pass to :func:`~matplotlib.pyplot.barbs`
Returns
-------
matplotlib.quiver.Barbs
instance created
See Also
--------
:func:`matplotlib.pyplot.barbs`
"""
# If plot_units specified, convert the data to those units
plotting_units = kwargs.pop('plot_units', None)
if plotting_units:
if hasattr(u, 'units') and hasattr(v, 'units'):
u = u.to(plotting_units)
v = v.to(plotting_units)
else:
raise ValueError('To convert to plotting units, units must be attached to '
'u and v wind components.')
# Assemble array of x-locations in axes space
x = np.empty_like(pressure)
x.fill(xloc)
# Do barbs plot at this location
if c is not None:
b = self.ax.barbs(x, pressure, u, v, c,
transform=self.ax.get_yaxis_transform(which='tick2'),
clip_on=True, zorder=2, **kwargs)
else:
b = self.ax.barbs(x, pressure, u, v,
transform=self.ax.get_yaxis_transform(which='tick2'),
clip_on=True, zorder=2, **kwargs)
# Override the default clip box, which is the axes rectangle, so we can have
# barbs that extend outside.
ax_bbox = transforms.Bbox([[xloc - x_clip_radius, -y_clip_radius],
[xloc + x_clip_radius, 1.0 + y_clip_radius]])
b.set_clip_box(transforms.TransformedBbox(ax_bbox, self.ax.transAxes))
return b
def plot_dry_adiabats(self, t0=None, pressure=None, **kwargs):
r"""Plot dry adiabats.
Adds dry adiabats (lines of constant potential temperature) to the
plot. The default style of these lines is dashed red lines with an alpha
value of 0.5. These can be overridden using keyword arguments.
Parameters
----------
t0 : array_like, optional
Starting temperature values in Kelvin. If none are given, they will be
generated using the current temperature range at the bottom of
the plot.
pressure : array_like, optional
Pressure values to be included in the dry adiabats. If not
specified, they will be linearly distributed across the current
plotted pressure range.
kwargs
Other keyword arguments to pass to :class:`matplotlib.collections.LineCollection`
Returns
-------
matplotlib.collections.LineCollection
instance created
See Also
--------
:func:`~metpy.calc.thermo.dry_lapse`
:meth:`plot_moist_adiabats`
:class:`matplotlib.collections.LineCollection`
"""
# Remove old lines
if self.dry_adiabats:
self.dry_adiabats.remove()
# Determine set of starting temps if necessary
if t0 is None:
xmin, xmax = self.ax.get_xlim()
t0 = np.arange(xmin, xmax + 1, 10) * units.degC
# Get pressure levels based on ylims if necessary
if pressure is None:
pressure = np.linspace(*self.ax.get_ylim()) * units.mbar
# Assemble into data for plotting
t = dry_lapse(pressure, t0[:, np.newaxis], 1000. * units.mbar).to(units.degC)
linedata = [np.vstack((ti.m, pressure.m)).T for ti in t]
# Add to plot
kwargs.setdefault('colors', 'r')
kwargs.setdefault('linestyles', 'dashed')
kwargs.setdefault('alpha', 0.5)
self.dry_adiabats = self.ax.add_collection(LineCollection(linedata, **kwargs))
return self.dry_adiabats
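    # Editor's note (sketch, not from the MetPy source): each dry adiabat drawn
    # above follows Poisson's equation for constant potential temperature,
    #   T(p) = T0 * (p / p0) ** (Rd / cp)   with p0 = 1000 hPa,
    # which is what metpy.calc.dry_lapse evaluates for every starting value t0.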
def plot_moist_adiabats(self, t0=None, pressure=None, **kwargs):
r"""Plot moist adiabats.
Adds saturated pseudo-adiabats (lines of constant equivalent potential
temperature) to the plot. The default style of these lines is dashed
blue lines with an alpha value of 0.5. These can be overridden using
keyword arguments.
Parameters
----------
t0 : array_like, optional
Starting temperature values in Kelvin. If none are given, they will be
generated using the current temperature range at the bottom of
the plot.
pressure : array_like, optional
Pressure values to be included in the moist adiabats. If not
specified, they will be linearly distributed across the current
plotted pressure range.
kwargs
Other keyword arguments to pass to :class:`matplotlib.collections.LineCollection`
Returns
-------
matplotlib.collections.LineCollection
instance created
See Also
--------
:func:`~metpy.calc.thermo.moist_lapse`
:meth:`plot_dry_adiabats`
:class:`matplotlib.collections.LineCollection`
"""
# Remove old lines
if self.moist_adiabats:
self.moist_adiabats.remove()
# Determine set of starting temps if necessary
if t0 is None:
xmin, xmax = self.ax.get_xlim()
t0 = np.concatenate((np.arange(xmin, 0, 10),
np.arange(0, xmax + 1, 5))) * units.degC
# Get pressure levels based on ylims if necessary
if pressure is None:
pressure = np.linspace(*self.ax.get_ylim()) * units.mbar
# Assemble into data for plotting
t = moist_lapse(pressure, t0[:, np.newaxis], 1000. * units.mbar).to(units.degC)
linedata = [np.vstack((ti.m, pressure.m)).T for ti in t]
# Add to plot
kwargs.setdefault('colors', 'b')
kwargs.setdefault('linestyles', 'dashed')
kwargs.setdefault('alpha', 0.5)
self.moist_adiabats = self.ax.add_collection(LineCollection(linedata, **kwargs))
return self.moist_adiabats
def plot_mixing_lines(self, mixing_ratio=None, pressure=None, **kwargs):
r"""Plot lines of constant mixing ratio.
Adds lines of constant mixing ratio (isohumes) to the
plot. The default style of these lines is dashed green lines with an
alpha value of 0.8. These can be overridden using keyword arguments.
Parameters
----------
mixing_ratio : array_like, optional
Unitless mixing ratio values to plot. If none are given, default
values are used.
pressure : array_like, optional
Pressure values to be included in the isohumes. If not
specified, they will be linearly distributed across the current
plotted pressure range up to 600 mb.
kwargs
Other keyword arguments to pass to :class:`matplotlib.collections.LineCollection`
Returns
-------
matplotlib.collections.LineCollection
instance created
See Also
--------
:class:`matplotlib.collections.LineCollection`
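        Examples
        --------
        Illustrative calls, assuming ``skew`` is an existing instance of this
        class (the mixing-ratio values below are made up; note the column-vector
        shape, matching the default values above):
        >>> skew.plot_mixing_lines()  # doctest: +SKIP
        >>> skew.plot_mixing_lines(mixing_ratio=np.array([0.001, 0.01]).reshape(-1, 1),
        ...                        linewidths=0.75)  # doctest: +SKIP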
"""
# Remove old lines
if self.mixing_lines:
self.mixing_lines.remove()
# Default mixing level values if necessary
if mixing_ratio is None:
mixing_ratio = np.array([0.0004, 0.001, 0.002, 0.004, 0.007, 0.01,
0.016, 0.024, 0.032]).reshape(-1, 1)
# Set pressure range if necessary
if pressure is None:
pressure = np.linspace(600, max(self.ax.get_ylim())) * units.mbar
# Assemble data for plotting
td = dewpoint(vapor_pressure(pressure, mixing_ratio))
linedata = [np.vstack((t.m, pressure.m)).T for t in td]
# Add to plot
kwargs.setdefault('colors', 'g')
kwargs.setdefault('linestyles', 'dashed')
kwargs.setdefault('alpha', 0.8)
self.mixing_lines = self.ax.add_collection(LineCollection(linedata, **kwargs))
return self.mixing_lines
def shade_area(self, y, x1, x2=0, which='both', **kwargs):
r"""Shade area between two curves.
        Shades the area between two curves: where the first is greater than the
        second, where it is less, or everywhere, depending on ``which``.
Parameters
----------
y : array_like
1-dimensional array of numeric y-values
x1 : array_like
1-dimensional array of numeric x-values
x2 : array_like
1-dimensional array of numeric x-values
        which : str
            Specifies whether the `positive`, `negative`, or `both` areas are
            shaded. Overridden if a ``where`` keyword argument is passed in
            ``kwargs``.
kwargs
Other keyword arguments to pass to :class:`matplotlib.collections.PolyCollection`
Returns
-------
:class:`matplotlib.collections.PolyCollection`
See Also
--------
:class:`matplotlib.collections.PolyCollection`
:func:`matplotlib.axes.Axes.fill_betweenx`
"""
fill_properties = {'positive':
{'facecolor': 'tab:red', 'alpha': 0.4, 'where': x1 > x2},
'negative':
{'facecolor': 'tab:blue', 'alpha': 0.4, 'where': x1 < x2},
'both':
{'facecolor': 'tab:green', 'alpha': 0.4, 'where': None}}
try:
fill_args = fill_properties[which]
fill_args.update(kwargs)
except KeyError:
raise ValueError('Unknown option for which: {0}'.format(str(which)))
arrs = y, x1, x2
if fill_args['where'] is not None:
arrs = arrs + (fill_args['where'],)
fill_args.pop('where', None)
fill_args['interpolate'] = True
arrs = _delete_masked_points(*arrs)
return self.ax.fill_betweenx(*arrs, **fill_args)
def shade_cape(self, pressure, t, t_parcel, **kwargs):
r"""Shade areas of Convective Available Potential Energy (CAPE).
        Shades areas where the parcel is warmer than the environment (areas of
        positive buoyancy).
        Parameters
        ----------
        pressure : array_like
            Pressure values
        t : array_like
            Temperature values
        t_parcel : array_like
            Parcel path temperature values
kwargs
Other keyword arguments to pass to :class:`matplotlib.collections.PolyCollection`
Returns
-------
:class:`matplotlib.collections.PolyCollection`
See Also
--------
:class:`matplotlib.collections.PolyCollection`
:func:`matplotlib.axes.Axes.fill_betweenx`
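        Examples
        --------
        Illustrative call, assuming ``skew`` is an existing instance of this
        class and ``p``, ``T`` and ``prof`` are pressure, temperature and
        parcel-profile arrays with units attached:
        >>> skew.shade_cape(p, T, prof)  # doctest: +SKIP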
"""
return self.shade_area(pressure, t_parcel, t, which='positive', **kwargs)
def shade_cin(self, pressure, t, t_parcel, dewpoint=None, **kwargs):
r"""Shade areas of Convective INhibition (CIN).
        Shades areas where the parcel is cooler than the environment (areas of
        negative buoyancy). If ``dewpoint`` is passed in, the negative area below
        the lifting condensation level or above the equilibrium level is not shaded.
Parameters
----------
pressure : array_like
Pressure values
t : array_like
Temperature values
t_parcel : array_like
Parcel path temperature values
dewpoint : array_like
Dew point values, optional
kwargs
Other keyword arguments to pass to :class:`matplotlib.collections.PolyCollection`
Returns
-------
:class:`matplotlib.collections.PolyCollection`
See Also
--------
:class:`matplotlib.collections.PolyCollection`
:func:`matplotlib.axes.Axes.fill_betweenx`
"""
if dewpoint is not None:
lcl_p, _ = lcl(pressure[0], t[0], dewpoint[0])
el_p, _ = el(pressure, t, dewpoint, t_parcel)
idx = np.logical_and(pressure > el_p, pressure < lcl_p)
else:
idx = np.arange(0, len(pressure))
return self.shade_area(pressure[idx], t_parcel[idx], t[idx], which='negative',
**kwargs)
@exporter.export
class Hodograph(object):
r"""Make a hodograph of wind data.
Plots the u and v components of the wind along the x and y axes, respectively.
This class simplifies the process of creating a hodograph using matplotlib.
It provides helpers for creating a circular grid and for plotting the wind as a line
colored by another value (such as wind speed).
Attributes
----------
ax : `matplotlib.axes.Axes`
The underlying Axes instance used for all plotting
"""
def __init__(self, ax=None, component_range=80):
r"""Create a Hodograph instance.
Parameters
----------
ax : `matplotlib.axes.Axes`, optional
The `Axes` instance used for plotting
component_range : value
The maximum range of the plot. Used to set plot bounds and control the maximum
number of grid rings needed.
"""
if ax is None:
import matplotlib.pyplot as plt
self.ax = plt.figure().add_subplot(1, 1, 1)
else:
self.ax = ax
self.ax.set_aspect('equal', 'box')
self.ax.set_xlim(-component_range, component_range)
self.ax.set_ylim(-component_range, component_range)
        # == sqrt(2) * component_range, the distance to the corner of the plot
self.max_range = 1.4142135 * component_range
def add_grid(self, increment=10., **kwargs):
r"""Add grid lines to hodograph.
Creates lines for the x- and y-axes, as well as circles denoting wind speed values.
Parameters
----------
increment : value, optional
The value increment between rings
kwargs
Other kwargs to control appearance of lines
See Also
--------
:class:`matplotlib.patches.Circle`
:meth:`matplotlib.axes.Axes.axhline`
:meth:`matplotlib.axes.Axes.axvline`
"""
# Some default arguments. Take those, and update with any
# arguments passed in
grid_args = {'color': 'grey', 'linestyle': 'dashed'}
if kwargs:
grid_args.update(kwargs)
# Take those args and make appropriate for a Circle
circle_args = grid_args.copy()
color = circle_args.pop('color', None)
circle_args['edgecolor'] = color
circle_args['fill'] = False
self.rings = []
for r in np.arange(increment, self.max_range, increment):
c = Circle((0, 0), radius=r, **circle_args)
self.ax.add_patch(c)
self.rings.append(c)
# Add lines for x=0 and y=0
self.yaxis = self.ax.axvline(0, **grid_args)
self.xaxis = self.ax.axhline(0, **grid_args)
@staticmethod
def _form_line_args(kwargs):
"""Simplify taking the default line style and extending with kwargs."""
def_args = {'linewidth': 3}
def_args.update(kwargs)
return def_args
def plot(self, u, v, **kwargs):
r"""Plot u, v data.
Plots the wind data on the hodograph.
Parameters
----------
u : array_like
u-component of wind
v : array_like
v-component of wind
kwargs
Other keyword arguments to pass to :meth:`matplotlib.axes.Axes.plot`
Returns
-------
list[matplotlib.lines.Line2D]
lines plotted
See Also
--------
:meth:`Hodograph.plot_colormapped`
"""
line_args = self._form_line_args(kwargs)
u, v = _delete_masked_points(u, v)
return self.ax.plot(u, v, **line_args)
def wind_vectors(self, u, v, **kwargs):
r"""Plot u, v data as wind vectors.
Plot the wind data as vectors for each level, beginning at the origin.
Parameters
----------
u : array_like
u-component of wind
v : array_like
v-component of wind
kwargs
Other keyword arguments to pass to :meth:`matplotlib.axes.Axes.quiver`
Returns
-------
matplotlib.quiver.Quiver
arrows plotted
"""
quiver_args = {'units': 'xy', 'scale': 1}
quiver_args.update(**kwargs)
center_position = np.zeros_like(u)
return self.ax.quiver(center_position, center_position,
u, v, **quiver_args)
def plot_colormapped(self, u, v, c, intervals=None, colors=None, **kwargs):
r"""Plot u, v data, with line colored based on a third set of data.
Plots the wind data on the hodograph, but with a colormapped line. Takes a third
variable besides the winds (e.g. heights or pressure levels) and either a colormap to
color it with or a series of contour intervals and colors to create a colormap and
norm to control colormapping. The intervals must always be in increasing
order. For using custom contour intervals with height data, the function will
automatically interpolate to the contour intervals from the height and wind data,
as well as convert the input contour intervals from height AGL to MSL to work with the
provided heights.
Parameters
----------
u : array_like
u-component of wind
v : array_like
v-component of wind
c : array_like
data to use for colormapping (e.g. heights, pressure, wind speed)
intervals: array-like, optional
Array of intervals for c to use in coloring the hodograph.
colors: list, optional
Array of strings representing colors for the hodograph segments.
kwargs
Other keyword arguments to pass to :class:`matplotlib.collections.LineCollection`
Returns
-------
matplotlib.collections.LineCollection
instance created
See Also
--------
:meth:`Hodograph.plot`
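        Examples
        --------
        Illustrative calls, assuming ``h`` is an existing instance of this class
        and ``u``, ``v``, ``speed`` and ``heights`` are wind components, wind
        speed and heights with units attached (the interval values and colors
        below are made up):
        >>> h.plot_colormapped(u, v, speed)  # doctest: +SKIP
        >>> h.plot_colormapped(u, v, heights,
        ...                    intervals=np.array([0, 1, 3, 5, 8]) * units.km,
        ...                    colors=['tab:red', 'tab:green', 'tab:olive',
        ...                            'tab:blue'])  # doctest: +SKIP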
"""
u, v, c = _delete_masked_points(u, v, c)
# Plotting a color segmented hodograph
if colors:
cmap = mcolors.ListedColormap(colors)
# If we are segmenting by height (a length), interpolate the contour intervals
if intervals.dimensionality == {'[length]': 1.0}:
# Find any intervals not in the data and interpolate them
interpolation_heights = [bound.m for bound in intervals if bound not in c]
interpolation_heights = np.array(interpolation_heights) * intervals.units
interpolation_heights = (np.sort(interpolation_heights.magnitude)
* interpolation_heights.units)
(interpolated_heights, interpolated_u,
interpolated_v) = interpolate_1d(interpolation_heights, c, c, u, v)
# Combine the interpolated data with the actual data
c = concatenate([c, interpolated_heights])
u = concatenate([u, interpolated_u])
v = concatenate([v, interpolated_v])
sort_inds = np.argsort(c)
c = c[sort_inds]
u = u[sort_inds]
v = v[sort_inds]
# Unit conversion required for coloring of bounds/data in dissimilar units
# to work properly.
c = c.to_base_units() # TODO: This shouldn't be required!
intervals = intervals.to_base_units()
# If segmenting by anything else, do not interpolate, just use the data
else:
intervals = np.asarray(intervals) * intervals.units
norm = mcolors.BoundaryNorm(intervals.magnitude, cmap.N)
cmap.set_over('none')
cmap.set_under('none')
kwargs['cmap'] = cmap
kwargs['norm'] = norm
line_args = self._form_line_args(kwargs)
# Plotting a continuously colored line
else:
line_args = self._form_line_args(kwargs)
# Do the plotting
lc = colored_line(u, v, c, **line_args)
self.ax.add_collection(lc)
return lc
| bsd-3-clause |
coolsgupta/machine_learning_nanodegree | Supervised_Learning/naive bayes/creating_the_GNB_clf.py | 1 | 1349 | #doc: http://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.GaussianNB.html
from sklearn.naive_bayes import GaussianNB
def classify(features_train, labels_train):
### import the sklearn module for GaussianNB
### create classifier
### fit the classifier on the training features and labels
### return the fit classifier
### your code goes here!
clf = GaussianNB()
clf.fit(features_train, labels_train)
return clf
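# Illustrative usage sketch (not part of the original exercise): train the
# classifier defined above on a tiny made-up dataset and predict a new point.
if __name__ == "__main__":
    import numpy as np
    X_demo = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])
    y_demo = np.array([0, 0, 1, 1])
    demo_clf = classify(X_demo, y_demo)
    print(demo_clf.predict([[2.5, 2.5]]))  # expected: [1]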
def NBAccuracy(features_train, labels_train, features_test, labels_test):
""" compute the accuracy of your Naive Bayes classifier """
### import the sklearn module for GaussianNB
from sklearn.naive_bayes import GaussianNB
    ### create classifier
clf = GaussianNB()
### fit the classifier on the training features and labels
#TODO
clf.fit(features_train,labels_train)
### use the trained classifier to predict labels for the test features
pred = clf.predict(features_test)
### calculate and return the accuracy on the test data
### this is slightly different than the example,
### where we just print the accuracy
### you might need to import an sklearn module
accuracy = clf.score(features_test,labels_test)
#or
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(pred,labels_test)
    return accuracy
| mit |
dariomangoni/chrono | src/demos/python/chrono-tensorflow/PPO/value_function.py | 4 | 7169 | """
State-Value Function
"""
import tensorflow as tf
import numpy as np
from sklearn.utils import shuffle
import os.path
class NNValueFunction(object):
""" NN-based state-value function """
def __init__(self, obs_dim, env_name, MultiGPU = False):
"""
Args:
obs_dim: number of dimensions in observation vector (int)
"""
self.env_name = env_name
self.multiGPU = MultiGPU
self.replay_buffer_x = None
self.replay_buffer_y = None
self.obs_dim = obs_dim
self.epochs = 10
self.lr = None # learning rate set in _build_graph()
self.savedmodel = os.path.isfile("./savedmodel/"+self.env_name+"/VF/checkpoint")
directory = "./savedmodel/"+self.env_name+"/VF/"
if self.savedmodel :
self._restore()
else:
if not os.path.exists(directory):
os.makedirs(directory)
self._build_graph()
#self.sess = tf.Session(graph=self.g)
#self.sess.run(self.init)
def _build_graph(self):
""" Construct TensorFlow graph, including loss function, init op and train op """
self.g = tf.Graph()
with self.g.as_default():
self.obs_ph = tf.placeholder(tf.float32, (None, self.obs_dim), 'obs_valfunc')
self.val_ph = tf.placeholder(tf.float32, (None,), 'val_valfunc')
# hid1 layer size is 10x obs_dim, hid3 size is 10, and hid2 is geometric mean
hid1_size = self.obs_dim * 10 # 10 chosen empirically on 'Hopper-v1'
hid3_size = 5 # 5 chosen empirically on 'Hopper-v1'
hid2_size = int(np.sqrt(hid1_size * hid3_size))
# heuristic to set learning rate based on NN size (tuned on 'Hopper-v1')
self.lr = 1e-2 / np.sqrt(hid2_size) # 1e-3 empirically determined
print('Value Params -- h1: {}, h2: {}, h3: {}, lr: {:.3g}'
.format(hid1_size, hid2_size, hid3_size, self.lr))
# 3 hidden layers with tanh activations
out = tf.layers.dense(self.obs_ph, hid1_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=np.sqrt(1 / self.obs_dim)), name="h1VF")
out = tf.layers.dense(out, hid2_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=np.sqrt(1 / hid1_size)), name="h2VF")
out = tf.layers.dense(out, hid3_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=np.sqrt(1 / hid2_size)), name="h3VF")
out = tf.layers.dense(out, 1,
kernel_initializer=tf.random_normal_initializer(
stddev=np.sqrt(1 / hid3_size)), name='output')
self.out = tf.squeeze(out)
self.loss = tf.reduce_mean(tf.square(self.out - self.val_ph), name='lossVF') # squared loss
optimizer = tf.train.AdamOptimizer(self.lr)
self.train_op = optimizer.minimize(self.loss, name='train_opVF')
self.init = tf.global_variables_initializer()
self.saverVF = tf.train.Saver()
if self.multiGPU :
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
#config.gpu_options.per_process_gpu_memory_fraction = 0.1
self.sess = tf.Session(graph=self.g, config=config)
else:
self.sess = tf.Session(graph=self.g)
self.sess.run(self.init)
def _restore(self):
if self.multiGPU :
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
#config.gpu_options.per_process_gpu_memory_fraction = 0.1
self.sess = tf.Session(config=config)
else:
self.sess = tf.Session()
loader = tf.train.import_meta_graph("./savedmodel/"+self.env_name+"/VF/trained_VF.ckpt.meta")
self.sess.run(tf.global_variables_initializer())
self.g = tf.get_default_graph()
self.obs_ph = self.g.get_tensor_by_name('obs_valfunc:0')
self.val_ph = self.g.get_tensor_by_name('val_valfunc:0')
out = self.g.get_tensor_by_name('output/BiasAdd:0')
self.out = tf.squeeze(out)
self.loss = self.g.get_tensor_by_name('lossVF:0')
self.train_op = self.g.get_operation_by_name('train_opVF')
self.lr = 1e-2 / np.sqrt(int(np.sqrt(self.obs_dim * 10 * 5)))
self.saverVF = tf.train.Saver()
loader.restore(self.sess, tf.train.latest_checkpoint("./savedmodel/"+self.env_name+"/VF"))
def fit(self, x, y, logger):
""" Fit model to current data batch + previous data batch
Args:
x: features
y: target
logger: logger to save training loss and % explained variance
"""
        # minibatches of 256 tuples. Training set is pretty big when the episode is long (steps_per_episode * episodes_per_batch)
num_batches = max(x.shape[0] // 256, 1)
batch_size = x.shape[0] // num_batches
y_hat = self.predict(x) # check explained variance prior to update
old_exp_var = 1 - np.var(y - y_hat)/np.var(y)
        # on the first iteration the buffer is just the latest batch; afterwards the latest batch is appended to the buffer
if self.replay_buffer_x is None:
x_train, y_train = x, y
else:
x_train = np.concatenate([x, self.replay_buffer_x])
y_train = np.concatenate([y, self.replay_buffer_y])
        # flush buffer; the latest batch will be appended to the next one
self.replay_buffer_x = x
self.replay_buffer_y = y
for e in range(self.epochs):
x_train, y_train = shuffle(x_train, y_train)
for j in range(num_batches):
start = j * batch_size
end = (j + 1) * batch_size
feed_dict = {self.obs_ph: x_train[start:end, :],
self.val_ph: y_train[start:end]}
_, l = self.sess.run([self.train_op, self.loss], feed_dict=feed_dict)
y_hat = self.predict(x)
loss = np.mean(np.square(y_hat - y)) # explained variance after update
exp_var = 1 - np.var(y - y_hat) / np.var(y) # diagnose over-fitting of val func
self.saverVF.save(self.sess, "./savedmodel/"+self.env_name+"/VF/trained_VF.ckpt")
logger.log({'ValFuncLoss': loss,
'ExplainedVarNew': exp_var,
'ExplainedVarOld': old_exp_var})
def predict(self, x):
""" Predict method """
feed_dict = {self.obs_ph: x}
y_hat = self.sess.run(self.out, feed_dict=feed_dict)
return np.squeeze(y_hat)
def close_sess(self):
""" Close TensorFlow session """
self.saverVF.save(self.sess, "./savedmodel/"+self.env_name+"/VF/trained_VF.ckpt")
self.sess.close()
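if __name__ == '__main__':
    # Illustrative usage sketch (not part of the original module). Assumes a
    # TensorFlow 1.x environment; the observation dimension, environment name
    # and random training data below are made up for demonstration.
    class _PrintLogger(object):
        """Minimal stand-in for the logger object expected by fit()."""
        def log(self, stats):
            print(stats)
    value_func = NNValueFunction(obs_dim=8, env_name='demo-env')
    observations = np.random.randn(512, 8)
    disc_returns = np.random.randn(512)
    value_func.fit(observations, disc_returns, _PrintLogger())
    print(value_func.predict(observations[:5]))
    value_func.close_sess()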
| bsd-3-clause |
kose-y/pylearn2 | pylearn2/scripts/plot_monitor.py | 37 | 10204 | #!/usr/bin/env python
"""
usage:
plot_monitor.py model_1.pkl model_2.pkl ... model_n.pkl
Loads any number of .pkl files produced by train.py. Extracts
all of their monitoring channels and prompts the user to select
a subset of them to be plotted.
"""
from __future__ import print_function
__authors__ = "Ian Goodfellow, Harm Aarts"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import gc
import numpy as np
import sys
from theano.compat.six.moves import input, xrange
from pylearn2.utils import serial
from theano.printing import _TagGenerator
from pylearn2.utils.string_utils import number_aware_alphabetical_key
from pylearn2.utils import contains_nan, contains_inf
import argparse
channels = {}
def unique_substring(s, other, min_size=1):
"""
.. todo::
WRITEME
"""
size = min(len(s), min_size)
while size <= len(s):
for pos in xrange(0,len(s)-size+1):
rval = s[pos:pos+size]
fail = False
for o in other:
if o.find(rval) != -1:
fail = True
break
if not fail:
return rval
size += 1
# no unique substring
return s
def unique_substrings(l, min_size=1):
"""
.. todo::
WRITEME
"""
return [unique_substring(s, [x for x in l if x is not s], min_size)
for s in l]
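# Example (illustrative): unique_substrings(['train_err', 'test_err']) returns
# ['a', 's'] -- for each name, the first substring (scanning left to right and
# growing in length) that does not occur in any of the other names.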
def main():
"""
.. todo::
WRITEME
"""
parser = argparse.ArgumentParser()
parser.add_argument("--out")
parser.add_argument("model_paths", nargs='+')
parser.add_argument("--yrange", help='The y-range to be used for plotting, e.g. 0:1')
options = parser.parse_args()
model_paths = options.model_paths
if options.out is not None:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
print('generating names...')
model_names = [model_path.replace('.pkl', '!') for model_path in
model_paths]
model_names = unique_substrings(model_names, min_size=10)
model_names = [model_name.replace('!','') for model_name in
model_names]
print('...done')
for i, arg in enumerate(model_paths):
try:
model = serial.load(arg)
except Exception:
if arg.endswith('.yaml'):
                print(arg + " is a yaml config file, " +
                      "you need to load a trained model.", file=sys.stderr)
quit(-1)
raise
this_model_channels = model.monitor.channels
if len(sys.argv) > 2:
postfix = ":" + model_names[i]
else:
postfix = ""
for channel in this_model_channels:
channels[channel+postfix] = this_model_channels[channel]
del model
gc.collect()
while True:
# Make a list of short codes for each channel so user can specify them
# easily
tag_generator = _TagGenerator()
codebook = {}
sorted_codes = []
for channel_name in sorted(channels,
key = number_aware_alphabetical_key):
code = tag_generator.get_tag()
codebook[code] = channel_name
codebook['<'+channel_name+'>'] = channel_name
sorted_codes.append(code)
x_axis = 'example'
print('set x_axis to example')
if len(channels.values()) == 0:
print("there are no channels to plot")
break
# If there is more than one channel in the monitor ask which ones to
# plot
prompt = len(channels.values()) > 1
if prompt:
# Display the codebook
for code in sorted_codes:
print(code + '. ' + codebook[code])
print()
print("Put e, b, s or h in the list somewhere to plot " +
"epochs, batches, seconds, or hours, respectively.")
response = input('Enter a list of channels to plot ' + \
'(example: A, C,F-G, h, <test_err>) or q to quit' + \
' or o for options: ')
if response == 'o':
print('1: smooth all channels')
print('any other response: do nothing, go back to plotting')
response = input('Enter your choice: ')
if response == '1':
for channel in channels.values():
k = 5
new_val_record = []
for i in xrange(len(channel.val_record)):
new_val = 0.
count = 0.
for j in xrange(max(0, i-k), i+1):
new_val += channel.val_record[j]
count += 1.
new_val_record.append(new_val / count)
channel.val_record = new_val_record
continue
if response == 'q':
break
#Remove spaces
response = response.replace(' ','')
#Split into list
codes = response.split(',')
final_codes = set([])
for code in codes:
if code == 'e':
x_axis = 'epoch'
continue
elif code == 'b':
x_axis = 'batche'
elif code == 's':
x_axis = 'second'
elif code == 'h':
x_axis = 'hour'
elif code.startswith('<'):
assert code.endswith('>')
final_codes.add(code)
elif code.find('-') != -1:
#The current list element is a range of codes
rng = code.split('-')
if len(rng) != 2:
print("Input not understood: "+code)
quit(-1)
found = False
for i in xrange(len(sorted_codes)):
if sorted_codes[i] == rng[0]:
found = True
break
if not found:
print("Invalid code: "+rng[0])
quit(-1)
found = False
for j in xrange(i,len(sorted_codes)):
if sorted_codes[j] == rng[1]:
found = True
break
if not found:
print("Invalid code: "+rng[1])
quit(-1)
final_codes = final_codes.union(set(sorted_codes[i:j+1]))
else:
#The current list element is just a single code
final_codes = final_codes.union(set([code]))
# end for code in codes
else:
            final_codes = set(codebook.keys())
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
styles = list(colors)
styles += [color+'--' for color in colors]
styles += [color+':' for color in colors]
fig = plt.figure()
ax = plt.subplot(1,1,1)
# plot the requested channels
for idx, code in enumerate(sorted(final_codes)):
channel_name= codebook[code]
channel = channels[channel_name]
y = np.asarray(channel.val_record)
if contains_nan(y):
print(channel_name + ' contains NaNs')
if contains_inf(y):
print(channel_name + 'contains infinite values')
if x_axis == 'example':
x = np.asarray(channel.example_record)
elif x_axis == 'batche':
x = np.asarray(channel.batch_record)
elif x_axis == 'epoch':
try:
x = np.asarray(channel.epoch_record)
except AttributeError:
# older saved monitors won't have epoch_record
x = np.arange(len(channel.batch_record))
elif x_axis == 'second':
x = np.asarray(channel.time_record)
elif x_axis == 'hour':
x = np.asarray(channel.time_record) / 3600.
else:
assert False
ax.plot( x,
y,
styles[idx % len(styles)],
                     marker = '.', # add point markers to lines
label = channel_name)
plt.xlabel('# '+x_axis+'s')
ax.ticklabel_format( scilimits = (-3,3), axis = 'both')
handles, labels = ax.get_legend_handles_labels()
lgd = ax.legend(handles, labels, loc = 'upper left',
bbox_to_anchor = (1.05, 1.02))
# Get the axis positions and the height and width of the legend
plt.draw()
ax_pos = ax.get_position()
pad_width = ax_pos.x0 * fig.get_size_inches()[0]
pad_height = ax_pos.y0 * fig.get_size_inches()[1]
dpi = fig.get_dpi()
lgd_width = ax.get_legend().get_frame().get_width() / dpi
lgd_height = ax.get_legend().get_frame().get_height() / dpi
# Adjust the bounding box to encompass both legend and axis. Axis should be 3x3 inches.
# I had trouble getting everything to align vertically.
ax_width = 3
ax_height = 3
total_width = 2*pad_width + ax_width + lgd_width
total_height = 2*pad_height + np.maximum(ax_height, lgd_height)
fig.set_size_inches(total_width, total_height)
ax.set_position([pad_width/total_width, 1-6*pad_height/total_height, ax_width/total_width, ax_height/total_height])
if(options.yrange is not None):
ymin, ymax = map(float, options.yrange.split(':'))
plt.ylim(ymin, ymax)
if options.out is None:
plt.show()
else:
plt.savefig(options.out)
if not prompt:
break
if __name__ == "__main__":
main()
| bsd-3-clause |
prheenan/GeneralUtil | python/Plot/Inset.py | 1 | 1864 | # force floating point division. Can still use integer with //
from __future__ import division
# other good compatibility recquirements for python3
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys
from GeneralUtil.python import PlotUtilities
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes,mark_inset
def slice_by_x(x,y,xlim):
"""
slices x and y by xlim:
Args:
<x/y>: arrays of the same shape:
xlim: the limits for x-limits
Returns:
tuple of (sliced x, sliced y, y limits)
"""
x = np.array(x)
y = np.array(y)
# plot the data red where we will zoom in
where_region = np.where( (x >= min(xlim)) &
(x <= max(xlim)))
assert x.shape == y.shape , "Arrays should be the same shape "
zoom_x = x[where_region]
zoom_y = y[where_region]
ylim = [min(zoom_y),max(zoom_y)]
return zoom_x,zoom_y,ylim
def zoomed_axis(ax=None,xlim=[None,None],ylim=[None,None],
                remove_ticks=True,zoom=1,borderpad=1,loc=4,**kw):
"""
Creates a (pretty) zoomed axis
Args:
ax: which axis to zoom on
<x/y>_lim: the axes limits
remove_ticks: if true, removes the x and y ticks, to reduce clutter
remaining args: passed to zoomed_inset_axes
Returns:
the inset axis
"""
    if ax is None:
        ax = plt.gca()
    axins = zoomed_inset_axes(ax, zoom=zoom, loc=loc,borderpad=borderpad)
axins.set_xlim(*xlim) # apply the x-limits
axins.set_ylim(*ylim) # apply the y-limits
if (remove_ticks):
PlotUtilities.no_x_anything(axins)
PlotUtilities.no_y_anything(axins)
return axins
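if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module); the data and
    # zoom window below are made up for demonstration. Opens a figure with an
    # inset zoomed on the selected x-range.
    x_demo = np.linspace(0, 10, 500)
    y_demo = np.sin(x_demo) + 0.05 * x_demo ** 2
    xlim_demo = [2.0, 3.0]
    zoom_x, zoom_y, ylim_demo = slice_by_x(x_demo, y_demo, xlim_demo)
    fig, ax_demo = plt.subplots()
    ax_demo.plot(x_demo, y_demo)
    axins_demo = zoomed_axis(ax=ax_demo, xlim=xlim_demo, ylim=ylim_demo, zoom=3)
    axins_demo.plot(zoom_x, zoom_y)
    mark_inset(ax_demo, axins_demo, loc1=2, loc2=4, fc="none", ec="0.5")
    plt.show()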
| gpl-2.0 |
th0mmeke/toyworld | evaluators/plot_molecular_diversity.py | 1 | 1177 | """
Created on 6/05/2013
@author: thom
"""
from plot import Plot
from evaluator import Evaluator
from molecular_population import MolecularPopulation
import matplotlib.colors as colors
class PlotMolecularDiversity(Plot):
def draw_figure(self, f1, results_filename, **kwargs):
results = Evaluator.load_results(results_filename)
population = MolecularPopulation(population=results['initial_population'], reactions=results['reactions']) # , size=500)
diversity = []
for t in population.get_times():
slice = population.get_slice_by_time([t])
quantities = [slice.get_quantity(item) for item in slice.get_items() if slice.get_quantity(item) > 0]
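            # diversity at time t = number of distinct molecular species present
            # divided by the total number of molecules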
iteration_diversity = (1.0 * len(quantities)) / (1.0 * sum(quantities))
diversity.append(iteration_diversity)
ax = f1.add_subplot(1, 1, 1) # one row, one column, first plot
ax.set_title('Diversity of Molecular Types')
ax.set_xlabel('Time')
ax.set_ylabel('Diversity')
ax.set_xlim(left=0, right=population.get_times()[-1])
ax.plot(diversity, color=colors.cnames['slategray'])
ax.grid()
| gpl-3.0 |
kezilu/pextant | pextant/analysis/loadWaypoints.py | 2 | 5515 | import json
import pandas as pd
from copy import deepcopy
from pextant.lib.geoshapely import *
import numpy as np
import re
import os
pd.options.display.max_rows = 5
from pathlib2 import Path
def loadPointsOld(filename):
    with open(filename) as json_file:
        parsed_json = json.load(json_file)
    waypoints = []
for element in parsed_json: # identify all of the waypoints
if element["type"] == "Station":
lon, lat = element["geometry"]["coordinates"]
time_cost = element["userDuration"]
waypoints.append(GeoPolygon(LAT_LONG, lon, lat))
return waypoints, parsed_json
def get_gps_data(filename):
"""
Gets GPS time series gathered from a traversal
:param filename: <String> csv file from GPS team in format |date|time|name|latitude|longitude|heading
:return: <pandas DataFrame> time_stamp|latitude|longitude
"""
delimiter = r"\s+" # some of the columns are separated by a space, others by tabs, use regex to include both
header_row = 0 # the first row has all the header names
df = pd.read_csv(filename, sep=delimiter, header=header_row)
df['date_time'] = pd.to_datetime(df['epoch timestamp'], unit='s')
time_lat_long = df[['date_time', 'latitude', 'longitude']]
gp = GeoPolygon(LAT_LONG, *df[['latitude', 'longitude']].as_matrix().transpose())
return gp
#TODO: Need to move this over to test file
#filename = '../../data/ev_tracks/20161104A_EV1.csv'
#time_lat_long = get_gps_data(filename)
def sextant_loader(filepath):
with open(filepath) as data_file:
jsondata = json.load(data_file)
latlongInter = np.array(jsondata['geometry']['coordinates']).transpose()
return GeoPolygon(LONG_LAT, *latlongInter)
#this really is a xpjson loader
class JSONloader:
def __init__(self, sequence, raw, filename=None):
self.extension = '_plan.json'
if isinstance(filename, Path):
filename = str(filename.absolute())
self.filename = filename
self.raw = raw
self.sequence = sequence
@classmethod
    def from_string(cls, json_string):
        # a bare string only provides the sequence, so reuse it as the raw plan
        parsed = json.loads(json_string)
        return cls(parsed, parsed)
@classmethod
def from_file(cls, filepath):
if isinstance(filepath, Path):
filepath = str(filepath.absolute())
stem = os.path.basename(filepath).split('.')[0]
parent = os.path.dirname(filepath)
fullfilename = os.path.join(parent, stem)
with open(filepath) as data_file:
jsondata = json.load(data_file)
return cls(jsondata['sequence'], jsondata, fullfilename)
def get_waypoints(self):
#print('HI')
#print(self.sequence)
#print('Hi again')
ways_and_segments = self.sequence
s = pd.DataFrame(ways_and_segments)
waypoints = s[s['type'] == 'Station']['geometry']
w = waypoints.values.tolist()
latlongFull = pd.DataFrame(w)
latlongInter = np.array(latlongFull['coordinates'].values.tolist()).transpose()
return GeoPolygon(LONG_LAT, *latlongInter)
def get_segments(self):
ways_and_segments = self.sequence
s = pd.DataFrame(ways_and_segments)
waypoints = s[s['type'] == 'Segment']['geometry']
w = waypoints.values.tolist()
latlongFull = pd.DataFrame(w)
latlongInter = latlongFull['coordinates'].values.tolist()
waypointslatlong = []
for elt in latlongInter:
waypointslatlong.extend(elt)
return GeoPolygon(LONG_LAT, *np.array(waypointslatlong).transpose())
def add_search_sol(self, segments, write_to_file=False):
ways_and_segments = deepcopy(self.sequence)
segment_iter = iter(segments)
for i, element in enumerate(ways_and_segments):
if element["type"] == "Segment":
segment = segment_iter.next().tojson()
ways_and_segments[i]["derivedInfo"].update(segment["derivedInfo"]) #merges our new info
ways_and_segments[i]["geometry"] = segment["geometry"]
raw_json = json.dumps(ways_and_segments)
formatted_json = json.dumps(ways_and_segments, indent=4, sort_keys=True)
if write_to_file and self.filename:
rawfile = self.raw
rawfile["sequence"] = ways_and_segments
new_filename = self.filename + self.extension
with open(new_filename, 'w') as outfile:
json.dump(rawfile, outfile, indent=4, sort_keys=True)
return raw_json
if __name__ == '__main__':
from pextant.settings import *
md = JSONloader.from_file(MD_HI[6])
sextantsol = md.get_segments()
test =json.dumps([{u'commands': [], u'uuid': u'ccf34b91-86f4-47ee-b03d-3dbbba6ba167',
u'geometry': {u'type': u'Point', u'coordinates': [-155.20191861222781, 19.366498026755977]}, u'tolerance': 0.6,
u'userDuration': 0, u'boundary': 0.6, u'type': u'Station', u'id': u'HIL11_A_WAY0'}, {
u'derivedInfo': {u'durationSeconds': 28, u'straightLineDurationSeconds': 28,
u'distanceMeters': 25.15366493675656}, u'commands': [], u'type': u'Segment',
u'id': u'HIL11_A_SEG1', u'uuid': u'69aa6e5f-6a10-4568-bfea-5bfbc8417ba7'},
{u'commands': [], u'uuid': u'1a159ed9-77ee-4f79-9163-e3685a01a00c',
u'geometry': {u'type': u'Point', u'coordinates': [-155.2016858384008, 19.36644374514718]}, u'tolerance': 0.6,
u'userDuration': 0, u'boundary': 0.6, u'type': u'Station', u'id': u'HIL11_A_WAY1'}])
    jloader = JSONloader.from_string(test)
    jloader.get_waypoints()
| mit |
astropy/astropy | astropy/nddata/utils.py | 2 | 31965 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module includes helper functions for array operations.
"""
from copy import deepcopy
import sys
import types
import warnings
import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.utils import lazyproperty
from astropy.utils.decorators import AstropyDeprecationWarning
from astropy.wcs.utils import skycoord_to_pixel, proj_plane_pixel_scales
from astropy.wcs import Sip
from .blocks import block_reduce as _block_reduce
from .blocks import block_replicate as _block_replicate
__all__ = ['extract_array', 'add_array', 'subpixel_indices',
'overlap_slices', 'NoOverlapError', 'PartialOverlapError',
'Cutout2D']
# this can be replaced with PEP562 when the minimum required Python
# version is 3.7
class _ModuleWithDeprecation(types.ModuleType):
def __getattribute__(self, name):
deprecated = ('block_reduce', 'block_replicate')
if name in deprecated:
warnings.warn(f'{name} was moved to the astropy.nddata.blocks '
'module. Please update your import statement.',
AstropyDeprecationWarning)
return object.__getattribute__(self, f'_{name}')
return object.__getattribute__(self, name)
sys.modules[__name__].__class__ = _ModuleWithDeprecation
class NoOverlapError(ValueError):
'''Raised when determining the overlap of non-overlapping arrays.'''
pass
class PartialOverlapError(ValueError):
'''Raised when arrays only partially overlap.'''
pass
def overlap_slices(large_array_shape, small_array_shape, position,
mode='partial'):
"""
Get slices for the overlapping part of a small and a large array.
Given a certain position of the center of the small array, with
respect to the large array, tuples of slices are returned which can be
used to extract, add or subtract the small array at the given
position. This function takes care of the correct behavior at the
boundaries, where the small array is cut of appropriately.
Integer positions are at the pixel centers.
Parameters
----------
large_array_shape : tuple of int or int
The shape of the large array (for 1D arrays, this can be an
`int`).
small_array_shape : int or tuple thereof
The shape of the small array (for 1D arrays, this can be an
`int`). See the ``mode`` keyword for additional details.
position : number or tuple thereof
The position of the small array's center with respect to the
large array. The pixel coordinates should be in the same order
as the array shape. Integer positions are at the pixel centers.
For any axis where ``small_array_shape`` is even, the position
is rounded up, e.g. extracting two elements with a center of
``1`` will define the extracted region as ``[0, 1]``.
mode : {'partial', 'trim', 'strict'}, optional
In ``'partial'`` mode, a partial overlap of the small and the
large array is sufficient. The ``'trim'`` mode is similar to
the ``'partial'`` mode, but ``slices_small`` will be adjusted to
return only the overlapping elements. In the ``'strict'`` mode,
the small array has to be fully contained in the large array,
otherwise an `~astropy.nddata.utils.PartialOverlapError` is
raised. In all modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`.
Returns
-------
slices_large : tuple of slice
A tuple of slice objects for each axis of the large array, such
that ``large_array[slices_large]`` extracts the region of the
large array that overlaps with the small array.
slices_small : tuple of slice
A tuple of slice objects for each axis of the small array, such
that ``small_array[slices_small]`` extracts the region that is
inside the large array.
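    Examples
    --------
    A small illustrative case, following the slicing rules described above:
    extract a length-3 window centered on index 1 of a length-5 array.
    >>> from astropy.nddata.utils import overlap_slices
    >>> overlap_slices(5, 3, 1)
    ((slice(0, 3, None),), (slice(0, 3, None),))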
"""
if mode not in ['partial', 'trim', 'strict']:
raise ValueError('Mode can be only "partial", "trim", or "strict".')
if np.isscalar(small_array_shape):
small_array_shape = (small_array_shape, )
if np.isscalar(large_array_shape):
large_array_shape = (large_array_shape, )
if np.isscalar(position):
position = (position, )
if any(~np.isfinite(position)):
raise ValueError('Input position contains invalid values (NaNs or '
'infs).')
if len(small_array_shape) != len(large_array_shape):
raise ValueError('"large_array_shape" and "small_array_shape" must '
'have the same number of dimensions.')
if len(small_array_shape) != len(position):
raise ValueError('"position" must have the same number of dimensions '
'as "small_array_shape".')
# define the min/max pixel indices
indices_min = [int(np.ceil(pos - (small_shape / 2.)))
for (pos, small_shape) in zip(position, small_array_shape)]
indices_max = [int(np.ceil(pos + (small_shape / 2.)))
for (pos, small_shape) in zip(position, small_array_shape)]
for e_max in indices_max:
if e_max < 0:
raise NoOverlapError('Arrays do not overlap.')
for e_min, large_shape in zip(indices_min, large_array_shape):
if e_min >= large_shape:
raise NoOverlapError('Arrays do not overlap.')
if mode == 'strict':
for e_min in indices_min:
if e_min < 0:
raise PartialOverlapError('Arrays overlap only partially.')
for e_max, large_shape in zip(indices_max, large_array_shape):
if e_max > large_shape:
raise PartialOverlapError('Arrays overlap only partially.')
# Set up slices
slices_large = tuple(slice(max(0, indices_min),
min(large_shape, indices_max))
for (indices_min, indices_max, large_shape) in
zip(indices_min, indices_max, large_array_shape))
if mode == 'trim':
slices_small = tuple(slice(0, slc.stop - slc.start)
for slc in slices_large)
else:
slices_small = tuple(slice(max(0, -indices_min),
min(large_shape - indices_min,
indices_max - indices_min))
for (indices_min, indices_max, large_shape) in
zip(indices_min, indices_max, large_array_shape))
return slices_large, slices_small
def extract_array(array_large, shape, position, mode='partial',
fill_value=np.nan, return_position=False):
"""
Extract a smaller array of the given shape and position from a
larger array.
Parameters
----------
array_large : ndarray
The array from which to extract the small array.
shape : int or tuple thereof
The shape of the extracted array (for 1D arrays, this can be an
`int`). See the ``mode`` keyword for additional details.
position : number or tuple thereof
The position of the small array's center with respect to the
large array. The pixel coordinates should be in the same order
as the array shape. Integer positions are at the pixel centers
(for 1D arrays, this can be a number).
mode : {'partial', 'trim', 'strict'}, optional
The mode used for extracting the small array. For the
``'partial'`` and ``'trim'`` modes, a partial overlap of the
small array and the large array is sufficient. For the
``'strict'`` mode, the small array has to be fully contained
within the large array, otherwise an
`~astropy.nddata.utils.PartialOverlapError` is raised. In all
modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`. In ``'partial'`` mode,
positions in the small array that do not overlap with the large
array will be filled with ``fill_value``. In ``'trim'`` mode
only the overlapping elements are returned, thus the resulting
small array may be smaller than the requested ``shape``.
fill_value : number, optional
If ``mode='partial'``, the value to fill pixels in the extracted
small array that do not overlap with the input ``array_large``.
``fill_value`` will be changed to have the same ``dtype`` as the
``array_large`` array, with one exception. If ``array_large``
has integer type and ``fill_value`` is ``np.nan``, then a
`ValueError` will be raised.
return_position : bool, optional
If `True`, return the coordinates of ``position`` in the
coordinate system of the returned array.
Returns
-------
array_small : ndarray
The extracted array.
new_position : tuple
If ``return_position`` is true, this tuple will contain the
coordinates of the input ``position`` in the coordinate system
of ``array_small``. Note that for partially overlapping arrays,
``new_position`` might actually be outside of the
``array_small``; ``array_small[new_position]`` might give wrong
results if any element in ``new_position`` is negative.
Examples
--------
We consider a large array with the shape 11x10, from which we extract
a small array of shape 3x5:
>>> import numpy as np
>>> from astropy.nddata.utils import extract_array
>>> large_array = np.arange(110).reshape((11, 10))
>>> extract_array(large_array, (3, 5), (7, 7))
array([[65, 66, 67, 68, 69],
[75, 76, 77, 78, 79],
[85, 86, 87, 88, 89]])
"""
if np.isscalar(shape):
shape = (shape, )
if np.isscalar(position):
position = (position, )
if mode not in ['partial', 'trim', 'strict']:
raise ValueError("Valid modes are 'partial', 'trim', and 'strict'.")
large_slices, small_slices = overlap_slices(array_large.shape,
shape, position, mode=mode)
extracted_array = array_large[large_slices]
if return_position:
new_position = [i - s.start for i, s in zip(position, large_slices)]
# Extracting on the edges is presumably a rare case, so treat special here
if (extracted_array.shape != shape) and (mode == 'partial'):
extracted_array = np.zeros(shape, dtype=array_large.dtype)
try:
extracted_array[:] = fill_value
except ValueError as exc:
exc.args += ('fill_value is inconsistent with the data type of '
'the input array (e.g., fill_value cannot be set to '
'np.nan if the input array has integer type). Please '
'change either the input array dtype or the '
'fill_value.',)
raise exc
extracted_array[small_slices] = array_large[large_slices]
if return_position:
new_position = [i + s.start for i, s in zip(new_position,
small_slices)]
if return_position:
return extracted_array, tuple(new_position)
else:
return extracted_array
def add_array(array_large, array_small, position):
"""
Add a smaller array at a given position in a larger array.
Parameters
----------
array_large : ndarray
Large array.
array_small : ndarray
Small array to add. Can be equal to ``array_large`` in size in a given
dimension, but not larger.
position : tuple
Position of the small array's center, with respect to the large array.
Coordinates should be in the same order as the array shape.
Returns
-------
new_array : ndarray
The new array formed from the sum of ``array_large`` and
``array_small``.
Notes
-----
The addition is done in-place.
Examples
--------
We consider a large array of zeros with the shape 5x5 and a small
array of ones with a shape of 3x3:
>>> import numpy as np
>>> from astropy.nddata.utils import add_array
>>> large_array = np.zeros((5, 5))
>>> small_array = np.ones((3, 3))
>>> add_array(large_array, small_array, (1, 2)) # doctest: +FLOAT_CMP
array([[0., 1., 1., 1., 0.],
[0., 1., 1., 1., 0.],
[0., 1., 1., 1., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]])
"""
# Check if large array is not smaller
if all(large_shape >= small_shape for (large_shape, small_shape)
in zip(array_large.shape, array_small.shape)):
large_slices, small_slices = overlap_slices(array_large.shape,
array_small.shape,
position)
array_large[large_slices] += array_small[small_slices]
return array_large
else:
raise ValueError("Can't add array. Small array too large.")
def subpixel_indices(position, subsampling):
"""
Convert decimal points to indices, given a subsampling factor.
This discards the integer part of the position and uses only the decimal
place, and converts this to a subpixel position depending on the
subsampling specified. The center of a pixel corresponds to an integer
position.
Parameters
----------
position : ndarray or array-like
Positions in pixels.
subsampling : int
Subsampling factor per pixel.
Returns
-------
indices : ndarray
The integer subpixel indices corresponding to the input positions.
Examples
--------
If no subsampling is used, then the subpixel indices returned are always 0:
>>> from astropy.nddata.utils import subpixel_indices
>>> subpixel_indices([1.2, 3.4, 5.6], 1) # doctest: +FLOAT_CMP
array([0., 0., 0.])
If instead we use a subsampling of 2, we see that for the two first values
(1.1 and 3.4) the subpixel position is 1, while for 5.6 it is 0. This is
because the values of 1, 3, and 6 lie in the center of pixels, and 1.1 and
3.4 lie in the left part of the pixels and 5.6 lies in the right part.
>>> subpixel_indices([1.2, 3.4, 5.5], 2) # doctest: +FLOAT_CMP
array([1., 1., 0.])
"""
# Get decimal points
fractions = np.modf(np.asanyarray(position) + 0.5)[0]
return np.floor(fractions * subsampling)
class Cutout2D:
"""
Create a cutout object from a 2D array.
The returned object will contain a 2D cutout array. If
``copy=False`` (default), the cutout array is a view into the
original ``data`` array, otherwise the cutout array will contain a
copy of the original data.
If a `~astropy.wcs.WCS` object is input, then the returned object
will also contain a copy of the original WCS, but updated for the
cutout array.
For example usage, see :ref:`cutout_images`.
.. warning::
The cutout WCS object does not currently handle cases where the
input WCS object contains distortion lookup tables described in
the `FITS WCS distortion paper
<https://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf>`__.
Parameters
----------
data : ndarray
The 2D data array from which to extract the cutout array.
position : tuple or `~astropy.coordinates.SkyCoord`
The position of the cutout array's center with respect to
the ``data`` array. The position can be specified either as
a ``(x, y)`` tuple of pixel coordinates or a
`~astropy.coordinates.SkyCoord`, in which case ``wcs`` is a
required input.
size : int, array-like, or `~astropy.units.Quantity`
The size of the cutout array along each axis. If ``size``
is a scalar number or a scalar `~astropy.units.Quantity`,
then a square cutout of ``size`` will be created. If
``size`` has two elements, they should be in ``(ny, nx)``
order. Scalar numbers in ``size`` are assumed to be in
units of pixels. ``size`` can also be a
`~astropy.units.Quantity` object or contain
`~astropy.units.Quantity` objects. Such
`~astropy.units.Quantity` objects must be in pixel or
angular units. For all cases, ``size`` will be converted to
        an integer number of pixels, rounding to the nearest
integer. See the ``mode`` keyword for additional details on
the final cutout size.
.. note::
If ``size`` is in angular units, the cutout size is
converted to pixels using the pixel scales along each
axis of the image at the ``CRPIX`` location. Projection
and other non-linear distortions are not taken into
account.
wcs : `~astropy.wcs.WCS`, optional
A WCS object associated with the input ``data`` array. If
``wcs`` is not `None`, then the returned cutout object will
contain a copy of the updated WCS for the cutout data array.
mode : {'trim', 'partial', 'strict'}, optional
The mode used for creating the cutout data array. For the
``'partial'`` and ``'trim'`` modes, a partial overlap of the
cutout array and the input ``data`` array is sufficient.
For the ``'strict'`` mode, the cutout array has to be fully
contained within the ``data`` array, otherwise an
`~astropy.nddata.utils.PartialOverlapError` is raised. In
all modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`. In ``'partial'``
mode, positions in the cutout array that do not overlap with
the ``data`` array will be filled with ``fill_value``. In
``'trim'`` mode only the overlapping elements are returned,
thus the resulting cutout array may be smaller than the
requested ``shape``.
fill_value : float or int, optional
If ``mode='partial'``, the value to fill pixels in the
cutout array that do not overlap with the input ``data``.
``fill_value`` must have the same ``dtype`` as the input
``data`` array.
copy : bool, optional
If `False` (default), then the cutout data will be a view
into the original ``data`` array. If `True`, then the
cutout data will hold a copy of the original ``data`` array.
Attributes
----------
data : 2D `~numpy.ndarray`
The 2D cutout array.
shape : (2,) tuple
The ``(ny, nx)`` shape of the cutout array.
shape_input : (2,) tuple
The ``(ny, nx)`` shape of the input (original) array.
input_position_cutout : (2,) tuple
The (unrounded) ``(x, y)`` position with respect to the cutout
array.
input_position_original : (2,) tuple
The original (unrounded) ``(x, y)`` input position (with respect
to the original array).
slices_original : (2,) tuple of slice object
A tuple of slice objects for the minimal bounding box of the
cutout with respect to the original array. For
``mode='partial'``, the slices are for the valid (non-filled)
cutout values.
slices_cutout : (2,) tuple of slice object
A tuple of slice objects for the minimal bounding box of the
cutout with respect to the cutout array. For
``mode='partial'``, the slices are for the valid (non-filled)
cutout values.
xmin_original, ymin_original, xmax_original, ymax_original : float
The minimum and maximum ``x`` and ``y`` indices of the minimal
rectangular region of the cutout array with respect to the
original array. For ``mode='partial'``, the bounding box
indices are for the valid (non-filled) cutout values. These
values are the same as those in `bbox_original`.
xmin_cutout, ymin_cutout, xmax_cutout, ymax_cutout : float
The minimum and maximum ``x`` and ``y`` indices of the minimal
rectangular region of the cutout array with respect to the
cutout array. For ``mode='partial'``, the bounding box indices
are for the valid (non-filled) cutout values. These values are
the same as those in `bbox_cutout`.
wcs : `~astropy.wcs.WCS` or None
A WCS object associated with the cutout array if a ``wcs``
was input.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata.utils import Cutout2D
>>> from astropy import units as u
>>> data = np.arange(20.).reshape(5, 4)
>>> cutout1 = Cutout2D(data, (2, 2), (3, 3))
>>> print(cutout1.data) # doctest: +FLOAT_CMP
[[ 5. 6. 7.]
[ 9. 10. 11.]
[13. 14. 15.]]
>>> print(cutout1.center_original)
(2.0, 2.0)
>>> print(cutout1.center_cutout)
(1.0, 1.0)
>>> print(cutout1.origin_original)
(1, 1)
>>> cutout2 = Cutout2D(data, (2, 2), 3)
>>> print(cutout2.data) # doctest: +FLOAT_CMP
[[ 5. 6. 7.]
[ 9. 10. 11.]
[13. 14. 15.]]
>>> size = u.Quantity([3, 3], u.pixel)
>>> cutout3 = Cutout2D(data, (0, 0), size)
>>> print(cutout3.data) # doctest: +FLOAT_CMP
[[0. 1.]
[4. 5.]]
>>> cutout4 = Cutout2D(data, (0, 0), (3 * u.pixel, 3))
>>> print(cutout4.data) # doctest: +FLOAT_CMP
[[0. 1.]
[4. 5.]]
>>> cutout5 = Cutout2D(data, (0, 0), (3, 3), mode='partial')
>>> print(cutout5.data) # doctest: +FLOAT_CMP
[[nan nan nan]
[nan 0. 1.]
[nan 4. 5.]]
"""
def __init__(self, data, position, size, wcs=None, mode='trim',
fill_value=np.nan, copy=False):
if wcs is None:
wcs = getattr(data, 'wcs', None)
if isinstance(position, SkyCoord):
if wcs is None:
raise ValueError('wcs must be input if position is a '
'SkyCoord')
position = skycoord_to_pixel(position, wcs, mode='all') # (x, y)
if np.isscalar(size):
size = np.repeat(size, 2)
# special handling for a scalar Quantity
if isinstance(size, u.Quantity):
size = np.atleast_1d(size)
if len(size) == 1:
size = np.repeat(size, 2)
if len(size) > 2:
raise ValueError('size must have at most two elements')
shape = np.zeros(2).astype(int)
pixel_scales = None
# ``size`` can have a mixture of int and Quantity (and even units),
# so evaluate each axis separately
for axis, side in enumerate(size):
if not isinstance(side, u.Quantity):
shape[axis] = int(np.round(size[axis])) # pixels
else:
if side.unit == u.pixel:
shape[axis] = int(np.round(side.value))
elif side.unit.physical_type == 'angle':
if wcs is None:
raise ValueError('wcs must be input if any element '
'of size has angular units')
if pixel_scales is None:
pixel_scales = u.Quantity(
proj_plane_pixel_scales(wcs), wcs.wcs.cunit[axis])
shape[axis] = int(np.round(
(side / pixel_scales[axis]).decompose()))
else:
raise ValueError('shape can contain Quantities with only '
'pixel or angular units')
data = np.asanyarray(data)
# reverse position because extract_array and overlap_slices
# use (y, x), but keep the input position
pos_yx = position[::-1]
cutout_data, input_position_cutout = extract_array(
data, tuple(shape), pos_yx, mode=mode, fill_value=fill_value,
return_position=True)
if copy:
cutout_data = np.copy(cutout_data)
self.data = cutout_data
self.input_position_cutout = input_position_cutout[::-1] # (x, y)
slices_original, slices_cutout = overlap_slices(
data.shape, shape, pos_yx, mode=mode)
self.slices_original = slices_original
self.slices_cutout = slices_cutout
self.shape = self.data.shape
self.input_position_original = position
self.shape_input = shape
((self.ymin_original, self.ymax_original),
(self.xmin_original, self.xmax_original)) = self.bbox_original
((self.ymin_cutout, self.ymax_cutout),
(self.xmin_cutout, self.xmax_cutout)) = self.bbox_cutout
# the true origin pixel of the cutout array, including any
# filled cutout values
self._origin_original_true = (
self.origin_original[0] - self.slices_cutout[1].start,
self.origin_original[1] - self.slices_cutout[0].start)
if wcs is not None:
self.wcs = deepcopy(wcs)
self.wcs.wcs.crpix -= self._origin_original_true
self.wcs.array_shape = self.data.shape
if wcs.sip is not None:
self.wcs.sip = Sip(wcs.sip.a, wcs.sip.b,
wcs.sip.ap, wcs.sip.bp,
wcs.sip.crpix - self._origin_original_true)
else:
self.wcs = None
def to_original_position(self, cutout_position):
"""
Convert an ``(x, y)`` position in the cutout array to the original
``(x, y)`` position in the original large array.
Parameters
----------
cutout_position : tuple
The ``(x, y)`` pixel position in the cutout array.
Returns
-------
original_position : tuple
The corresponding ``(x, y)`` pixel position in the original
large array.
"""
return tuple(cutout_position[i] + self.origin_original[i]
for i in [0, 1])
def to_cutout_position(self, original_position):
"""
Convert an ``(x, y)`` position in the original large array to
the ``(x, y)`` position in the cutout array.
Parameters
----------
original_position : tuple
The ``(x, y)`` pixel position in the original large array.
Returns
-------
cutout_position : tuple
The corresponding ``(x, y)`` pixel position in the cutout
array.
"""
return tuple(original_position[i] - self.origin_original[i]
for i in [0, 1])
def plot_on_original(self, ax=None, fill=False, **kwargs):
"""
Plot the cutout region on a matplotlib Axes instance.
Parameters
----------
ax : `matplotlib.axes.Axes` instance, optional
If `None`, then the current `matplotlib.axes.Axes` instance
is used.
fill : bool, optional
Set whether to fill the cutout patch. The default is
`False`.
kwargs : optional
Any keyword arguments accepted by `matplotlib.patches.Patch`.
Returns
-------
ax : `matplotlib.axes.Axes` instance
The matplotlib Axes instance constructed in the method if
``ax=None``. Otherwise the output ``ax`` is the same as the
input ``ax``.
"""
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
kwargs['fill'] = fill
if ax is None:
ax = plt.gca()
height, width = self.shape
hw, hh = width / 2., height / 2.
pos_xy = self.position_original - np.array([hw, hh])
patch = mpatches.Rectangle(pos_xy, width, height, 0., **kwargs)
ax.add_patch(patch)
return ax
@staticmethod
def _calc_center(slices):
"""
Calculate the center position. The center position will be
fractional for even-sized arrays. For ``mode='partial'``, the
central position is calculated for the valid (non-filled) cutout
values.
"""
return tuple(0.5 * (slices[i].start + slices[i].stop - 1)
for i in [1, 0])
@staticmethod
def _calc_bbox(slices):
"""
Calculate a minimal bounding box in the form ``((ymin, ymax),
(xmin, xmax))``. Note these are pixel locations, not slice
indices. For ``mode='partial'``, the bounding box indices are
for the valid (non-filled) cutout values.
"""
# (stop - 1) to return the max pixel location, not the slice index
return ((slices[0].start, slices[0].stop - 1),
(slices[1].start, slices[1].stop - 1))
@lazyproperty
def origin_original(self):
"""
The ``(x, y)`` index of the origin pixel of the cutout with
respect to the original array. For ``mode='partial'``, the
origin pixel is calculated for the valid (non-filled) cutout
values.
"""
return (self.slices_original[1].start, self.slices_original[0].start)
@lazyproperty
def origin_cutout(self):
"""
The ``(x, y)`` index of the origin pixel of the cutout with
respect to the cutout array. For ``mode='partial'``, the origin
pixel is calculated for the valid (non-filled) cutout values.
"""
return (self.slices_cutout[1].start, self.slices_cutout[0].start)
@staticmethod
def _round(a):
"""
Round the input to the nearest integer.
If two integers are equally close, the value is rounded up.
Note that this is different from `np.round`, which rounds to the
nearest even number.
"""
return int(np.floor(a + 0.5))
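    # For example, ``_round(1.5)`` gives 2 and ``_round(2.5)`` gives 3 (ties
    # round up), whereas ``np.round`` rounds ties to the nearest even value
    # and returns 2.0 for both inputs.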
@lazyproperty
def position_original(self):
"""
The ``(x, y)`` position index (rounded to the nearest pixel) in
the original array.
"""
return (self._round(self.input_position_original[0]),
self._round(self.input_position_original[1]))
@lazyproperty
def position_cutout(self):
"""
The ``(x, y)`` position index (rounded to the nearest pixel) in
the cutout array.
"""
return (self._round(self.input_position_cutout[0]),
self._round(self.input_position_cutout[1]))
@lazyproperty
def center_original(self):
"""
The central ``(x, y)`` position of the cutout array with respect
to the original array. For ``mode='partial'``, the central
position is calculated for the valid (non-filled) cutout values.
"""
return self._calc_center(self.slices_original)
@lazyproperty
def center_cutout(self):
"""
The central ``(x, y)`` position of the cutout array with respect
to the cutout array. For ``mode='partial'``, the central
position is calculated for the valid (non-filled) cutout values.
"""
return self._calc_center(self.slices_cutout)
@lazyproperty
def bbox_original(self):
"""
The bounding box ``((ymin, ymax), (xmin, xmax))`` of the minimal
rectangular region of the cutout array with respect to the
original array. For ``mode='partial'``, the bounding box
indices are for the valid (non-filled) cutout values.
"""
return self._calc_bbox(self.slices_original)
@lazyproperty
def bbox_cutout(self):
"""
The bounding box ``((ymin, ymax), (xmin, xmax))`` of the minimal
rectangular region of the cutout array with respect to the
cutout array. For ``mode='partial'``, the bounding box indices
are for the valid (non-filled) cutout values.
"""
return self._calc_bbox(self.slices_cutout)
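# A short end-to-end sketch of how the cutout class above is typically used.
# It assumes the class is named ``Cutout2D`` and that its constructor takes
# ``(data, position, size)``; the demo array and values are illustrative only.
if __name__ == '__main__':
    import numpy as np

    demo_data = np.arange(100.).reshape(10, 10)
    demo_cutout = Cutout2D(demo_data, (4, 5), (3, 3))
    print(demo_cutout.data.shape)              # (3, 3)
    print(demo_cutout.bbox_original)           # ((4, 6), (3, 5)) as ((ymin, ymax), (xmin, xmax))
    print(demo_cutout.to_original_position((0, 0)))   # (3, 4): cutout origin in the big array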
| bsd-3-clause |
f3r/scikit-learn | sklearn/metrics/cluster/__init__.py | 312 | 1322 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
| bsd-3-clause |
op7ic/LeakGenerator | leakme.py | 1 | 351132 | # Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import random
import string
import itertools
import re
import sys
import hashlib
import binascii
import optparse
# Resources
# https://raw.githubusercontent.com/neo/discourse_heroku/master/lib/common_passwords/10k-common-passwords.txt
# https://raw.githubusercontent.com/dominictarr/random-name/master/names.txt
# http://code.activestate.com/recipes/65215-e-mail-address-validation/
emailregex = r"^[a-zA-Z0-9._%-]+@[a-zA-Z0-9._%-]+\.[a-zA-Z]{2,6}$"
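# Quick illustrative check of the pattern above (uses the ``re`` module
# imported earlier; the addresses are made up):
#
#     >>> re.match(emailregex, "jane.doe@example.com") is not None
#     True
#     >>> re.match(emailregex, "not-an-email") is not None
#     False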
common_passwords = ['password','123456','12345678','1234','qwerty','12345','dragon','pussy','baseball','football','letmein','monkey','696969','abc123','mustang','michael','shadow','master','jennifer','111111','2000','jordan','superman','harley','1234567','fuckme','hunter','fuckyou','trustno1','ranger','buster','thomas','tigger','robert','soccer','fuck','batman','test','pass','killer','hockey','george','charlie','andrew','michelle','love','sunshine','jessica','asshole','6969','pepper','daniel','access','123456789','654321','joshua','maggie','starwars','silver','william','dallas','yankees','123123','ashley','666666','hello','amanda','orange','biteme','freedom','computer','sexy','thunder','nicole','ginger','heather','hammer','summer','corvette','taylor','fucker','austin','1111','merlin','matthew','121212','golfer','cheese','princess','martin','chelsea','patrick','richard','diamond','yellow','bigdog','secret','asdfgh','sparky','cowboy','camaro','anthony','matrix','falcon','iloveyou','bailey','guitar','jackson','purple','scooter','phoenix','aaaaaa','morgan','tigers','porsche','mickey','maverick','cookie','nascar','peanut','justin','131313','money','horny','samantha','panties','steelers','joseph','snoopy','boomer','whatever','iceman','smokey','gateway','dakota','cowboys','eagles','chicken','dick','black','zxcvbn','please','andrea','ferrari','knight','hardcore','melissa','compaq','coffee','booboo','bitch','johnny','bulldog','xxxxxx','welcome','james','player','ncc1701','wizard','scooby','charles','junior','internet','bigdick','mike','brandy','tennis','blowjob','banana','monster','spider','lakers','miller','rabbit','enter','mercedes','brandon','steven','fender','john','yamaha','diablo','chris','boston','tiger','marine','chicago','rangers','gandalf','winter','bigtits','barney','edward','raiders','porn','badboy','blowme','spanky','bigdaddy','johnson','chester','london','midnight','blue','fishing','000000','hannah','slayer','11111111','rachel','sexsex','redsox','thx1138','asdf','marlboro','panther','zxcvbnm','arsenal','oliver','qazwsx','mother','victoria','7777777','jasper','angel','david','winner','crystal','golden','butthead','viking','jack','iwantu','shannon','murphy','angels','prince','cameron','girls','madison','wilson','carlos','hooters','willie','startrek','captain','maddog','jasmine','butter','booger','angela','golf','lauren','rocket','tiffany','theman','dennis','liverpoo','flower','forever','green','jackie','muffin','turtle','sophie','danielle','redskins','toyota','jason','sierra','winston','debbie','giants','packers','newyork','jeremy','casper','bubba','112233','sandra','lovers','mountain','united','cooper','driver','tucker','helpme','fucking','pookie','lucky','maxwell','8675309','bear','suckit','gators','5150','222222','shithead','fuckoff','jaguar','monica','fred','happy','hotdog','tits','gemini','lover','xxxxxxxx','777777','canada','nathan','victor','florida','88888888','nicholas','rosebud','metallic','doctor','trouble','success','stupid','tomcat','warrior','peaches','apples','fish','qwertyui','magic','buddy','dolphins','rainbow','gunner','987654','freddy','alexis','braves','cock','2112','1212','cocacola','xavier','dolphin','testing','bond007','member','calvin','voodoo','7777','samson','alex','apollo','fire','tester','walter','beavis','voyager','peter','porno','bonnie','rush2112','beer','apple','scorpio','jonathan','skippy','sydney','scott','red123','power','gordon','travis','beaver','star','jackass','flyers','boobs','232323','zzzzzz','steve','rebecca','scorpion','doggie','legend','ou812',
'yankee','blazer','bill','runner','birdie','bitches','555555','parker','topgun','asdfasdf','heaven','viper','animal','2222','bigboy','4444','arthur','baby','private','godzilla','donald','williams','lifehack','phantom','dave','rock','august','sammy','cool','brian','platinum','jake','bronco','paul','mark','frank','heka6w2','copper','billy','cumshot','garfield','willow','cunt','little','carter','slut','albert','69696969','kitten','super','jordan23','eagle1','shelby','america','11111','jessie','house','free','123321','chevy','bullshit','white','broncos','horney','surfer','nissan','999999','saturn','airborne','elephant','marvin','shit','action','adidas','qwert','kevin','1313','explorer','walker','police','christin','december','benjamin','wolf','sweet','therock','king','online','dickhead','brooklyn','teresa','cricket','sharon','dexter','racing','penis','gregory','0000','teens','redwings','dreams','michigan','hentai','magnum','87654321','nothing','donkey','trinity','digital','333333','stella','cartman','guinness','123abc','speedy','buffalo','kitty','pimpin','eagle','einstein','kelly','nelson','nirvana','vampire','xxxx','playboy','louise','pumpkin','snowball','test123','girl','sucker','mexico','beatles','fantasy','ford','gibson','celtic','marcus','cherry','cassie','888888','natasha','sniper','chance','genesis','hotrod','reddog','alexande','college','jester','passw0rd','bigcock','smith','lasvegas','carmen','slipknot','3333','death','kimberly','1q2w3e','eclipse','1q2w3e4r','stanley','samuel','drummer','homer','montana','music','aaaa','spencer','jimmy','carolina','colorado','creative','hello1','rocky','goober','friday','bollocks','scotty','abcdef','bubbles','hawaii','fluffy','mine','stephen','horses','thumper','5555','pussies','darkness','asdfghjk','pamela','boobies','buddha','vanessa','sandman','naughty','douglas','honda','matt','azerty','6666','shorty','money1','beach','loveme','4321','simple','poohbear','444444','badass','destiny','sarah','denise','vikings','lizard','melanie','assman','sabrina','nintendo','water','good','howard','time','123qwe','november','xxxxx','october','leather','bastard','young','101010','extreme','hard','password1','vincent','pussy1','lacrosse','hotmail','spooky','amateur','alaska','badger','paradise','maryjane','poop','crazy','mozart','video','russell','vagina','spitfire','anderson','norman','eric','cherokee','cougar','barbara','long','420420','family','horse','enigma','allison','raider','brazil','blonde','jones','55555','dude','drowssap','jeff','school','marshall','lovely','1qaz2wsx','jeffrey','caroline','franklin','booty','molly','snickers','leslie','nipples','courtney','diesel','rocks','eminem','westside','suzuki','daddy','passion','hummer','ladies','zachary','frankie','elvis','reggie','alpha','suckme','simpson','patricia','147147','pirate','tommy','semperfi','jupiter','redrum','freeuser','wanker','stinky','ducati','paris','natalie','babygirl','bishop','windows','spirit','pantera','monday','patches','brutus','houston','smooth','penguin','marley','forest','cream','212121','flash','maximus','nipple','bobby','bradley','vision','pokemon','champion','fireman','indian','softball','picard','system','clinton','cobra','enjoy','lucky1','claire','claudia','boogie','timothy','marines','security','dirty','admin','wildcats','pimp','dancer','hardon','veronica','fucked','abcd1234','abcdefg','ironman','wolverin','remember','great','freepass','bigred','squirt','justice','francis','hobbes','kermit','pearljam','mercury','domino','9999','denver','brooke','rascal','hitman','mistress','simon','
tony','bbbbbb','friend','peekaboo','naked','budlight','electric','sluts','stargate','saints','bondage','brittany','bigman','zombie','swimming','duke','qwerty1','babes','scotland','disney','rooster','brenda','mookie','swordfis','candy','duncan','olivia','hunting','blink182','alicia','8888','samsung','bubba1','whore','virginia','general','passport','aaaaaaaa','erotic','liberty','arizona','jesus','abcd','newport','skipper','rolltide','balls','happy1','galore','christ','weasel','242424','wombat','digger','classic','bulldogs','poopoo','accord','popcorn','turkey','jenny','amber','bunny','mouse','007007','titanic','liverpool','dreamer','everton','friends','chevelle','carrie','gabriel','psycho','nemesis','burton','pontiac','connor','eatme','lickme','roland','cumming','mitchell','ireland','lincoln','arnold','spiderma','patriots','goblue','devils','eugene','empire','asdfg','cardinal','brown','shaggy','froggy','qwer','kawasaki','kodiak','people','phpbb','light','54321','kramer','chopper','hooker','honey','whynot','lesbian','lisa','baxter','adam','snake','teen','ncc1701d','qqqqqq','airplane','britney','avalon','sandy','sugar','sublime','stewart','wildcat','raven','scarface','elizabet','123654','trucks','wolfpack','pervert','lawrence','raymond','redhead','american','alyssa','bambam','movie','woody','shaved','snowman','tiger1','chicks','raptor','1969','stingray','shooter','france','stars','madmax','kristen','sports','jerry','789456','garcia','simpsons','lights','ryan','looking','chronic','alison','hahaha','packard','hendrix','perfect','service','spring','srinivas','spike','katie','252525','oscar','brother','bigmac','suck','single','cannon','georgia','popeye','tattoo','texas','party','bullet','taurus','sailor','wolves','panthers','japan','strike','flowers','pussycat','chris1','loverboy','berlin','sticky','marina','tarheels','fisher','russia','connie','wolfgang','testtest','mature','bass','catch22','juice','michael1','nigger','159753','women','alpha1','trooper','hawkeye','head','freaky','dodgers','pakistan','machine','pyramid','vegeta','katana','moose','tinker','coyote','infinity','inside','pepsi','letmein1','bang','control','hercules','morris','james1','tickle','outlaw','browns','billybob','pickle','test1','michele','antonio','sucks','pavilion','changeme','caesar','prelude','tanner','adrian','darkside','bowling','wutang','sunset','robbie','alabama','danger','zeppelin','juan','rusty','pppppp','nick','2001','ping','darkstar','madonna','qwe123','bigone','casino','cheryl','charlie1','mmmmmm','integra','wrangler','apache','tweety','qwerty12','bobafett','simone','none','business','sterling','trevor','transam','dustin','harvey','england','2323','seattle','ssssss','rose','harry','openup','pandora','pussys','trucker','wallace','indigo','storm','malibu','weed','review','babydoll','doggy','dilbert','pegasus','joker','catfish','flipper','valerie','herman','fuckit','detroit','kenneth','cheyenne','bruins','stacey','smoke','joey','seven','marino','fetish','xfiles','wonder','stinger','pizza','babe','pretty','stealth','manutd','gracie','gundam','cessna','longhorn','presario','mnbvcxz','wicked','mustang1','victory','21122112','shelly','awesome','athena','q1w2e3r4','help','holiday','knicks','street','redneck','12341234','casey','gizmo','scully','dragon1','devildog','triumph','eddie','bluebird','shotgun','peewee','ronnie','angel1','daisy','special','metallica','madman','country','impala','lennon','roscoe','omega','access14','enterpri','miranda','search','smitty','blizzard','unicorn','tight','rick','ronald','asdf1234','harris
on','trigger','truck','danny','home','winnie','beauty','thailand','1234567890','cadillac','castle','tyler','bobcat','buddy1','sunny','stones','asian','freddie','chuck','butt','loveyou','norton','hellfire','hotsex','indiana','short','panzer','lonewolf','trumpet','colors','blaster','12121212','fireball','logan','precious','aaron','elaine','jungle','atlanta','gold','corona','curtis','nikki','polaris','timber','theone','baller','chipper','orlando','island','skyline','dragons','dogs','benson','licker','goldie','engineer','kong','pencil','basketba','open','hornet','world','linda','barbie','chan','farmer','valentin','wetpussy','indians','larry','redman','foobar','travel','morpheus','bernie','target','141414','hotstuff','photos','laura','savage','holly','rocky1','fuck_inside','dollar','turbo','design','newton','hottie','moon','202020','blondes','4128','lestat','avatar','future','goforit','random','abgrtyu','jjjjjj','cancer','q1w2e3','smiley','goldberg','express','virgin','zipper','wrinkle1','stone','andy','babylon','dong','powers','consumer','dudley','monkey1','serenity','samurai','99999999','bigboobs','skeeter','lindsay','joejoe','master1','aaaaa','chocolat','christia','birthday','stephani','tang','1234qwer','alfred','ball','98765432','maria','sexual','maxima','77777777','sampson','buckeye','highland','kristin','seminole','reaper','bassman','nugget','lucifer','airforce','nasty','watson','warlock','2121','philip','always','dodge','chrissy','burger','bird','snatch','missy','pink','gang','maddie','holmes','huskers','piglet','photo','joanne','hamilton','dodger','paladin','christy','chubby','buckeyes','hamlet','abcdefgh','bigfoot','sunday','manson','goldfish','garden','deftones','icecream','blondie','spartan','julie','harold','charger','brandi','stormy','sherry','pleasure','juventus','rodney','galaxy','holland','escort','zxcvb','planet','jerome','wesley','blues','song','peace','david1','ncc1701e','1966','51505150','cavalier','gambit','karen','sidney','ripper','oicu812','jamie','sister','marie','martha','nylons','aardvark','nadine','minnie','whiskey','bing','plastic','anal','babylon5','chang','savannah','loser','racecar','insane','yankees1','mememe','hansolo','chiefs','fredfred','freak','frog','salmon','concrete','yvonne','zxcv','shamrock','atlantis','warren','wordpass','julian','mariah','rommel','1010','harris','predator','sylvia','massive','cats','sammy1','mister','stud','marathon','rubber','ding','trunks','desire','montreal','justme','faster','kathleen','irish','1999','bertha','jessica1','alpine','sammie','diamonds','tristan','00000','swinger','shan','stallion','pitbull','letmein2','roberto','ready','april','palmer','ming','shadow1','audrey','chong','clitoris','wang','shirley','fuckers','jackoff','bluesky','sundance','renegade','hollywoo','151515','bernard','wolfman','soldier','picture','pierre','ling','goddess','manager','nikita','sweety','titans','hang','fang','ficken','niners','bottom','bubble','hello123','ibanez','webster','sweetpea','stocking','323232','tornado','lindsey','content','bruce','buck','aragorn','griffin','chen','campbell','trojan','christop','newman','wayne','tina','rockstar','father','geronimo','pascal','crimson','brooks','hector','penny','anna','google','camera','chandler','fatcat','lovelove','cody','cunts','waters','stimpy','finger','cindy','wheels','viper1','latin','robin','greenday','987654321','creampie','brendan','hiphop','willy','snapper','funtime','duck','trombone','adult','cotton','cookies','kaiser','mulder','westham','latino','jeep','ravens','aurora','drizzt','madness','en
ergy','kinky','314159','sophia','stefan','slick','rocker','55555555','freeman','french','mongoose','speed','dddddd','hong','henry','hungry','yang','catdog','cheng','ghost','gogogo','randy','tottenha','curious','butterfl','mission','january','singer','sherman','shark','techno','lancer','lalala','autumn','chichi','orion','trixie','clifford','delta','bobbob','bomber','holden','kang','kiss','1968','spunky','liquid','mary','beagle','granny','network','bond','kkkkkk','millie','1973','biggie','beetle','teacher','susan','toronto','anakin','genius','dream','cocks','dang','bush','karate','snakes','bangkok','callie','fuckyou2','pacific','daytona','kelsey','infantry','skywalke','foster','felix','sailing','raistlin','vanhalen','huang','herbert','jacob','blackie','tarzan','strider','sherlock','lang','gong','sang','dietcoke','ultimate','tree','shai','sprite','ting','artist','chai','chao','devil','python','ninja','misty','ytrewq','sweetie','superfly','456789','tian','jing','jesus1','freedom1','dian','drpepper','potter','chou','darren','hobbit','violet','yong','shen','phillip','maurice','gloria','nolimit','mylove','biscuit','yahoo','shasta','sex4me','smoker','smile','pebbles','pics','philly','tong','tintin','lesbians','marlin','cactus','frank1','tttttt','chun','danni','emerald','showme','pirates','lian','dogg','colleen','xiao','xian','tazman','tanker','patton','toshiba','richie','alberto','gotcha','graham','dillon','rang','emily','keng','jazz','bigguy','yuan','woman','tomtom','marion','greg','chaos','fossil','flight','racerx','tuan','creamy','boss','bobo','musicman','warcraft','window','blade','shuang','sheila','shun','lick','jian','microsoft','rong','allen','feng','getsome','sally','quality','kennedy','morrison','1977','beng','wwwwww','yoyoyo','zhang','seng','teddy','joanna','andreas','harder','luke','qazxsw','qian','cong','chuan','deng','nang','boeing','keeper','western','isabelle','1963','subaru','sheng','thuglife','teng','jiong','miao','martina','mang','maniac','pussie','tracey','a1b2c3','clayton','zhou','zhuang','xing','stonecol','snow','spyder','liang','jiang','memphis','regina','ceng','magic1','logitech','chuang','dark','million','blow','sesame','shao','poison','titty','terry','kuan','kuai','kyle','mian','guan','hamster','guai','ferret','florence','geng','duan','pang','maiden','quan','velvet','nong','neng','nookie','buttons','bian','bingo','biao','zhong','zeng','xiong','zhun','ying','zong','xuan','zang','0.0.000','suan','shei','shui','sharks','shang','shua','small','peng','pian','piao','liao','meng','miami','reng','guang','cang','change','ruan','diao','luan','lucas','qing','chui','chuo','cuan','nuan','ning','heng','huan','kansas','muscle','monroe','weng','whitney','1passwor','bluemoon','zhui','zhua','xiang','zheng','zhen','zhei','zhao','zhan','yomama','zhai','zhuo','zuan','tarheel','shou','shuo','tiao','lady','leonard','leng','kuang','jiao','13579','basket','qiao','qiong','qiang','chuai','nian','niao','niang','huai','22222222','bianca','zhuan','zhuai','shuan','shuai','stardust','jumper','margaret','archie','66666666','charlott','forget','qwertz','bones','history','milton','waterloo','2002','stuff','11223344','office','oldman','preston','trains','murray','vertigo','246810','black1','swallow','smiles','standard','alexandr','parrot','luther','user','nicolas','1976','surfing','pioneer','pete','masters','apple1','asdasd','auburn','hannibal','frontier','panama','lucy','buffy','brianna','welcome1','vette','blue22','shemale','111222','baggins','groovy','global','turner','181818','1979','blades','spanking','l
ife','byteme','lobster','collins','dawg','hilton','japanese','1970','1964','2424','polo','markus','coco','deedee','mikey','1972','171717','1701','strip','jersey','green1','capital','sasha','sadie','putter','vader','seven7','lester','marcel','banshee','grendel','gilbert','dicks','dead','hidden','iloveu','1980','sound','ledzep','michel','147258','female','bugger','buffett','bryan','hell','kristina','molson','2020','wookie','sprint','thanks','jericho','102030','grace','fuckin','mandy','ranger1','trebor','deepthroat','bonehead','molly1','mirage','models','1984','2468','stuart','showtime','squirrel','pentium','mario','anime','gator','powder','twister','connect','neptune','bruno','butts','engine','eatshit','mustangs','woody1','shogun','septembe','pooh','jimbo','roger','annie','bacon','center','russian','sabine','damien','mollie','voyeur','2525','363636','leonardo','camel','chair','germany','giant','qqqq','nudist','bone','sleepy','tequila','megan','fighter','garrett','dominic','obiwan','makaveli','vacation','walnut','1974','ladybug','cantona','ccbill','satan','rusty1','passwor1','columbia','napoleon','dusty','kissme','motorola','william1','1967','zzzz','skater','smut','play','matthew1','robinson','valley','coolio','dagger','boner','bull','horndog','jason1','blake','penguins','rescue','griffey','8j4ye3uz','californ','champs','qwertyuiop','portland','queen','colt45','boat','xxxxxxx','xanadu','tacoma','mason','carpet','gggggg','safety','palace','italia','stevie','picturs','picasso','thongs','tempest','ricardo','roberts','asd123','hairy','foxtrot','gary','nimrod','hotboy','343434','1111111','asdfghjkl','goose','overlord','blood','wood','stranger','454545','shaolin','sooners','socrates','spiderman','peanuts','maxine','rogers','13131313','andrew1','filthy','donnie','ohyeah','africa','national','kenny','keith','monique','intrepid','jasmin','pickles','assass','fright','potato','darwin','hhhhhh','kingdom','weezer','424242','pepsi1','throat','romeo','gerard','looker','puppy','butch','monika','suzanne','sweets','temple','laurie','josh','megadeth','analsex','nymets','ddddddd','bigballs','support','stick','today','down','oakland','oooooo','qweasd','chucky','bridge','carrot','chargers','discover','dookie','condor','night','butler','hoover','horny1','isabella','sunrise','sinner','jojo','megapass','martini','assfuck','grateful','ffffff','abigail','esther','mushroom','janice','jamaica','wright','sims','space','there','timmy','7654321','77777','cccccc','gizmodo','roxanne','ralph','tractor','cristina','dance','mypass','hongkong','helena','1975','blue123','pissing','thomas1','redred','rich','basketball','attack','cash','satan666','drunk','dixie','dublin','bollox','kingkong','katrina','miles','1971','22222','272727','sexx','penelope','thompson','anything','bbbb','battle','grizzly','passat','porter','tracy','defiant','bowler','knickers','monitor','wisdom','wild','slappy','thor','letsgo','robert1','feet','rush','brownie','hudson','098765','playing','playtime','lightnin','melvin','atomic','bart','hawk','goku','glory','llllll','qwaszx','cosmos','bosco','knights','bentley','beast','slapshot','lewis','assword','frosty','gillian','sara','dumbass','mallard','dddd','deanna','elwood','wally','159357','titleist','angelo','aussie','guest','golfing','doobie','loveit','chloe','elliott','werewolf','vipers','janine','1965','blabla','surf','sucking','tardis','serena','shelley','thegame','legion','rebels','fernando','fast','gerald','sarah1','double','onelove','loulou','toto','crash','blackcat','0007','tacobell','soccer1','jedi','manuel
','method','river','chase','ludwig','poopie','derrick','boob','breast','kittycat','isabel','belly','pikachu','thunder1','thankyou','jose','celeste','celtics','frances','frogger','scoobydo','sabbath','coltrane','budman','willis','jackal','bigger','zzzzz','silvia','sooner','licking','gopher','geheim','lonestar','primus','pooper','newpass','brasil','heather1','husker','element','moomoo','beefcake','zzzzzzzz','tammy','shitty','smokin','personal','jjjj','anthony1','anubis','backup','gorilla','fuckface','painter','lowrider','punkrock','traffic','claude','daniela','dale','delta1','nancy','boys','easy','kissing','kelley','wendy','theresa','amazon','alan','fatass','dodgeram','dingdong','malcolm','qqqqqqqq','breasts','boots','honda1','spidey','poker','temp','johnjohn','miguel','147852','archer','asshole1','dogdog','tricky','crusader','weather','syracuse','spankme','speaker','meridian','amadeus','back','harley1','falcons','dorothy','turkey50','kenwood','keyboard','ilovesex','1978','blackman','shazam','shalom','lickit','jimbob','richmond','roller','carson','check','fatman','funny','garbage','sandiego','loving','magnus','cooldude','clover','mobile','bell','payton','plumber','texas1','tool','topper','jenna','mariners','rebel','harmony','caliente','celica','fletcher','german','diana','oxford','osiris','orgasm','punkin','porsche9','tuesday','close','breeze','bossman','kangaroo','billie','latinas','judith','astros','scruffy','donna','qwertyu','davis','hearts','kathy','jammer','java','springer','rhonda','ricky','1122','goodtime','chelsea1','freckles','flyboy','doodle','city','nebraska','bootie','kicker','webmaster','vulcan','iverson','191919','blueeyes','stoner','321321','farside','rugby','director','pussy69','power1','bobbie','hershey','hermes','monopoly','west','birdman','blessed','blackjac','southern','peterpan','thumbs','lawyer','melinda','fingers','fuckyou1','rrrrrr','a1b2c3d4','coke','nicola','bohica','heart','elvis1','kids','blacky','stories','sentinel','snake1','phoebe','jesse','richard1','1234abcd','guardian','candyman','fisting','scarlet','dildo','pancho','mandingo','lucky7','condom','munchkin','billyboy','summer1','student','sword','skiing','sergio','site','sony','thong','rootbeer','assassin','cassidy','frederic','fffff','fitness','giovanni','scarlett','durango','postal','achilles','dawn','dylan','kisses','warriors','imagine','plymouth','topdog','asterix','hallo','cameltoe','fuckfuck','bridget','eeeeee','mouth','weird','will','sithlord','sommer','toby','theking','juliet','avenger','backdoor','goodbye','chevrole','faith','lorraine','trance','cosworth','brad','houses','homers','eternity','kingpin','verbatim','incubus','1961','blond','zaphod','shiloh','spurs','station','jennie','maynard','mighty','aliens','hank','charly','running','dogman','omega1','printer','aggies','chocolate','deadhead','hope','javier','bitch1','stone55','pineappl','thekid','lizzie','rockets','ashton','camels','formula','forrest','rosemary','oracle','rain','pussey','porkchop','abcde','clancy','nellie','mystic','inferno','blackdog','steve1','pauline','alexander','alice','alfa','grumpy','flames','scream','lonely','puffy','proxy','valhalla','unreal','cynthia','herbie','engage','yyyyyy','010101','solomon','pistol','melody','celeb','flying','gggg','santiago','scottie','oakley','portugal','a12345','newbie','mmmm','venus','1qazxsw2','beverly','zorro','work','writer','stripper','sebastia','spread','phil','tobias','links','members','metal','1221','andre','565656','funfun','trojans','again','cyber','hurrican','moneys','1x2zkg8w','zeus','thi
ng','tomato','lion','atlantic','celine','usa123','trans','account','aaaaaaa','homerun','hyperion','kevin1','blacks','44444444','skittles','sean','hastings','fart','gangbang','fubar','sailboat','older','oilers','craig','conrad','church','damian','dean','broken','buster1','hithere','immortal','sticks','pilot','peters','lexmark','jerkoff','maryland','anders','cheers','possum','columbus','cutter','muppet','beautiful','stolen','swordfish','sport','sonic','peter1','jethro','rockon','asdfghj','pass123','paper','pornos','ncc1701a','bootys','buttman','bonjour','escape','1960','becky','bears','362436','spartans','tinman','threesom','lemons','maxmax','1414','bbbbb','camelot','chad','chewie','gogo','fusion','saint','dilligaf','nopass','myself','hustler','hunter1','whitey','beast1','yesyes','spank','smudge','pinkfloy','patriot','lespaul','annette','hammers','catalina','finish','formula1','sausage','scooter1','orioles','oscar1','over','colombia','cramps','natural','eating','exotic','iguana','bella','suckers','strong','sheena','start','slave','pearl','topcat','lancelot','angelica','magelan','racer','ramona','crunch','british','button','eileen','steph','456123','skinny','seeking','rockhard','chief','filter','first','freaks','sakura','pacman','poontang','dalton','newlife','homer1','klingon','watcher','walleye','tasha','tasty','sinatra','starship','steel','starbuck','poncho','amber1','gonzo','grover','catherin','carol','candle','firefly','goblin','scotch','diver','usmc','huskies','eleven','kentucky','kitkat','israel','beckham','bicycle','yourmom','studio','tara','33333333','shane','splash','jimmy1','reality','12344321','caitlin','focus','sapphire','mailman','raiders1','clark','ddddd','hopper','excalibu','more','wilbur','illini','imperial','phillips','lansing','maxx','gothic','golfball','carlton','camille','facial','front242','macdaddy','qwer1234','vectra','cowboys1','crazy1','dannyboy','jane','betty','benny','bennett','leader','martinez','aquarius','barkley','hayden','caught','franky','ffff','floyd','sassy','pppp','pppppppp','prodigy','clarence','noodle','eatpussy','vortex','wanking','beatrice','billy1','siemens','pedro','phillies','research','groups','carolyn','chevy1','cccc','fritz','gggggggg','doughboy','dracula','nurses','loco','madrid','lollipop','trout','utopia','chrono','cooler','conner','nevada','wibble','werner','summit','marco','marilyn','1225','babies','capone','fugazi','panda','mama','qazwsxed','puppies','triton','9876','command','nnnnnn','ernest','momoney','iforgot','wolfie','studly','shawn','renee','alien','hamburg','81fukkc','741852','catman','china','forgot','gagging','scott1','drew','oregon','qweqwe','train','crazybab','daniel1','cutlass','brothers','holes','heidi','mothers','music1','what','walrus','1957','bigtime','bike','xtreme','simba','ssss','rookie','angie','bathing','fresh','sanchez','rotten','maestro','luis','look','turbo1','99999','butthole','hhhh','elijah','monty','bender','yoda','shania','shock','phish','thecat','rightnow','reagan','baddog','asia','greatone','gateway1','randall','abstr','napster','brian1','bogart','high','hitler','emma','kill','weaver','wildfire','jackson1','isaiah','1981','belinda','beaner','yoyo','0.0.0.000','super1','select','snuggles','slutty','some','phoenix1','technics','toon','raven1','rayray','123789','1066','albion','greens','fashion','gesperrt','santana','paint','powell','credit','darling','mystery','bowser','bottle','brucelee','hehehe','kelly1','mojo','1998','bikini','woofwoof','yyyy','strap','sites','spears','theodore','julius','richards','amelia','cen
tral','f**k','nyjets','punisher','username','vanilla','twisted','bryant','brent','bunghole','here','elizabeth','erica','kimber','viagra','veritas','pony','pool','titts','labtec','lifetime','jenny1','masterbate','mayhem','redbull','govols','gremlin','505050','gmoney','rupert','rovers','diamond1','lorenzo','trident','abnormal','davidson','deskjet','cuddles','nice','bristol','karina','milano','vh5150','jarhead','1982','bigbird','bizkit','sixers','slider','star69','starfish','penetration','tommy1','john316','meghan','michaela','market','grant','caligula','carl','flicks','films','madden','railroad','cosmo','cthulhu','bradford','br0d3r','military','bearbear','swedish','spawn','patrick1','polly','these','todd','reds','anarchy','groove','franco','fuckher','oooo','tyrone','vegas','airbus','cobra1','christine','clips','delete','duster','kitty1','mouse1','monkeys','jazzman','1919','262626','swinging','stroke','stocks','sting','pippen','labrador','jordan1','justdoit','meatball','females','saturday','park','vector','cooter','defender','desert','demon','nike','bubbas','bonkers','english','kahuna','wildman','4121','sirius','static','piercing','terror','teenage','leelee','marissa','microsof','mechanic','robotech','rated','hailey','chaser','sanders','salsero','nuts','macross','quantum','rachael','tsunami','universe','daddy1','cruise','nguyen','newpass6','nudes','hellyeah','vernon','1959','zaq12wsx','striker','sixty','steele','spice','spectrum','smegma','thumb','jjjjjjjj','mellow','astrid','cancun','cartoon','sabres','samiam','pants','oranges','oklahoma','lust','coleman','denali','nude','noodles','buzz','brest','hooter','mmmmmmmm','warthog','bloody','blueblue','zappa','wolverine','sniffing','lance','jean','jjjjj','harper','calico','freee','rover','door','pooter','closeup','bonsai','evelyn','emily1','kathryn','keystone','iiii','1955','yzerman','theboss','tolkien','jill','megaman','rasta','bbbbbbbb','bean','handsome','hal9000','goofy','gringo','gofish','gizmo1','samsam','scuba','onlyme','tttttttt','corrado','clown','clapton','deborah','boris','bulls','vivian','jayhawk','bethany','wwww','sharky','seeker','ssssssss','somethin','pillow','thesims','lighter','lkjhgf','melissa1','marcius2','barry','guiness','gymnast','casey1','goalie','godsmack','doug','lolo','rangers1','poppy','abby','clemson','clipper','deeznuts','nobody','holly1','elliot','eeee','kingston','miriam','belle','yosemite','sucked','sex123','sexy69','pic\'s','tommyboy','lamont','meat','masterbating','marianne','marc','gretzky','happyday','frisco','scratch','orchid','orange1','manchest','quincy','unbelievable','aberdeen','dawson','nathalie','ne1469','boxing','hill','korn','intercourse','161616','1985','ziggy','supersta','stoney','senior','amature','barber','babyboy','bcfields','goliath','hack','hardrock','children','frodo','scout','scrappy','rosie','qazqaz','tracker','active','craving','commando','cohiba','deep','cyclone','dana','bubba69','katie1','mpegs','vsegda','jade','irish1','better','sexy1','sinclair','smelly','squerting','lions','jokers','jeanette','julia','jojojo','meathead','ashley1','groucho','cheetah','champ','firefox','gandalf1','packer','magnolia','love69','tyler1','typhoon','tundra','bobby1','kenworth','village','volley','beth','wolf359','0420','000007','swimmer','skydive','smokes','patty','peugeot','pompey','legolas','kristy','redhot','rodman','redalert','having','grapes','4runner','carrera','floppy','dollars','ou8122','quattro','adams','cloud9','davids','nofear','busty','homemade','mmmmm','whisper','vermont','webmaste','wives','insertion
','jayjay','philips','phone','topher','tongue','temptress','midget','ripken','havefun','gretchen','canon','celebrity','five','getting','ghetto','direct','otto','ragnarok','trinidad','usnavy','conover','cruiser','dalshe','nicole1','buzzard','hottest','kingfish','misfit','moore','milfnew','warlord','wassup','bigsexy','blackhaw','zippy','shearer','tights','thursday','kungfu','labia','journey','meatloaf','marlene','rider','area51','batman1','bananas','636363','cancel','ggggg','paradox','mack','lynn','queens','adults','aikido','cigars','nova','hoosier','eeyore','moose1','warez','interacial','streaming','313131','pertinant','pool6123','mayday','rivers','revenge','animated','banker','baddest','gordon24','ccccc','fortune','fantasies','touching','aisan','deadman','homepage','ejaculation','whocares','iscool','jamesbon','1956','1pussy','womam','sweden','skidoo','spock','sssss','petra','pepper1','pinhead','micron','allsop','amsterda','army','aside','gunnar','666999','chip','foot','fowler','february','face','fletch','george1','sapper','science','sasha1','luckydog','lover1','magick','popopo','public','ultima','derek','cypress','booker','businessbabe','brandon1','edwards','experience','vulva','vvvv','jabroni','bigbear','yummy','010203','searay','secret1','showing','sinbad','sexxxx','soleil','software','piccolo','thirteen','leopard','legacy','jensen','justine','memorex','marisa','mathew','redwing','rasputin','134679','anfield','greenbay','gore','catcat','feather','scanner','pa55word','contortionist','danzig','daisy1','hores','erik','exodus','vinnie','iiiiii','zero','1001','subway','tank','second','snapple','sneakers','sonyfuck','picks','poodle','test1234','their','llll','junebug','june','marker','mellon','ronaldo','roadkill','amanda1','asdfjkl','beaches','greene','great1','cheerleaers','force','doitnow','ozzy','madeline','radio','tyson','christian','daphne','boxster','brighton','housewifes','emmanuel','emerson','kkkk','mnbvcx','moocow','vides','wagner','janet','1717','bigmoney','blonds','1000','storys','stereo','4545','420247','seductive','sexygirl','lesbean','live','justin1','124578','animals','balance','hansen','cabbage','canadian','gangbanged','dodge1','dimas','lori','loud','malaka','puss','probes','adriana','coolman','crawford','dante','nacked','hotpussy','erotica','kool','mirror','wearing','implants','intruder','bigass','zenith','woohoo','womans','tanya','tango','stacy','pisces','laguna','krystal','maxell','andyod22','barcelon','chainsaw','chickens','flash1','downtown','orgasms','magicman','profit','pusyy','pothead','coconut','chuckie','contact','clevelan','designer','builder','budweise','hotshot','horizon','hole','experienced','mondeo','wifes','1962','strange','stumpy','smiths','sparks','slacker','piper','pitchers','passwords','laptop','jeremiah','allmine','alliance','bbbbbbb','asscock','halflife','grandma','hayley','88888','cecilia','chacha','saratoga','sandy1','santos','doogie','number','positive','qwert40','transexual','crow','close-up','darrell','bonita','ib6ub9','volvo','jacob1','iiiii','beastie','sunnyday','stoned','sonics','starfire','snapon','pictuers','pepe','testing1','tiberius','lisalisa','lesbain','litle','retard','ripple','austin1','badgirl','golfgolf','flounder','garage','royals','dragoon','dickie','passwor','ocean','majestic','poppop','trailers','dammit','nokia','bobobo','br549','emmitt','knock','minime','mikemike','whitesox','1954','3232','353535','seamus','solo','sparkle','sluttey','pictere','titten','lback','1024','angelina','goodluck','charlton','fingerig','gallaries','goat','ruby'
,'passme','oasis','lockerroom','logan1','rainman','twins','treasure','absolutely','club','custom','cyclops','nipper','bucket','homepage-','hhhhh','momsuck','indain','2345','beerbeer','bimmer','susanne','stunner','stevens','456456','shell','sheba','tootsie','tiny','testerer','reefer','really','1012','harcore','gollum','545454','chico','caveman','carole','fordf150','fishes','gaymen','saleen','doodoo','pa55w0rd','looney','presto','qqqqq','cigar','bogey','brewer','helloo','dutch','kamikaze','monte','wasser','vietnam','visa','japanees','0123','swords','slapper','peach','jump','marvel','masterbaiting','march','redwood','rolling','1005','ametuer','chiks','cathy','callaway','fucing','sadie1','panasoni','mamas','race','rambo','unknown','absolut','deacon','dallas1','housewife','kristi','keywest','kirsten','kipper','morning','wings','idiot','18436572','1515','beating','zxczxc','sullivan','303030','shaman','sparrow','terrapin','jeffery','masturbation','mick','redfish','1492','angus','barrett','goirish','hardcock','felicia','forfun','galary','freeporn','duchess','olivier','lotus','pornographic','ramses','purdue','traveler','crave','brando','enter1','killme','moneyman','welder','windsor','wifey','indon','yyyyy','stretch','taylor1','4417','shopping','picher','pickup','thumbnils','johnboy','jets','jess','maureen','anne','ameteur','amateurs','apollo13','hambone','goldwing','5050','charley','sally1','doghouse','padres','pounding','quest','truelove','underdog','trader','crack','climber','bolitas','bravo','hohoho','model','italian','beanie','beretta','wrestlin','stroker','tabitha','sherwood','sexyman','jewels','johannes','mets','marcos','rhino','bdsm','balloons','goodman','grils','happy123','flamingo','games','route66','devo','dino','outkast','paintbal','magpie','llllllll','twilight','critter','christie','cupcake','nickel','bullseye','krista','knickerless','mimi','murder','videoes','binladen','xerxes','slim','slinky','pinky','peterson','thanatos','meister','menace','ripley','retired','albatros','balloon','bank','goten','5551212','getsdown','donuts','divorce','nwo4life','lord','lost','underwear','tttt','comet','deer','damnit','dddddddd','deeznutz','nasty1','nonono','nina','enterprise','eeeee','misfit99','milkman','vvvvvv','isaac','1818','blueboy','beans','bigbutt','wyatt','tech','solution','poetry','toolman','laurel','juggalo','jetski','meredith','barefoot','50spanks','gobears','scandinavian','original','truman','cubbies','nitram','briana','ebony','kings','warner','bilbo','yumyum','zzzzzzz','stylus','321654','shannon1','server','secure','silly','squash','starman','steeler','staples','phrases','techniques','laser','135790','allan','barker','athens','cbr600','chemical','fester','gangsta','fucku2','freeze','game','salvador','droopy','objects','passwd','lllll','loaded','louis','manchester','losers','vedder','clit','chunky','darkman','damage','buckshot','buddah','boobed','henti','hillary','webber','winter1','ingrid','bigmike','beta','zidane','talon','slave1','pissoff','person','thegreat','living','lexus','matador','readers','riley','roberta','armani','ashlee','goldstar','5656','cards','fmale','ferris','fuking','gaston','fucku','ggggggg','sauron','diggler','pacers','looser','pounded','premier','pulled','town','trisha','triangle','cornell','collin','cosmic','deeper','depeche','norway','bright','helmet','kristine','kendall','mustard','misty1','watch','jagger','bertie','berger','word','3x7pxr','silver1','smoking','snowboar','sonny','paula','penetrating','photoes','lesbens','lambert','lindros','lillian','roadking','rockf
ord','1357','143143','asasas','goodboy','898989','chicago1','card','ferrari1','galeries','godfathe','gawker','gargoyle','gangster','rubble','rrrr','onetime','pussyman','pooppoop','trapper','twenty','abraham','cinder','company','newcastl','boricua','bunny1','boxer','hotred','hockey1','hooper','edward1','evan','kris','misery','moscow','milk','mortgage','bigtit','show','snoopdog','three','lionel','leanne','joshua1','july','1230','assholes','cedric','fallen','farley','gene','frisky','sanity','script','divine','dharma','lucky13','property','tricia','akira','desiree','broadway','butterfly','hunt','hotbox','hootie','heat','howdy','earthlink','karma','kiteboy','motley','westwood','1988','bert','blackbir','biggles','wrench','working','wrestle','slippery','pheonix','penny1','pianoman','tomorrow','thedude','jenn','jonjon','jones1','mattie','memory','micheal','roadrunn','arrow','attitude','azzer','seahawks','diehard','dotcom','lola','tunafish','chivas','cinnamon','clouds','deluxe','northern','nuclear','north','boom','boobie','hurley','krishna','momomo','modles','volume','23232323','bluedog','wwwwwww','zerocool','yousuck','pluto','limewire','link','joung','marcia','awnyce','gonavy','haha','films+pic+galeries','fabian','francois','girsl','fuckthis','girfriend','rufus','drive','uncencored','a123456','airport','clay','chrisbln','combat','cygnus','cupoi','never','netscape','brett','hhhhhhhh','eagles1','elite','knockers','kendra','mommy','1958','tazmania','shonuf','piano','pharmacy','thedog','lips','jillian','jenkins','midway','arsenal1','anaconda','australi','gromit','gotohell','787878','66666','carmex2','camber','gator1','ginger1','fuzzy','seadoo','dorian','lovesex','rancid','uuuuuu','911911','nature','bulldog1','helen','health','heater','higgins','kirk','monalisa','mmmmmmm','whiteout','virtual','ventura','jamie1','japanes','james007','2727','2469','blam','bitchass','believe','zephyr','stiffy','sweet1','silent','southpar','spectre','tigger1','tekken','lenny','lakota','lionking','jjjjjjj','medical','megatron','1369','hawaiian','gymnastic','golfer1','gunners','7779311','515151','famous','glass','screen','rudy','royal','sanfran','drake','optimus','panther1','love1','mail','maggie1','pudding','venice','aaron1','delphi','niceass','bounce','busted','house1','killer1','miracle','momo','musashi','jammin','2003','234567','wp2003wp','submit','silence','sssssss','state','spikes','sleeper','passwort','toledo','kume','media','meme','medusa','mantis','remote','reading','reebok','1017','artemis','hampton','harry1','cafc91','fettish','friendly','oceans','oooooooo','mango','ppppp','trainer','troy','uuuu','909090','cross','death1','news','bullfrog','hokies','holyshit','eeeeeee','mitch','jasmine1','&','&','sergeant','spinner','leon','jockey','records','right','babyblue','hans','gooner','474747','cheeks','cars','candice','fight','glow','pass1234','parola','okokok','pablo','magical','major','ramsey','poseidon','989898','confused','circle','crusher','cubswin','nnnn','hollywood','erin','kotaku','milo','mittens','whatsup','vvvvv','iomega','insertions','bengals','bermuda','biit','yellow1','012345','spike1','south','sowhat','pitures','peacock','pecker','theend','juliette','jimmie','romance','augusta','hayabusa','hawkeyes','castro','florian','geoffrey','dolly','lulu','qaz123','usarmy','twinkle','cloud','chuckles','cold','hounddog','hover','hothot','europa','ernie','kenshin','kojak','mikey1','water1','196969','because','wraith','zebra','wwwww','33333','simon1','spider1','snuffy','philippe','thunderb','teddy1','lesley','marino13','mar
ia1','redline','renault','aloha','antoine','handyman','cerberus','gamecock','gobucks','freesex','duffman','ooooo','papa','nuggets','magician','longbow','preacher','porno1','county','chrysler','contains','dalejr','darius','darlene','dell','navy','buffy1','hedgehog','hoosiers','honey1','hott','heyhey','europe','dutchess','everest','wareagle','ihateyou','sunflowe','3434','senators','shag','spoon','sonoma','stalker','poochie','terminal','terefon','laurence','maradona','maryann','marty','roman','1007','142536','alibaba','america1','bartman','astro','goth','century','chicken1','cheater','four','ghost1','passpass','oral','r2d2c3po','civic','cicero','myxworld','kkkkk','missouri','wishbone','infiniti','jameson','1a2b3c','1qwerty','wonderboy','skip','shojou','stanford','sparky1','smeghead','poiuy','titanium','torres','lantern','jelly','jeanne','meier','1213','bayern','basset','gsxr750','cattle','charlene','fishing1','fullmoon','gilles','dima','obelix','popo','prissy','ramrod','unique','absolute','bummer','hotone','dynasty','entry','konyor','missy1','moses','282828','yeah','xyz123','stop','426hemi','404040','seinfeld','simmons','pingpong','lazarus','matthews','marine1','manning','recovery','12345a','beamer','babyface','greece','gustav','7007','charity','camilla','ccccccc','faggot','foxy','frozen','gladiato','duckie','dogfood','paranoid','packers1','longjohn','radical','tuna','clarinet','claudio','circus','danny1','novell','nights','bonbon','kashmir','kiki','mortimer','modelsne','moondog','monaco','vladimir','insert','1953','zxc123','supreme','3131','sexxx','selena','softail','poipoi','pong','together','mars','martin1','rogue','alone','avalanch','audia4','55bgates','cccccccc','chick','came11','figaro','geneva','dogboy','dnsadm','dipshit','paradigm','othello','operator','officer','malone','post','rafael','valencia','tripod','choice','chopin','coucou','coach','cocksuck','common','creature','borussia','book','browning','heritage','hiziad','homerj','eight','earth','millions','mullet','whisky','jacques','store','4242','speedo','starcraf','skylar','spaceman','piggy','pierce','tiger2','legos','lala','jezebel','judy','joker1','mazda','barton','baker','727272','chester1','fishman','food','rrrrrrrr','sandwich','dundee','lumber','magazine','radar','ppppppp','tranny','aaliyah','admiral','comics','cleo','delight','buttfuck','homeboy','eternal','kilroy','kellie','khan','violin','wingman','walmart','bigblue','blaze','beemer','beowulf','bigfish','yyyyyyy','woodie','yeahbaby','0123456','tbone','style','syzygy','starter','lemon','linda1','merlot','mexican','11235813','anita','banner','bangbang','badman','barfly','grease','carla','charles1','ffffffff','screw','doberman','diane','dogshit','overkill','counter','coolguy','claymore','demons','demo','nomore','normal','brewster','hhhhhhh','hondas','iamgod','enterme','everett','electron','eastside','kayla','minimoni','mybaby','wildbill','wildcard','ipswich','200000','bearcat','zigzag','yyyyyyyy','xander','sweetnes','369369','skyler','skywalker','pigeon','peyton','tipper','lilly','asdf123','alphabet','asdzxc','babybaby','banane','barnes','guyver','graphics','grand','chinook','florida1','flexible','fuckinside','otis','ursitesux','tototo','trust','tower','adam12','christma','corey','chrome','buddie','bombers','bunker','hippie','keegan','misfits','vickie','292929','woofer','wwwwwwww','stubby','sheep','secrets','sparta','stang','spud','sporty','pinball','jorge','just4fun','johanna','maxxxx','rebecca1','gunther','fatima','fffffff','freeway','garion','score','rrrrr','sancho','outback'
,'maggot','puddin','trial','adrienne','987456','colton','clyde','brain','brains','hoops','eleanor','dwayne','kirby','mydick','villa','19691969','bigcat','becker','shiner','silverad','spanish','templar','lamer','juicy','marsha','mike1','maximum','rhiannon','real','1223','10101010','arrows','andres','alucard','baldwin','baron','avenue','ashleigh','haggis','channel','cheech','safari','ross','dog123','orion1','paloma','qwerasdf','presiden','vegitto','trees','969696','adonis','colonel','cookie1','newyork1','brigitte','buddyboy','hellos','heineken','dwight','eraser','kerstin','motion','moritz','millwall','visual','jaybird','1983','beautifu','bitter','yvette','zodiac','steven1','sinister','slammer','smashing','slick1','sponge','teddybea','theater','this','ticklish','lipstick','jonny','massage','mann','reynolds','ring','1211','amazing','aptiva','applepie','bailey1','guitar1','chanel','canyon','gagged','fuckme1','rough','digital1','dinosaur','punk','98765','90210','clowns','cubs','daniels','deejay','nigga','naruto','boxcar','icehouse','hotties','electra','kent','widget','india','insanity','1986','2004','best','bluefish','bingo1','*****','stratus','strength','sultan','storm1','44444','4200','sentnece','season','sexyboy','sigma','smokie','spam','point','pippo','ticket','temppass','joel','manman','medicine','1022','anton','almond','bacchus','aztnm','axio','awful','bamboo','hakr','gregor','hahahaha','5678','casanova','caprice','camero1','fellow','fountain','dupont','dolphin1','dianne','paddle','magnet','qwert1','pyon','porsche1','tripper','vampires','coming','noway','burrito','bozo','highheel','hughes','hookem','eddie1','ellie','entropy','kkkkkkkk','kkkkkkk','illinois','jacobs','1945','1951','24680','21212121','100000','stonecold','taco','subzero','sharp','sexxxy','skolko','shanna','skyhawk','spurs1','sputnik','piazza','testpass','letter','lane','kurt','jiggaman','matilda','1224','harvard','hannah1','525252','4ever','carbon','chef','federico','ghosts','gina','scorpio1','rt6ytere','madison1','loki','raquel','promise','coolness','christina','coldbeer','citadel','brittney','highway','evil','monarch','morgan1','washingt','1997','bella1','berry','yaya','yolanda','superb','taxman','studman','stephanie','3636','sherri','sheriff','shepherd','poland','pizzas','tiffany1','toilet','latina','lassie','larry1','joseph1','mephisto','meagan','marian','reptile','rico','razor','1013','barron','hammer1','gypsy','grande','carroll','camper','chippy','cat123','call','chimera','fiesta','glock','glenn','domain','dieter','dragonba','onetwo','nygiants','odessa','password2','louie','quartz','prowler','prophet','towers','ultra','cocker','corleone','dakota1','cumm','nnnnnnn','natalia','boxers','hugo','heynow','hollow','iceberg','elvira','kittykat','kate','kitchen','wasabi','vikings1','impact','beerman','string','sleep','splinter','snoopy1','pipeline','pocket','legs','maple','mickey1','manuela','mermaid','micro','meowmeow','redbird','alisha','baura','battery','grass','chevys','chestnut','caravan','carina','charmed','fraser','frogman','diving','dogger','draven','drifter','oatmeal','paris1','longdong','quant4307s','rachel1','vegitta','cole','cobras','corsair','dadada','noelle','mylife','nine','bowwow','body','hotrats','eastwood','moonligh','modena','wave','illusion','iiiiiii','jayhawks','birgit','zone','sutton','susana','swingers','shocker','shrimp','sexgod','squall','stefanie','squeeze','soul','patrice','poiu','players','tigers1','toejam','tickler','line','julie1','jimbo1','jefferso','juanita','michael2','rodeo','robot','1023','annie
1','bball','guess','happy2','charter','farm','flasher','falcon1','fiction','fastball','gadget','scrabble','diaper','dirtbike','dinner','oliver1','partner','paco','lucille','macman','poopy','popper','postman','ttttttt','ursula','acura','cowboy1','conan','daewoo','cyrus','customer','nation','nemrac58','nnnnn','nextel','bolton','bobdylan','hopeless','eureka','extra','kimmie','kcj9wx5n','killbill','musica','volkswag','wage','windmill','wert','vintage','iloveyou1','itsme','bessie','zippo','311311','starligh','smokey1','spot','snappy','soulmate','plasma','thelma','tonight','krusty','just4me','mcdonald','marius','rochelle','rebel1','1123','alfredo','aubrey','audi','chantal','fick','goaway','roses','sales','rusty2','dirt','dogbone','doofus','ooooooo','oblivion','mankind','luck','mahler','lllllll','pumper','puck','pulsar','valkyrie','tupac','compass','concorde','costello','cougars','delaware','niceguy','nocturne','bob123','boating','bronze','hopkins','herewego','hewlett','houhou','hubert','earnhard','eeeeeeee','keller','mingus','mobydick','venture','verizon','imation','1950','1948','1949','223344','bigbig','blossom','zack','wowwow','sissy','skinner','spiker','square','snooker','sluggo','player1','junk','jeannie','jsbach','jumbo','jewel','medic','robins','reddevil','reckless','123456a','1125','1031','beacon','astra','gumby','hammond','hassan','757575','585858','chillin','fuck1','sander','lowell','radiohea','upyours','trek','courage','coolcool','classics','choochoo','darryl','nikki1','nitro','bugs','boytoy','ellen','excite','kirsty','kane','wingnut','wireless','icu812','1master','beatle','bigblock','blanca','wolfen','summer99','sugar1','tartar','sexysexy','senna','sexman','sick','someone','soprano','pippin','platypus','pixies','telephon','land','laura1','laurent','rimmer','road','report','1020','12qwaszx','arturo','around','hamish','halifax','fishhead','forum','dododo','doit','outside','paramedi','lonesome','mandy1','twist','uuuuu','uranus','ttttt','butcher','bruce1','helper','hopeful','eduard','dusty1','kathy1','katherin','moonbeam','muscles','monster1','monkeybo','morton','windsurf','vvvvvvv','vivid','install','1947','187187','1941','1952','tatiana','susan1','31415926','sinned','sexxy','senator','sebastian','shadows','smoothie','snowflak','playstat','playa','playboy1','toaster','jerry1','marie1','mason1','merlin1','roger1','roadster','112358','1121','andrea1','bacardi','auto','hardware','hardy','789789','5555555','captain1','flores','fergus','sascha','rrrrrrr','dome','onion','nutter','lololo','qqqqqqq','quick','undertak','uuuuuuuu','uuuuuuu','criminal','cobain','cindy1','coors','dani','descent','nimbus','nomad','nanook','norwich','bomb','bombay','broker','hookup','kiwi','winners','jackpot','1a2b3c4d','1776','beardog','bighead','blast','bird33','0987','stress','shot','spooge','pelican','peepee','perry','pointer','titan','thedoors','jeremy1','annabell','altima','baba','hallie','hate','hardone','5454','candace','catwoman','flip','faithful','finance','farmboy','farscape','genesis1','salomon','destroy','papers','option','page','loser1','lopez','r2d2','pumpkins','training','chriss','cumcum','ninjas','ninja1','hung','erika','eduardo','killers','miller1','islander','jamesbond','intel','jarvis','19841984','2626','bizzare','blue12','biker','yoyoma','sushi','styles','shitface','series','shanti','spanker','steffi','smart','sphinx','please1','paulie','pistons','tiburon','limited','maxwell1','mdogg','rockies','armstron','alexia','arlene','alejandr','arctic','banger','audio','asimov','augustus','grandpa','753951',
'4you','chilly','care1839','chapman','flyfish','fantasia','freefall','santa','sandrine','oreo','ohshit','macbeth','madcat','loveya','mallory','rage','quentin','qwerqwer','project','ramirez','colnago','citizen','chocha','cobalt','crystal1','dabears','nevets','nineinch','broncos1','helene','huge','edgar','epsilon','easter','kestrel','moron','virgil','winston1','warrior1','iiiiiiii','iloveyou2','1616','beat','bettina','woowoo','zander','straight','shower','sloppy','specialk','tinkerbe','jellybea','reader','romero','redsox1','ride','1215','1112','annika','arcadia','answer','baggio','base','guido','555666','carmel','cayman','cbr900rr','chips','gabriell','gertrude','glennwei','roxy','sausages','disco','pass1','luna','lovebug','macmac','queenie','puffin','vanguard','trip','trinitro','airwolf','abbott','aaa111','cocaine','cisco','cottage','dayton','deadly','datsun','bricks','bumper','eldorado','kidrock','wizard1','whiskers','wind','wildwood','istheman','interest','italy','25802580','benoit','bigones','woodland','wolfpac','strawber','suicide','3030','sheba1','sixpack','peace1','physics','pearson','tigger2','toad','megan1','meow','ringo','roll','amsterdam','717171','686868','5424','catherine','canuck','football1','footjob','fulham','seagull','orgy','lobo','mancity','truth','trace','vancouve','vauxhall','acidburn','derf','myspace1','boozer','buttercu','howell','hola','easton','minemine','munch','jared','1dragon','biology','bestbuy','bigpoppa','blackout','blowfish','bmw325','bigbob','stream','talisman','tazz','sundevil','3333333','skate','shutup','shanghai','shop','spencer1','slowhand','polish','pinky1','tootie','thecrow','leroy','jonathon','jubilee','jingle','martine','matrix1','manowar','michaels','messiah','mclaren','resident','reilly','redbaron','rollins','romans','return','rivera','andromed','athlon','beach1','badgers','guitars','harald','harddick','gotribe','6996','7grout','5wr2i7h8','635241','chase1','carver','charlotte','fallout','fiddle','fredrick','fenris','francesc','fortuna','ferguson','fairlane','felipe','felix1','forward','gasman','frost','fucks','sahara','sassy1','dogpound','dogbert','divx1','manila','loretta','priest','pornporn','quasar','venom','987987','access1','clippers','daylight','decker','daman','data','dentist','crusty','nathan1','nnnnnnnn','bruno1','bucks','brodie','budapest','kittens','kerouac','mother1','waldo1','wedding','whistler','whatwhat','wanderer','idontkno','1942','1946','bigdawg','bigpimp','zaqwsx','414141','3000gt','434343','shoes','serpent','starr','smurf','pasword','tommie','thisisit','lake','john1','robotics','redeye','rebelz','1011','alatam','asses','asians','bama','banzai','harvest','gonzalez','hair','hanson','575757','5329','cascade','chinese','fatty','fender1','flower2','funky','sambo','drummer1','dogcat','dottie','oedipus','osama','macleod','prozac','private1','rampage','punch','presley','concord','cook','cinema','cornwall','cleaner','christopher','ciccio','corinne','clutch','corvet07','daemon','bruiser','boiler','hjkl','eyes','egghead','expert','ethan','kasper','mordor','wasted','jamess','iverson3','bluesman','zouzou','090909','1002','switch','stone1','4040','sisters','sexo','shawna','smith1','sperma','sneaky','polska','thewho','terminat','krypton','lawson','library','lekker','jules','johnson1','johann','justus','rockie','romano','aspire','bastards','goodie','cheese1','fenway','fishon','fishin','fuckoff1','girls1','sawyer','dolores','desmond','duane','doomsday','pornking','ramones','rabbits','transit','aaaaa1','clock','delilah','noel','boyz','bookworm','bon
go','bunnies','brady','buceta','highbury','henry1','heels','eastern','krissy','mischief','mopar','ministry','vienna','weston','wildone','vodka','jayson','bigbooty','beavis1','betsy','xxxxxx1','yogibear','000001','0815','zulu','420000','september','sigmar','sprout','stalin','peggy','patch','lkjhgfds','lagnaf','rolex','redfox','referee','123123123','1231','angus1','ariana','ballin','attila','hall','greedy','grunt','747474','carpedie','cecile','caramel','foxylady','field','gatorade','gidget','futbol','frosch','saiyan','schmidt','drums','donner','doggy1','drum','doudou','pack','pain','nutmeg','quebec','valdepen','trash','triple','tosser','tuscl','track','comfort','choke','comein','cola','deputy','deadpool','bremen','borders','bronson','break','hotass','hotmail1','eskimo','eggman','koko','kieran','katrin','kordell1','komodo','mone','munich','vvvvvvvv','winger','jaeger','ivan','jackson5','2222222','bergkamp','bennie','bigben','zanzibar','worm','xxx123','sunny1','373737','services','sheridan','slater','slayer1','snoop','stacie','peachy','thecure','times','little1','jennaj','marquis','middle','rasta69','1114','aries','havana','gratis','calgary','checkers','flanker','salope','dirty1','draco','dogface','luv2epus','rainbow6','qwerty123','umpire','turnip','vbnm','tucson','troll','aileen','codered','commande','damon','nana','neon','nico','nightwin','neil','boomer1','bushido','hotmail0','horace','enternow','kaitlyn','keepout','karen1','mindy','mnbv','viewsoni','volcom','wizards','wine','1995','berkeley','bite','zach','woodstoc','tarpon','shinobi','starstar','phat','patience','patrol','toolbox','julien','johnny1','joebob','marble','riders','reflex','120676','1235','angelus','anthrax','atlas','hawks','grandam','harlem','hawaii50','gorgeous','655321','cabron','challeng','callisto','firewall','firefire','fischer','flyer','flower1','factory','federal','gambler','frodo1','funk','sand','sam123','scania','dingo','papito','passmast','olive','palermo','ou8123','lock','ranch','pride','randy1','twiggy','travis1','transfer','treetop','addict','admin1','963852','aceace','clarissa','cliff','cirrus','clifton','colin','bobdole','bonner','bogus','bonjovi','bootsy','boater','elway7','edison','kelvin','kenny1','moonshin','montag','moreno','wayne1','white1','jazzy','jakejake','1994','1991','2828','blunt','bluejays','beau','belmont','worthy','systems','sensei','southpark','stan','peeper','pharao','pigpen','tomahawk','teensex','leedsutd','larkin','jermaine','jeepster','jimjim','josephin','melons','marlon','matthias','marriage','robocop','1003','1027','antelope','azsxdc','gordo','hazard','granada','8989','7894','ceasar','cabernet','cheshire','california','chelle','candy1','fergie','fanny','fidelio','giorgio','fuckhead','ruth','sanford','diego','dominion','devon','panic','longer','mackie','qawsed','trucking','twelve','chloe1','coral','daddyo','nostromo','boyboy','booster','bucky','honolulu','esquire','dynamite','motor','mollydog','wilder','windows1','waffle','wallet','warning','virus','washburn','wealth','vincent1','jabber','jaguars','javelin','irishman','idefix','bigdog1','blue42','blanked','blue32','biteme1','bearcats','blaine','yessir','sylveste','team','stephan','sunfire','tbird','stryker','3ip76k2','sevens','sheldon','pilgrim','tenchi','titman','leeds','lithium','lander','linkin','landon','marijuan','mariner','markie','midnite','reddwarf','1129','123asd','12312312','allstar','albany','asdf12','antonia','aspen','hardball','goldfing','7734','49ers','carlo','chambers','cable','carnage','callum','carlos1','fitter','fandango','f
estival','flame','gofast','gamma','fucmy69','scrapper','dogwood','django','magneto','loose','premium','addison','9999999','abc1234','cromwell','newyear','nichole','bookie','burns','bounty','brown1','bologna','earl','entrance','elway','killjoy','kerry','keenan','kick','klondike','mini','mouser','mohammed','wayer','impreza','irene','insomnia','24682468','2580','24242424','billbill','bellaco','blessing','blues1','bedford','blanco','blunts','stinks','teaser','streets','sf49ers','shovel','solitude','spikey','sonia','pimpdadd','timeout','toffee','lefty','johndoe','johndeer','mega','manolo','mentor','margie','ratman','ridge','record','rhodes','robin1','1124','1210','1028','1226','another','babylove','barbados','harbor','gramma','646464','carpente','chaos1','fishbone','fireblad','glasgow','frogs','scissors','screamer','salem','scuba1','ducks','driven','doggies','dicky','donovan','obsidian','rams','progress','tottenham','aikman','comanche','corolla','clarke','conway','cumslut','cyborg','dancing','boston1','bong','houdini','helmut','elvisp','edge','keksa12','misha','monty1','monsters','wetter','watford','wiseguy','veronika','visitor','janelle','1989','1987','20202020','biatch','beezer','bigguns','blueball','bitchy','wyoming','yankees2','wrestler','stupid1','sealteam','sidekick','simple1','smackdow','sporting','spiral','smeller','sperm','plato','tophat','test2','theatre','thick','toomuch','leigh','jello','jewish','junkie','maxim','maxime','meadow','remingto','roofer','124038','1018','1269','1227','123457','arkansas','alberta','aramis','andersen','beaker','barcelona','baltimor','googoo','goochi','852456','4711','catcher','carman','champ1','chess','fortress','fishfish','firefigh','geezer','rsalinas','samuel1','saigon','scooby1','doors','dick1','devin','doom','dirk','doris','dontknow','load','magpies','manfred','raleigh','vader1','universa','tulips','defense','mygirl','burn','bowtie','bowman','holycow','heinrich','honeys','enforcer','katherine','minerva','wheeler','witch','waterboy','jaime','irving','1992','23skidoo','bimbo','blue11','birddog','woodman','womble','zildjian','030303','stinker','stoppedby','sexybabe','speakers','slugger','spotty','smoke1','polopolo','perfect1','things','torpedo','tender','thrasher','lakeside','lilith','jimmys','jerk','junior1','marsh','masamune','rice','root','1214','april1','allgood','bambi','grinch','767676','5252','cherries','chipmunk','cezer121','carnival','capecod','finder','flint','fearless','goats','funstuff','gideon','savior','seabee','sandro','schalke','salasana','disney1','duckman','options','pancake','pantera1','malice','lookin','love123','lloyd','qwert123','puppet','prayers','union','tracer','crap','creation','cwoui','nascar24','hookers','hollie','hewitt','estrella','erection','ernesto','ericsson','edthom','kaylee','kokoko','kokomo','kimball','morales','mooses','monk','walton','weekend','inter','internal','1michael','1993','19781978','25252525','worker','summers','surgery','shibby','shamus','skibum','sheepdog','sex69','spliff','slipper','spoons','spanner','snowbird','slow','toriamos','temp123','tennesse','lakers1','jomama','julio','mazdarx7','rosario','recon','riddle','room','revolver','1025','1101','barney1','babycake','baylor','gotham','gravity','hallowee','hancock','616161','515000','caca','cannabis','castor','chilli','fdsa','getout','fuck69','gators1','sail','sable','rumble','dolemite','dork','dickens','duffer','dodgers1','painting','onions','logger','lorena','lookout','magic32','port','poon','prime','twat','coventry','citroen','christmas','civicsi','cocksuc
ker','coochie','compaq1','nancy1','buzzer','boulder','butkus','bungle','hogtied','honor','hero','hotgirls','hilary','heidi1','eggplant','mustang6','mortal','monkey12','wapapapa','wendy1','volleyba','vibrate','vicky','bledsoe','blink','birthday4','woof','xxxxx1','talk','stephen1','suburban','stock','tabatha','sheeba','start1','soccer10','something','starcraft','soccer12','peanut1','plastics','penthous','peterbil','tools','tetsuo','torino','tennis1','termite','ladder','last','lemmein','lakewood','jughead','melrose','megane','reginald','redone','request','angela1','alive','alissa','goodgirl','gonzo1','golden1','gotyoass','656565','626262','capricor','chains','calvin1','foolish','fallon','getmoney','godfather','gabber','gilligan','runaway','salami','dummy','dungeon','dudedude','dumb','dope','opus','paragon','oxygen','panhead','pasadena','opendoor','odyssey','magellan','lottie','printing','pressure','prince1','trustme','christa','court','davies','neville','nono','bread','buffet','hound','kajak','killkill','mona','moto','mildred','winner1','vixen','whiteboy','versace','winona','voyager1','instant','indy','jackjack','bigal','beech','biggun','blake1','blue99','big1','woods','synergy','success1','336699','sixty9','shark1','skin','simba1','sharpe','sebring','spongebo','spunk','springs','sliver','phialpha','password9','pizza1','plane','perkins','pookey','tickling','lexingky','lawman','joe123','jolly','mike123','romeo1','redheads','reserve','apple123','alanis','ariane','antony','backbone','aviation','band','hand','green123','haley','carlitos','byebye','cartman1','camden','chewy','camaross','favorite6','forumwp','franks','ginscoot','fruity','sabrina1','devil666','doughnut','pantie','oldone','paintball','lumina','rainbow1','prosper','total','true','umbrella','ajax','951753','achtung','abc12345','compact','color','corn','complete','christi','closer','corndog','deerhunt','darklord','dank','nimitz','brandy1','bowl','breanna','holidays','hetfield','holein1','hillbill','hugetits','east','evolutio','kenobi','whiplash','waldo','wg8e3wjf','wing','istanbul','invis','1996','benton','bigjohn','bluebell','beef','beater','benji','bluejay','xyzzy','wrestling','storage','superior','suckdick','taichi','stellar','stephane','shaker','skirt','seymour','semper','splurge','squeak','pearls','playball','pitch','phyllis','pooky','piss','tomas','titfuck','joemama','johnny5','marcello','marjorie','married','maxi','rhubarb','rockwell','ratboy','reload','rooney','redd','1029','1030','1220','anchor','bbking','baritone','gryphon','gone','57chevy','494949','celeron','fishy','gladiator','fucker1','roswell','dougie','downer','dicker','diva','domingo','donjuan','nympho','omar','praise','racers','trick','trauma','truck1','trample','acer','corwin','cricket1','clemente','climax','denmark','cuervo','notnow','nittany','neutron','native','bosco1','buffa','breaker','hello2','hydro','estelle','exchange','explore','kisskiss','kittys','kristian','montecar','modem','mississi','mooney','weiner','washington','20012001','bigdick1','bibi','benfica','yahoo1','striper','tabasco','supra','383838','456654','seneca','serious','shuttle','socks','stanton','penguin1','pathfind','testibil','thethe','listen','lightning','lighting','jeter2','marma','mark1','metoo','republic','rollin','redleg','redbone','redskin','rocco','1245','armand','anthony7','altoids','andrews','barley','away','asswipe','bauhaus','bbbbbb1','gohome','harrier','golfpro','goldeney','818181','6666666','5000','5rxypn','cameron1','calling','checker','calibra','fields','freefree','faith1','fist','f
dm7ed','finally','giraffe','glasses','giggles','fringe','gate','georgie','scamper','rrpass1','screwyou','duffy','deville','dimples','pacino','ontario','passthie','oberon','quest1','postov1000','puppydog','puffer','raining','protect','qwerty7','trey','tribe','ulysses','tribal','adam25','a1234567','compton','collie','cleopatr','contract','davide','norris','namaste','myrtle','buffalo1','bonovox','buckley','bukkake','burning','burner','bordeaux','burly','hun999','emilie','elmo','enters','enrique','keisha','mohawk','willard','vgirl','whale','vince','jayden','jarrett','1812','1943','222333','bigjim','bigd','zoom','wordup','ziggy1','yahooo','workout','young1','written','xmas','zzzzzz1','surfer1','strife','sunlight','tasha1','skunk','shauna','seth','soft','sprinter','peaches1','planes','pinetree','plum','pimping','theforce','thedon','toocool','leeann','laddie','list','lkjh','lara','joke','jupiter1','mckenzie','matty','rene','redrose','1200','102938','annmarie','alexa','antares','austin31','ground','goose1','737373','78945612','789987','6464','calimero','caster','casper1','cement','chevrolet','chessie','caddy','chill','child','canucks','feeling','favorite','fellatio','f00tball','francine','gateway2','gigi','gamecube','giovanna','rugby1','scheisse','dshade','dudes','dixie1','owen','offshore','olympia','lucas1','macaroni','manga','pringles','puff','tribble','trouble1','ussy','core','clint','coolhand','colonial','colt','debra','darthvad','dealer','cygnusx1','natalie1','newark','husband','hiking','errors','eighteen','elcamino','emmett','emilia','koolaid','knight1','murphy1','volcano','idunno','2005','2233','block','benito','blueberr','biguns','yamahar1','zapper','zorro1','0911','3006','sixsix','shopper','siobhan','sextoy','stafford','snowboard','speedway','sounds','pokey','peabody','playboy2','titi','think','toast','toonarmy','lister','lambda','joecool','jonas','joyce','juniper','mercer','max123','manny','massimo','mariposa','met2002','reggae','ricky1','1236','1228','1016','all4one','arianna','baberuth','asgard','gonzales','484848','5683','6669','catnip','chiquita','charisma','capslock','cashmone','chat','figure','galant','frenchy','gizmodo1','girlies','gabby','garner','screwy','doubled','divers','dte4uw','done','dragonfl','maker','locks','rachelle','treble','twinkie','trailer','tropical','acid','crescent','cooking','cococo','cory','dabomb','daffy','dandfa','cyrano','nathanie','briggs','boners','helium','horton','hoffman','hellas','espresso','emperor','killa','kikimora','wanda','w4g8at','verona','ilikeit','iforget','1944','20002000','birthday1','beatles1','blue1','bigdicks','beethove','blacklab','blazers','benny1','woodwork','0069','0101','taffy','susie','survivor','swim','stokes','4567','shodan','spoiled','steffen','pissed','pavlov','pinnacle','place','petunia','terrell','thirty','toni','tito','teenie','lemonade','lily','lillie','lalakers','lebowski','lalalala','ladyboy','jeeper','joyjoy','mercury1','mantle','mannn','rocknrol','riversid','reeves','123aaa','11112222','121314','1021','1004','1120','allen1','ambers','amstel','ambrose','alice1','alleycat','allegro','ambrosia','alley','australia','hatred','gspot','graves','goodsex','hattrick','harpoon','878787','8inches','4wwvte','cassandr','charlie123','case','chavez','fighting','gabriela','gatsby','fudge','gerry','generic','gareth','fuckme2','samm','sage','seadog','satchmo','scxakv','santafe','dipper','dingle','dizzy','outoutout','madmad','london1','qbg26i','pussy123','randolph','vaughn','tzpvaw','vamp','comedy','comp','cowgirl','coldplay','dawgs','delane
y','nt5d27','novifarm','needles','notredam','newness','mykids','bryan1','bouncer','hihihi','honeybee','iceman1','herring','horn','hook','hotlips','dynamo','klaus','kittie','kappa','kahlua','muffy','mizzou','mohamed','musical','wannabe','wednesda','whatup','weller','waterfal','willy1','invest','blanche','bear1','billabon','youknow','zelda','yyyyyy1','zachary1','01234567','070462','zurich','superstar','storms','tail','stiletto','strat','427900','sigmachi','shelter','shells','sexy123','smile1','sophie1','stefano','stayout','somerset','smithers','playmate','pinkfloyd','phish1','payday','thebear','telefon','laetitia','kswbdu','larson','jetta','jerky','melina','metro','revoluti','retire','respect','1216','1201','1204','1222','1115','archange','barry1','handball','676767','chandra','chewbacc','flesh','furball','gocubs','fruit','fullback','gman','gentle','dunbar','dewalt','dominiqu','diver1','dhip6a','olemiss','ollie','mandrake','mangos','pretzel','pusssy','tripleh','valdez','vagabond','clean','comment','crew','clovis','deaths','dandan','csfbr5yy','deadspin','darrel','ninguna','noah','ncc74656','bootsie','bp2002','bourbon','brennan','bumble','books','hose','heyyou','houston1','hemlock','hippo','hornets','hurricane','horseman','hogan','excess','extensa','muffin1','virginie','werdna','idontknow','info','iron','jack1','1bitch','151nxjmt','bendover','bmwbmw','bills','zaq123','wxcvbn','surprise','supernov','tahoe','talbot','simona','shakur','sexyone','seviyi','sonja','smart1','speed1','pepito','phantom1','playoffs','terry1','terrier','laser1','lite','lancia','johngalt','jenjen','jolene','midori','message','maserati','matteo','mental','miami1','riffraff','ronald1','reason','rhythm','1218','1026','123987','1015','1103','armada','architec','austria','gotmilk','hawkins','gray','camila','camp','cambridg','charge','camero','flex','foreplay','getoff','glacier','glotest','froggie','gerbil','rugger','sanity72','salesman','donna1','dreaming','deutsch','orchard','oyster','palmtree','ophelia','pajero','m5wkqf','magenta','luckyone','treefrog','vantage','usmarine','tyvugq','uptown','abacab','aaaaaa1','advance','chuck1','delmar','darkange','cyclones','nate','navajo','nope','border','bubba123','building','iawgk2','hrfzlz','dylan1','enrico','encore','emilio','eclipse1','killian','kayleigh','mutant','mizuno','mustang2','video1','viewer','weed420','whales','jaguar1','insight','1990','159159','1love','bliss','bears1','bigtruck','binder','bigboss','blitz','xqgann','yeahyeah','zeke','zardoz','stickman','table','3825','signal','sentra','side','shiva','skipper1','singapor','southpaw','sonora','squid','slamdunk','slimjim','placid','photon','placebo','pearl1','test12','therock1','tiger123','leinad','legman','jeepers','joeblow','mccarthy','mike23','redcar','rhinos','rjw7x4','1102','13576479','112211','alcohol','gwju3g','greywolf','7bgiqk','7878','535353','4snz9g','candyass','cccccc1','carola','catfight','cali','fister','fosters','finland','frankie1','gizzmo','fuller','royalty','rugrat','sandie','rudolf','dooley','dive','doreen','dodo','drop','oemdlg','out3xf','paddy','opennow','puppy1','qazwsxedc','pregnant','quinn','ramjet','under','uncle','abraxas','corner','creed','cocoa','crown','cows','cn42qj','dancer1','death666','damned','nudity','negative','nimda2k','buick','bobb','braves1','brook','henrik','higher','hooligan','dust','everlast','karachi','mortis','mulligan','monies','motocros','wally1','weapon','waterman','view','willie1','vicki','inspiron','1test','2929','bigblack','xytfu7','yackwin','zaq1xsw2','yy5rbfsc','100100','0660'
,'tahiti','takehana','talks','332211','3535','sedona','seawolf','skydiver','shine','spleen','slash','spjfet','special1','spooner','slimshad','sopranos','spock1','penis1','patches1','terri','thierry','thething','toohot','large','limpone','johnnie','mash4077','matchbox','masterp','maxdog','ribbit','reed','rita','rockin','redhat','rising','1113','14789632','1331','allday','aladin','andrey','amethyst','ariel','anytime','baseball1','athome','basil','goofy1','greenman','gustavo','goofball','ha8fyp','goodday','778899','charon','chappy','castillo','caracas','cardiff','capitals','canada1','cajun','catter','freddy1','favorite2','frazier','forme','follow','forsaken','feelgood','gavin','gfxqx686','garlic','sarge','saskia','sanjose','russ','salsa','dilbert1','dukeduke','downhill','longhair','loop','locutus','lockdown','malachi','mamacita','lolipop','rainyday','pumpkin1','punker','prospect','rambo1','rainbows','quake','twin','trinity1','trooper1','aimee','citation','coolcat','crappy','default','dental','deniro','d9ungl','daddys','napoli','nautica','nermal','bukowski','brick','bubbles1','bogota','board','branch','breath','buds','hulk','humphrey','hitachi','evans','ender','export','kikiki','kcchiefs','kram','morticia','montrose','mongo','waqw3p','wizzard','visited','whdbtp','whkzyc','image','154ugeiu','1fuck','binky','blind','bigred1','blubber','benz','becky1','year2005','wonderfu','wooden','xrated','0001','tampabay','survey','tammy1','stuffer','3mpz4r','3000','3some','selina','sierra1','shampoo','silk','shyshy','slapnuts','standby','spartan1','sprocket','sometime','stanley1','poker1','plus','thought','theshit','torture','thinking','lavalamp','light1','laserjet','jediknig','jjjjj1','jocelyn','mazda626','menthol','maximo','margaux','medic1','release','richter','rhino1','roach','renate','repair','reveal','1209','1234321','amigos','apricot','alexandra','asdfgh1','hairball','hatter','graduate','grimace','7xm5rq','6789','cartoons','capcom','cheesy','cashflow','carrots','camping','fanatic','fool','format','fleming','girlie','glover','gilmore','gardner','safeway','ruthie','dogfart','dondon','diapers','outsider','odin','opiate','lollol','love12','loomis','mallrats','prague','primetime21','pugsley','program','r29hqq','touch','valleywa','airman','abcdefg1','darkone','cummer','dempsey','damn','nadia','natedogg','nineball','ndeyl5','natchez','newone','normandy','nicetits','buddy123','buddys','homely','husky','iceland','hr3ytm','highlife','holla','earthlin','exeter','eatmenow','kimkim','karine','k2trix','kernel','kirkland','money123','moonman','miles1','mufasa','mousey','wilma','wilhelm','whites','warhamme','instinct','jackass1','2277','20spanks','blobby','blair','blinky','bikers','blackjack','becca','blue23','xman','wyvern','085tzzqi','zxzxzx','zsmj2v','suede','t26gn4','sugars','sylvie','tantra','swoosh','swiss','4226','4271','321123','383pdjvl','shoe','shane1','shelby1','spades','spain','smother','soup','sparhawk','pisser','photo1','pebble','phones','peavey','picnic','pavement','terra','thistle','tokyo','therapy','lives','linden','kronos','lilbit','linux','johnston','material','melanie1','marbles','redlight','reno','recall','1208','1138','1008','alchemy','aolsucks','alexalex','atticus','auditt','ballet','b929ezzh','goodyear','hanna','griffith','gubber','863abgsg','7474','797979','464646','543210','4zqauf','4949','ch5nmk','carlito','chewey','carebear','caleb','checkmat','cheddar','chachi','fever','forgetit','fine','forlife','giants1','gates','getit','gamble','gerhard','galileo','g3ujwg','ganja','rufus1','rushmore','sc
outs','discus','dudeman','olympus','oscars','osprey','madcow','locust','loyola','mammoth','proton','rabbit1','question','ptfe3xxp','pwxd5x','purple1','punkass','prophecy','uyxnyd','tyson1','aircraft','access99','abcabc','cocktail','colts','civilwar','cleveland','claudia1','contour','clement','dddddd1','cypher','denied','dapzu455','dagmar','daisydog','name','noles','butters','buford','hoochie','hotel','hoser','eddy','ellis','eldiablo','kingrich','mudvayne','motown','mp8o6d','wife','vipergts','italiano','innocent','2055','2211','beavers','bloke','blade1','yamato','zooropa','yqlgr667','050505','zxcvbnm1','zw6syj','suckcock','tango1','swing','stern','stephens','swampy','susanna','tammie','445566','333666','380zliki','sexpot','sexylady','sixtynin','sickboy','spiffy','sleeping','skylark','sparkles','slam','pintail','phreak','places','teller','timtim','tires','thighs','left','latex','llamas','letsdoit','lkjhg','landmark','letters','lizzard','marlins','marauder','metal1','manu','register','righton','1127','alain','alcat','amigo','basebal1','azertyui','attract','azrael','hamper','gotenks','golfgti','gutter','hawkwind','h2slca','harman','grace1','6chid8','789654','canine','casio','cazzo','chamber','cbr900','cabrio','calypso','capetown','feline','flathead','fisherma','flipmode','fungus','goal','g9zns4','full','giggle','gabriel1','fuck123','saffron','dogmeat','dreamcas','dirtydog','dunlop','douche','dresden','dickdick','destiny1','pappy','oaktree','lydia','luft4','puta','prayer','ramada','trumpet1','vcradq','tulip','tracy71','tycoon','aaaaaaa1','conquest','click','chitown','corps','creepers','constant','couples','code','cornhole','danman','dada','density','d9ebk7','cummins','darth','cute','nash','nirvana1','nixon','norbert','nestle','brenda1','bonanza','bundy','buddies','hotspur','heavy','horror','hufmqw','electro','erasure','enough','elisabet','etvww4','ewyuza','eric1','kinder','kenken','kismet','klaatu','musician','milamber','willi','waiting','isacs155','igor','1million','1letmein','x35v8l','yogi','ywvxpz','xngwoj','zippy1','020202','****','stonewal','sweeney','story','sentry','sexsexsex','spence','sonysony','smirnoff','star12','solace','sledge','states','snyder','star1','paxton','pentagon','pkxe62','pilot1','pommes','paulpaul','plants','tical','tictac','toes','lighthou','lemans','kubrick','letmein22','letmesee','jys6wz','jonesy','jjjjjj1','jigga','joelle','mate','merchant','redstorm','riley1','rosa','relief','14141414','1126','allison1','badboy1','asthma','auggie','basement','hartley','hartford','hardwood','gumbo','616913','57np39','56qhxs','4mnveh','cake','forbes','fatluvr69','fqkw5m','fidelity','feathers','fresno','godiva','gecko','gladys','gibson1','gogators','fridge','general1','saxman','rowing','sammys','scotts','scout1','sasasa','samoht','dragon69','ducky','dragonball','driller','p3wqaw','nurse','papillon','oneone','openit','optimist','longshot','portia','rapier','pussy2','ralphie','tuxedo','ulrike','undertow','trenton','copenhag','come','delldell','culinary','deltas','mytime','nicky','nickie','noname','noles1','bucker','bopper','bullock','burnout','bryce','hedges','ibilltes','hihje863','hitter','ekim','espana','eatme69','elpaso','envelope','express1','eeeeee1','eatme1','karaoke','kara','mustang5','misses','wellingt','willem','waterski','webcam','jasons','infinite','iloveyou!','jakarta','belair','bigdad','beerme','yoshi','yinyang','zimmer','x24ik3','063dyjuy','0000007','ztmfcq','stopit','stooges','survival','stockton','symow8','strato','2hot4u','ship','simons','skins','shakes','sex1','shield',
'snacks','softtail','slimed123','pizzaman','pipe','pitt','pathetic','pinto','tigercat','tonton','lager','lizzy','juju','john123','jennings','josiah','jesse1','jordon','jingles','martian','mario1','rootedit','rochard','redwine','requiem','riverrat','rats','1117','1014','1205','althea','allie','amor','amiga','alpina','alert','atreides','banana1','bahamut','hart','golfman','happines','7uftyx','5432','5353','5151','4747','byron','chatham','chadwick','cherie','foxfire','ffvdj474','freaked','foreskin','gayboy','gggggg1','glenda','gameover','glitter','funny1','scoobydoo','scroll','rudolph','saddle','saxophon','dingbat','digimon','omicron','parsons','ohio','panda1','loloxx','macintos','lululu','lollypop','racer1','queen1','qwertzui','prick','upnfmc','tyrant','trout1','9skw5g','aceman','adelaide','acls2h','aaabbb','acapulco','aggie','comcast','craft','crissy','cloudy','cq2kph','custer','d6o8pm','cybersex','davecole','darian','crumbs','daisey','davedave','dasani','needle','mzepab','myporn','narnia','nineteen','booger1','bravo1','budgie','btnjey','highlander','hotel6','humbug','edwin','ewtosi','kristin1','kobe','knuckles','keith1','katarina','muff','muschi','montana1','wingchun','wiggle','whatthe','walking','watching','vette1','vols','virago','intj3a','ishmael','intern','jachin','illmatic','199999','2010','beck','blender','bigpenis','bengal','blue1234','your','zaqxsw','xray','xxxxxxx1','zebras','yanks','worlds','tadpole','stripes','svetlana','3737','4343','3728','4444444','368ejhih','solar','sonne','smalls','sniffer','sonata','squirts','pitcher','playstation','pktmxr','pescator','points','texaco','lesbos','lilian','l8v53x','jo9k2jw2','jimbeam','josie','jimi','jupiter2','jurassic','marines1','maya','rocket1','ringer','14725836','12345679','1219','123098','1233','alessand','althor','angelika','arch','armando','alpha123','basher','barefeet','balboa','bbbbb1','banks','badabing','harriet','gopack','golfnut','gsxr1000','gregory1','766rglqy','8520','753159','8dihc6','69camaro','666777','cheeba','chino','calendar','cheeky','camel1','fishcake','falling','flubber','giuseppe','gianni','gloves','gnasher23','frisbee','fuzzy1','fuzzball','sauce','save13tx','schatz','russell1','sandra1','scrotum','scumbag','sabre','samdog','dripping','dragon12','dragster','paige','orwell','mainland','lunatic','lonnie','lotion','maine','maddux','qn632o','poophead','rapper','porn4life','producer','rapunzel','tracks','velocity','vanessa1','ulrich','trueblue','vampire1','abacus','902100','crispy','corky','crane','chooch','d6wnro','cutie','deal','dabulls','dehpye','navyseal','njqcw4','nownow','nigger1','nightowl','nonenone','nightmar','bustle','buddy2','boingo','bugman','bulletin','bosshog','bowie','hybrid','hillside','hilltop','hotlegs','honesty','hzze929b','hhhhh1','hellohel','eloise','evilone','edgewise','e5pftu','eded','embalmer','excalibur','elefant','kenzie','karl','karin','killah','kleenex','mouses','mounta1n','motors','mutley','muffdive','vivitron','winfield','wednesday','w00t88','iloveit','jarjar','incest','indycar','17171717','1664','17011701','222777','2663','beelch','benben','yitbos','yyyyy1','yasmin','zapata','zzzzz1','stooge','tangerin','taztaz','stewart1','summer69','sweetness','system1','surveyor','stirling','3qvqod','3way','456321','sizzle','simhrq','shrink','shawnee','someday','sparty','ssptx452','sphere','spark','slammed','sober','persian','peppers','ploppy','pn5jvw','poobear','pianos','plaster','testme','tiff','thriller','larissa','lennox','jewell','master12','messier','rockey','1229','1217','1478','1009','anastasi','
almighty','amonra','aragon','argentin','albino','azazel','grinder','6uldv8','83y6pv','8888888','4tlved','515051','carsten','changes','flanders','flyers88','ffffff1','firehawk','foreman','firedog','flashman','ggggg1','gerber','godspeed','galway','giveitup','funtimes','gohan','giveme','geryfe','frenchie','sayang','rudeboy','savanna','sandals','devine','dougal','drag0n','dga9la','disaster','desktop','only','onlyone','otter','pandas','mafia','lombard','luckys','lovejoy','lovelife','manders','product','qqh92r','qcmfd454','pork','radar1','punani','ptbdhw','turtles','undertaker','trs8f7','tramp','ugejvp','abba','911turbo','acdc','abcd123','clever','corina','cristian','create','crash1','colony','crosby','delboy','daniele','davinci','daughter','notebook','niki','nitrox','borabora','bonzai','budd','brisbane','hotter','heeled','heroes','hooyah','hotgirl','i62gbq','horse1','hills','hpk2qc','epvjb6','echo','korean','kristie','mnbvc','mohammad','mind','mommy1','munster','wade','wiccan','wanted','jacket','2369','bettyboo','blondy','bismark','beanbag','bjhgfi','blackice','yvtte545','ynot','yess','zlzfrh','wolvie','007bond','******','tailgate','tanya1','sxhq65','stinky1','3234412','3ki42x','seville','shimmer','sheryl','sienna','shitshit','skillet','seaman','sooners1','solaris','smartass','pastor','pasta','pedros','pennywis','pfloyd','tobydog','thetruth','lethal','letme1n','leland','jenifer','mario66','micky','rocky2','rewq','ripped','reindeer','1128','1207','1104','1432','aprilia','allstate','alyson','bagels','basic','baggies','barb','barrage','greatest','gomez','guru','guard','72d5tn','606060','4wcqjn','caldwell','chance1','catalog','faust','film','flange','fran','fartman','geil','gbhcf2','fussball','glen','fuaqz4','gameboy','garnet','geneviev','rotary','seahawk','russel','saab','seal','samadams','devlt4','ditto','drevil','drinker','deuce','dipstick','donut','octopus','ottawa','losangel','loverman','porky','q9umoz','rapture','pump','pussy4me','university','triplex','ue8fpw','trent','trophy','turbos','troubles','agent','aaa340','churchil','crazyman','consult','creepy','craven','class','cutiepie','ddddd1','dejavu','cuxldv','nettie','nbvibt','nikon','niko','norwood','nascar1','nolan','bubba2','boobear','boogers','buff','bullwink','bully','bulldawg','horsemen','escalade','editor','eagle2','dynamic','ella','efyreg','edition','kidney','minnesot','mogwai','morrow','msnxbi','moonlight','mwq6qlzo','wars','werder','verygood','voodoo1','wheel','iiiiii1','159951','1624','1911a1','2244','bellagio','bedlam','belkin','bill1','woodrow','xirt2k','worship','??????','tanaka','swift','susieq','sundown','sukebe','tales','swifty','2fast4u','senate','sexe','sickness','shroom','shaun','seaweed','skeeter1','status','snicker','sorrow','spanky1','spook','patti','phaedrus','pilots','pinch','peddler','theo','thumper1','tessie','tiger7','tmjxn151','thematri','l2g7k3','letmeinn','lazy','jeffjeff','joan','johnmish','mantra','mariana','mike69','marshal','mart','mazda6','riptide','robots','rental','1107','1130','142857','11001001','1134','armored','alvin','alec','allnight','alright','amatuers','bartok','attorney','astral','baboon','bahamas','balls1','bassoon','hcleeb','happyman','granite','graywolf','golf1','gomets','8vjzus','7890','789123','8uiazp','5757','474jdvff','551scasi','50cent','camaro1','cherry1','chemist','final','firenze','fishtank','farrell','freewill','glendale','frogfrog','gerhardt','ganesh','same','scirocco','devilman','doodles','dinger','okinawa','olympic','nursing','orpheus','ohmygod','paisley','pallmall','null','lounge',
'lunchbox','manhatta','mahalo','mandarin','qwqwqw','qguvyt','pxx3eftp','president','rambler','puzzle','poppy1','turk182','trotter','vdlxuc','trish','tugboat','valiant','tracie','uwrl7c','chris123','coaster','cmfnpu','decimal','debbie1','dandy','daedalus','dede','natasha1','nissan1','nancy123','nevermin','napalm','newcastle','boats','branden','britt','bonghit','hester','ibxnsm','hhhhhh1','holger','durham','edmonton','erwin','equinox','dvader','kimmy','knulla','mustafa','monsoon','mistral','morgana','monica1','mojave','month','monterey','mrbill','vkaxcs','victor1','wacker','wendell','violator','vfdhif','wilson1','wavpzt','verena','wildstar','winter99','iqzzt580','jarrod','imback','1914','19741974','1monkey','1q2w3e4r5t','2500','2255','blank','bigshow','bigbucks','blackcoc','zoomer','wtcacq','wobble','xmen','xjznq5','yesterda','yhwnqc','zzzxxx','streak','393939','2fchbg','skinhead','skilled','shakira','shaft','shadow12','seaside','sigrid','sinful','silicon','smk7366','snapshot','sniper1','soccer11','staff','slap','smutty','peepers','pleasant','plokij','pdiddy','pimpdaddy','thrust','terran','topaz','today1','lionhear','littlema','lauren1','lincoln1','lgnu9d','laughing','juneau','methos','medina','merlyn','rogue1','romulus','redshift','1202','1469','12locked','arizona1','alfarome','al9agd','aol123','altec','apollo1','arse','baker1','bbb747','bach','axeman','astro1','hawthorn','goodfell','hawks1','gstring','hannes','8543852','868686','4ng62t','554uzpad','5401','567890','5232','catfood','frame','flow','fire1','flipflop','fffff1','fozzie','fluff','garrison','fzappa','furious','round','rustydog','sandberg','scarab','satin','ruger','samsung1','destin','diablo2','dreamer1','detectiv','dominick','doqvq3','drywall','paladin1','papabear','offroad','panasonic','nyyankee','luetdi','qcfmtz','pyf8ah','puddles','privacy','rainer','pussyeat','ralph1','princeto','trivia','trewq','tri5a3','advent','9898','agyvorc','clarkie','coach1','courier','contest','christo','corinna','chowder','concept','climbing','cyzkhw','davidb','dad2ownu','days','daredevi','de7mdf','nose','necklace','nazgul','booboo1','broad','bonzo','brenna','boot','butch1','huskers1','hgfdsa','hornyman','elmer','elektra','england1','elodie','kermit1','knife','kaboom','minute','modern','motherfucker','morten','mocha','monday1','morgoth','ward','weewee','weenie','walters','vorlon','website','wahoo','ilovegod','insider','jayman','1911','1dallas','1900','1ranger','201jedlz','2501','1qaz','bertram','bignuts','bigbad','beebee','billows','belize','bebe','wvj5np','wu4etd','yamaha1','wrinkle5','zebra1','yankee1','zoomzoom','09876543','0311','?????','stjabn','tainted','3tmnej','shoot','skooter','skelter','sixteen','starlite','smack','spice1','stacey1','smithy','perrin','pollux','peternorth','pixie','paulina','piston','pick','poets','pine','toons','tooth','topspin','kugm7b','legends','jeepjeep','juliana','joystick','junkmail','jojojojo','jonboy','judge','midland','meteor','mccabe','matter','mayfair','meeting','merrill','raul','riches','reznor','rockrock','reboot','reject','robyn','renee1','roadway','rasta220','1411','1478963','1019','archery','allman','andyandy','barks','bagpuss','auckland','gooseman','hazmat','gucci','guns','grammy','happydog','greek','7kbe9d','7676','6bjvpe','5lyedn','5858','5291','charlie2','chas','c7lrwu','candys','chateau','ccccc1','cardinals','fear','fihdfv','fortune12','gocats','gaelic','fwsadn','godboy','gldmeo','fx3tuo','fubar1','garland','generals','gforce','rxmtkp','rulz','sairam','dunhill','division','dogggg','detect','details','doll
','drinks','ozlq6qwm','ov3ajy','lockout','makayla','macgyver','mallorca','loves','prima','pvjegu','qhxbij','raphael','prelude1','totoro','tusymo','trousers','tunnel','valeria','tulane','turtle1','tracy1','aerosmit','abbey1','address','clticic','clueless','cooper1','comets','collect','corbin','delpiero','derick','cyprus','dante1','dave1','nounours','neal','nexus6','nero','nogard','norfolk','brent1','booyah','bootleg','buckaroo','bulls23','bulls1','booper','heretic','icecube','hellno','hounds','honeydew','hooters1','hoes','howie','hevnm4','hugohugo','eighty','epson','evangeli','eeeee1','eyphed']
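# --- Usage sketch (assumption): the name of the variable holding the common-password
# --- list above is not visible in this excerpt, so `_common_password_sample` below is a
# --- hypothetical stand-in rather than the file's actual identifier. The sketch only
# --- illustrates the typical pattern: convert the list to a frozenset once, then do
# --- O(1) membership checks when validating a candidate password.
_common_password_sample = ['password', 'qwerty', 'dragon']  # stand-in for the full list above
_COMMON_PASSWORDS = frozenset(_common_password_sample)

def is_common_password(candidate: str) -> bool:
    """Return True if the candidate matches an entry in the common-password list."""
    return candidate.strip().lower() in _COMMON_PASSWORDS

# Example (under the stand-in list): is_common_password('Dragon') -> True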
english_first_names = ['Aaren','Aarika','Abagael','Abagail','Abbe','Abbey','Abbi','Abbie','Abby','Abbye','Abigael','Abigail','Abigale','Abra','Ada','Adah','Adaline','Adan','Adara','Adda','Addi','Addia','Addie','Addy','Adel','Adela','Adelaida','Adelaide','Adele','Adelheid','Adelice','Adelina','Adelind','Adeline','Adella','Adelle','Adena','Adey','Adi','Adiana','Adina','Adora','Adore','Adoree','Adorne','Adrea','Adria','Adriaens','Adrian','Adriana','Adriane','Adrianna','Adrianne','Adriena','Adrienne','Aeriel','Aeriela','Aeriell','Afton','Ag','Agace','Agata','Agatha','Agathe','Aggi','Aggie','Aggy','Agna','Agnella','Agnes','Agnese','Agnesse','Agneta','Agnola','Agretha','Aida','Aidan','Aigneis','Aila','Aile','Ailee','Aileen','Ailene','Ailey','Aili','Ailina','Ailis','Ailsun','Ailyn','Aime','Aimee','Aimil','Aindrea','Ainslee','Ainsley','Ainslie','Ajay','Alaine','Alameda','Alana','Alanah','Alane','Alanna','Alayne','Alberta','Albertina','Albertine','Albina','Alecia','Aleda','Aleece','Aleen','Alejandra','Alejandrina','Alena','Alene','Alessandra','Aleta','Alethea','Alex','Alexa','Alexandra','Alexandrina','Alexi','Alexia','Alexina','Alexine','Alexis','Alfi','Alfie','Alfreda','Alfy','Ali','Alia','Alica','Alice','Alicea','Alicia','Alida','Alidia','Alie','Alika','Alikee','Alina','Aline','Alis','Alisa','Alisha','Alison','Alissa','Alisun','Alix','Aliza','Alla','Alleen','Allegra','Allene','Alli','Allianora','Allie','Allina','Allis','Allison','Allissa','Allix','Allsun','Allx','Ally','Allyce','Allyn','Allys','Allyson','Alma','Almeda','Almeria','Almeta','Almira','Almire','Aloise','Aloisia','Aloysia','Alta','Althea','Alvera','Alverta','Alvina','Alvinia','Alvira','Alyce','Alyda','Alys','Alysa','Alyse','Alysia','Alyson','Alyss','Alyssa','Amabel','Amabelle','Amalea','Amalee','Amaleta','Amalia','Amalie','Amalita','Amalle','Amanda','Amandi','Amandie','Amandy','Amara','Amargo','Amata','Amber','Amberly','Ambur','Ame','Amelia','Amelie','Amelina','Ameline','Amelita','Ami','Amie','Amii','Amil','Amitie','Amity','Ammamaria','Amy','Amye','Ana','Anabal','Anabel','Anabella','Anabelle','Analiese','Analise','Anallese','Anallise','Anastasia','Anastasie','Anastassia','Anatola','Andee','Andeee','Anderea','Andi','Andie','Andra','Andrea','Andreana','Andree','Andrei','Andria','Andriana','Andriette','Andromache','Andy','Anestassia','Anet','Anett','Anetta','Anette','Ange','Angel','Angela','Angele','Angelia','Angelica','Angelika','Angelina','Angeline','Angelique','Angelita','Angelle','Angie','Angil','Angy','Ania','Anica','Anissa','Anita','Anitra','Anjanette','Anjela','Ann','Ann-Marie','Anna','Anna-Diana','Anna-Diane','Anna-Maria','Annabal','Annabel','Annabela','Annabell','Annabella','Annabelle','Annadiana','Annadiane','Annalee','Annaliese','Annalise','Annamaria','Annamarie','Anne','Anne-Corinne','Anne-Marie','Annecorinne','Anneliese','Annelise','Annemarie','Annetta','Annette','Anni','Annice','Annie','Annis','Annissa','Annmaria','Annmarie','Annnora','Annora','Anny','Anselma','Ansley','Anstice','Anthe','Anthea','Anthia','Anthiathia','Antoinette','Antonella','Antonetta','Antonia','Antonie','Antonietta','Antonina','Anya','Appolonia','April','Aprilette','Ara','Arabel','Arabela','Arabele','Arabella','Arabelle','Arda','Ardath','Ardeen','Ardelia','Ardelis','Ardella','Ardelle','Arden','Ardene','Ardenia','Ardine','Ardis','Ardisj','Ardith','Ardra','Ardyce','Ardys','Ardyth','Aretha','Ariadne','Ariana','Aridatha','Ariel','Ariela','Ariella','Arielle','Arlana','Arlee','Arleen','Arlen','Arlena','Arlene','Arleta','Arlette','Arleyne','Arlie','Arliene','Arli
na','Arlinda','Arline','Arluene','Arly','Arlyn','Arlyne','Aryn','Ashely','Ashia','Ashien','Ashil','Ashla','Ashlan','Ashlee','Ashleigh','Ashlen','Ashley','Ashli','Ashlie','Ashly','Asia','Astra','Astrid','Astrix','Atalanta','Athena','Athene','Atlanta','Atlante','Auberta','Aubine','Aubree','Aubrette','Aubrey','Aubrie','Aubry','Audi','Audie','Audra','Audre','Audrey','Audrie','Audry','Audrye','Audy','Augusta','Auguste','Augustina','Augustine','Aundrea','Aura','Aurea','Aurel','Aurelea','Aurelia','Aurelie','Auria','Aurie','Aurilia','Aurlie','Auroora','Aurora','Aurore','Austin','Austina','Austine','Ava','Aveline','Averil','Averyl','Avie','Avis','Aviva','Avivah','Avril','Avrit','Ayn','Bab','Babara','Babb','Babbette','Babbie','Babette','Babita','Babs','Bambi','Bambie','Bamby','Barb','Barbabra','Barbara','Barbara-Anne','Barbaraanne','Barbe','Barbee','Barbette','Barbey','Barbi','Barbie','Barbra','Barby','Bari','Barrie','Barry','Basia','Bathsheba','Batsheva','Bea','Beatrice','Beatrisa','Beatrix','Beatriz','Bebe','Becca','Becka','Becki','Beckie','Becky','Bee','Beilul','Beitris','Bekki','Bel','Belia','Belicia','Belinda','Belita','Bell','Bella','Bellanca','Belle','Bellina','Belva','Belvia','Bendite','Benedetta','Benedicta','Benedikta','Benetta','Benita','Benni','Bennie','Benny','Benoite','Berenice','Beret','Berget','Berna','Bernadene','Bernadette','Bernadina','Bernadine','Bernardina','Bernardine','Bernelle','Bernete','Bernetta','Bernette','Berni','Bernice','Bernie','Bernita','Berny','Berri','Berrie','Berry','Bert','Berta','Berte','Bertha','Berthe','Berti','Bertie','Bertina','Bertine','Berty','Beryl','Beryle','Bess','Bessie','Bessy','Beth','Bethanne','Bethany','Bethena','Bethina','Betsey','Betsy','Betta','Bette','Bette-Ann','Betteann','Betteanne','Betti','Bettina','Bettine','Betty','Bettye','Beulah','Bev','Beverie','Beverlee','Beverley','Beverlie','Beverly','Bevvy','Bianca','Bianka','Bibbie','Bibby','Bibbye','Bibi','Biddie','Biddy','Bidget','Bili','Bill','Billi','Billie','Billy','Billye','Binni','Binnie','Binny','Bird','Birdie','Birgit','Birgitta','Blair','Blaire','Blake','Blakelee','Blakeley','Blanca','Blanch','Blancha','Blanche','Blinni','Blinnie','Blinny','Bliss','Blisse','Blithe','Blondell','Blondelle','Blondie','Blondy','Blythe','Bobbe','Bobbee','Bobbette','Bobbi','Bobbie','Bobby','Bobbye','Bobette','Bobina','Bobine','Bobinette','Bonita','Bonnee','Bonni','Bonnibelle','Bonnie','Bonny','Brana','Brandais','Brande','Brandea','Brandi','Brandice','Brandie','Brandise','Brandy','Breanne','Brear','Bree','Breena','Bren','Brena','Brenda','Brenn','Brenna','Brett','Bria','Briana','Brianna','Brianne','Bride','Bridget','Bridgette','Bridie','Brier','Brietta','Brigid','Brigida','Brigit','Brigitta','Brigitte','Brina','Briney','Brinn','Brinna','Briny','Brit','Brita','Britney','Britni','Britt','Britta','Brittan','Brittaney','Brittani','Brittany','Britte','Britteny','Brittne','Brittney','Brittni','Brook','Brooke','Brooks','Brunhilda','Brunhilde','Bryana','Bryn','Bryna','Brynn','Brynna','Brynne','Buffy','Bunni','Bunnie','Bunny','Cacilia','Cacilie','Cahra','Cairistiona','Caitlin','Caitrin','Cal','Calida','Calla','Calley','Calli','Callida','Callie','Cally','Calypso','Cam','Camala','Camel','Camella','Camellia','Cami','Camila','Camile','Camilla','Camille','Cammi','Cammie','Cammy','Candace','Candi','Candice','Candida','Candide','Candie','Candis','Candra','Candy','Caprice','Cara','Caralie','Caren','Carena','Caresa','Caressa','Caresse','Carey','Cari','Caria','Carie','Caril','Carilyn','Carin','Carina','Carine','Cariotta','Carissa',
'Carita','Caritta','Carla','Carlee','Carleen','Carlen','Carlene','Carley','Carlie','Carlin','Carlina','Carline','Carlita','Carlota','Carlotta','Carly','Carlye','Carlyn','Carlynn','Carlynne','Carma','Carmel','Carmela','Carmelia','Carmelina','Carmelita','Carmella','Carmelle','Carmen','Carmencita','Carmina','Carmine','Carmita','Carmon','Caro','Carol','Carol-Jean','Carola','Carolan','Carolann','Carole','Carolee','Carolin','Carolina','Caroline','Caroljean','Carolyn','Carolyne','Carolynn','Caron','Carree','Carri','Carrie','Carrissa','Carroll','Carry','Cary','Caryl','Caryn','Casandra','Casey','Casi','Casie','Cass','Cassandra','Cassandre','Cassandry','Cassaundra','Cassey','Cassi','Cassie','Cassondra','Cassy','Catarina','Cate','Caterina','Catha','Catharina','Catharine','Cathe','Cathee','Catherin','Catherina','Catherine','Cathi','Cathie','Cathleen','Cathlene','Cathrin','Cathrine','Cathryn','Cathy','Cathyleen','Cati','Catie','Catina','Catlaina','Catlee','Catlin','Catrina','Catriona','Caty','Caye','Cayla','Cecelia','Cecil','Cecile','Ceciley','Cecilia','Cecilla','Cecily','Ceil','Cele','Celene','Celesta','Celeste','Celestia','Celestina','Celestine','Celestyn','Celestyna','Celia','Celie','Celina','Celinda','Celine','Celinka','Celisse','Celka','Celle','Cesya','Chad','Chanda','Chandal','Chandra','Channa','Chantal','Chantalle','Charil','Charin','Charis','Charissa','Charisse','Charita','Charity','Charla','Charlean','Charleen','Charlena','Charlene','Charline','Charlot','Charlotta','Charlotte','Charmain','Charmaine','Charmane','Charmian','Charmine','Charmion','Charo','Charyl','Chastity','Chelsae','Chelsea','Chelsey','Chelsie','Chelsy','Cher','Chere','Cherey','Cheri','Cherianne','Cherice','Cherida','Cherie','Cherilyn','Cherilynn','Cherin','Cherise','Cherish','Cherlyn','Cherri','Cherrita','Cherry','Chery','Cherye','Cheryl','Cheslie','Chiarra','Chickie','Chicky','Chiquia','Chiquita','Chlo','Chloe','Chloette','Chloris','Chris','Chrissie','Chrissy','Christa','Christabel','Christabella','Christal','Christalle','Christan','Christean','Christel','Christen','Christi','Christian','Christiana','Christiane','Christie','Christin','Christina','Christine','Christy','Christye','Christyna','Chrysa','Chrysler','Chrystal','Chryste','Chrystel','Cicely','Cicily','Ciel','Cilka','Cinda','Cindee','Cindelyn','Cinderella','Cindi','Cindie','Cindra','Cindy','Cinnamon','Cissiee','Cissy','Clair','Claire','Clara','Clarabelle','Clare','Claresta','Clareta','Claretta','Clarette','Clarey','Clari','Claribel','Clarice','Clarie','Clarinda','Clarine','Clarissa','Clarisse','Clarita','Clary','Claude','Claudelle','Claudetta','Claudette','Claudia','Claudie','Claudina','Claudine','Clea','Clem','Clemence','Clementia','Clementina','Clementine','Clemmie','Clemmy','Cleo','Cleopatra','Clerissa','Clio','Clo','Cloe','Cloris','Clotilda','Clovis','Codee','Codi','Codie','Cody','Coleen','Colene','Coletta','Colette','Colleen','Collen','Collete','Collette','Collie','Colline','Colly','Con','Concettina','Conchita','Concordia','Conni','Connie','Conny','Consolata','Constance','Constancia','Constancy','Constanta','Constantia','Constantina','Constantine','Consuela','Consuelo','Cookie','Cora','Corabel','Corabella','Corabelle','Coral','Coralie','Coraline','Coralyn','Cordelia','Cordelie','Cordey','Cordi','Cordie','Cordula','Cordy','Coreen','Corella','Corenda','Corene','Coretta','Corette','Corey','Cori','Corie','Corilla','Corina','Corine','Corinna','Corinne','Coriss','Corissa','Corliss','Corly','Cornela','Cornelia','Cornelle','Cornie','Corny','Correna','Correy','Corri','Corria
nne','Corrie','Corrina','Corrine','Corrinne','Corry','Cortney','Cory','Cosetta','Cosette','Costanza','Courtenay','Courtnay','Courtney','Crin','Cris','Crissie','Crissy','Crista','Cristabel','Cristal','Cristen','Cristi','Cristie','Cristin','Cristina','Cristine','Cristionna','Cristy','Crysta','Crystal','Crystie','Cthrine','Cyb','Cybil','Cybill','Cymbre','Cynde','Cyndi','Cyndia','Cyndie','Cyndy','Cynthea','Cynthia','Cynthie','Cynthy','Dacey','Dacia','Dacie','Dacy','Dael','Daffi','Daffie','Daffy','Dagmar','Dahlia','Daile','Daisey','Daisi','Daisie','Daisy','Dale','Dalenna','Dalia','Dalila','Dallas','Daloris','Damara','Damaris','Damita','Dana','Danell','Danella','Danette','Dani','Dania','Danica','Danice','Daniela','Daniele','Daniella','Danielle','Danika','Danila','Danit','Danita','Danna','Danni','Dannie','Danny','Dannye','Danya','Danyelle','Danyette','Daphene','Daphna','Daphne','Dara','Darb','Darbie','Darby','Darcee','Darcey','Darci','Darcie','Darcy','Darda','Dareen','Darell','Darelle','Dari','Daria','Darice','Darla','Darleen','Darlene','Darline','Darlleen','Daron','Darrelle','Darryl','Darsey','Darsie','Darya','Daryl','Daryn','Dasha','Dasi','Dasie','Dasya','Datha','Daune','Daveen','Daveta','Davida','Davina','Davine','Davita','Dawn','Dawna','Dayle','Dayna','Ddene','De','Deana','Deane','Deanna','Deanne','Deb','Debbi','Debbie','Debby','Debee','Debera','Debi','Debor','Debora','Deborah','Debra','Dede','Dedie','Dedra','Dee','Deeann','Deeanne','Deedee','Deena','Deerdre','Deeyn','Dehlia','Deidre','Deina','Deirdre','Del','Dela','Delcina','Delcine','Delia','Delila','Delilah','Delinda','Dell','Della','Delly','Delora','Delores','Deloria','Deloris','Delphine','Delphinia','Demeter','Demetra','Demetria','Demetris','Dena','Deni','Denice','Denise','Denna','Denni','Dennie','Denny','Deny','Denys','Denyse','Deonne','Desdemona','Desirae','Desiree','Desiri','Deva','Devan','Devi','Devin','Devina','Devinne','Devon','Devondra','Devonna','Devonne','Devora','Di','Diahann','Dian','Diana','Diandra','Diane','Diane-Marie','Dianemarie','Diann','Dianna','Dianne','Diannne','Didi','Dido','Diena','Dierdre','Dina','Dinah','Dinnie','Dinny','Dion','Dione','Dionis','Dionne','Dita','Dix','Dixie','Dniren','Dode','Dodi','Dodie','Dody','Doe','Doll','Dolley','Dolli','Dollie','Dolly','Dolores','Dolorita','Doloritas','Domeniga','Dominga','Domini','Dominica','Dominique','Dona','Donella','Donelle','Donetta','Donia','Donica','Donielle','Donna','Donnamarie','Donni','Donnie','Donny','Dora','Doralia','Doralin','Doralyn','Doralynn','Doralynne','Dore','Doreen','Dorelia','Dorella','Dorelle','Dorena','Dorene','Doretta','Dorette','Dorey','Dori','Doria','Dorian','Dorice','Dorie','Dorine','Doris','Dorisa','Dorise','Dorita','Doro','Dorolice','Dorolisa','Dorotea','Doroteya','Dorothea','Dorothee','Dorothy','Dorree','Dorri','Dorrie','Dorris','Dorry','Dorthea','Dorthy','Dory','Dosi','Dot','Doti','Dotti','Dottie','Dotty','Dre','Dreddy','Dredi','Drona','Dru','Druci','Drucie','Drucill','Drucy','Drusi','Drusie','Drusilla','Drusy','Dulce','Dulcea','Dulci','Dulcia','Dulciana','Dulcie','Dulcine','Dulcinea','Dulcy','Dulsea','Dusty','Dyan','Dyana','Dyane','Dyann','Dyanna','Dyanne','Dyna','Dynah','Eachelle','Eada','Eadie','Eadith','Ealasaid','Eartha','Easter','Eba','Ebba','Ebonee','Ebony','Eda','Eddi','Eddie','Eddy','Ede','Edee','Edeline','Eden','Edi','Edie','Edin','Edita','Edith','Editha','Edithe','Ediva','Edna','Edwina','Edy','Edyth','Edythe','Effie','Eileen','Eilis','Eimile','Eirena','Ekaterina','Elaina','Elaine','Elana','Elane','Elayne','Elberta','Elbertina','Elberti
ne','Eleanor','Eleanora','Eleanore','Electra','Eleen','Elena','Elene','Eleni','Elenore','Eleonora','Eleonore','Elfie','Elfreda','Elfrida','Elfrieda','Elga','Elianora','Elianore','Elicia','Elie','Elinor','Elinore','Elisa','Elisabet','Elisabeth','Elisabetta','Elise','Elisha','Elissa','Elita','Eliza','Elizabet','Elizabeth','Elka','Elke','Ella','Elladine','Elle','Ellen','Ellene','Ellette','Elli','Ellie','Ellissa','Elly','Ellyn','Ellynn','Elmira','Elna','Elnora','Elnore','Eloisa','Eloise','Elonore','Elora','Elsa','Elsbeth','Else','Elset','Elsey','Elsi','Elsie','Elsinore','Elspeth','Elsy','Elva','Elvera','Elvina','Elvira','Elwira','Elyn','Elyse','Elysee','Elysha','Elysia','Elyssa','Em','Ema','Emalee','Emalia','Emelda','Emelia','Emelina','Emeline','Emelita','Emelyne','Emera','Emilee','Emili','Emilia','Emilie','Emiline','Emily','Emlyn','Emlynn','Emlynne','Emma','Emmalee','Emmaline','Emmalyn','Emmalynn','Emmalynne','Emmeline','Emmey','Emmi','Emmie','Emmy','Emmye','Emogene','Emyle','Emylee','Engracia','Enid','Enrica','Enrichetta','Enrika','Enriqueta','Eolanda','Eolande','Eran','Erda','Erena','Erica','Ericha','Ericka','Erika','Erin','Erina','Erinn','Erinna','Erma','Ermengarde','Ermentrude','Ermina','Erminia','Erminie','Erna','Ernaline','Ernesta','Ernestine','Ertha','Eryn','Esma','Esmaria','Esme','Esmeralda','Essa','Essie','Essy','Esta','Estel','Estele','Estell','Estella','Estelle','Ester','Esther','Estrella','Estrellita','Ethel','Ethelda','Ethelin','Ethelind','Etheline','Ethelyn','Ethyl','Etta','Etti','Ettie','Etty','Eudora','Eugenia','Eugenie','Eugine','Eula','Eulalie','Eunice','Euphemia','Eustacia','Eva','Evaleen','Evangelia','Evangelin','Evangelina','Evangeline','Evania','Evanne','Eve','Eveleen','Evelina','Eveline','Evelyn','Evey','Evie','Evita','Evonne','Evvie','Evvy','Evy','Eyde','Eydie','Ezmeralda','Fae','Faina','Faith','Fallon','Fan','Fanchette','Fanchon','Fancie','Fancy','Fanechka','Fania','Fanni','Fannie','Fanny','Fanya','Fara','Farah','Farand','Farica','Farra','Farrah','Farrand','Faun','Faunie','Faustina','Faustine','Fawn','Fawne','Fawnia','Fay','Faydra','Faye','Fayette','Fayina','Fayre','Fayth','Faythe','Federica','Fedora','Felecia','Felicdad','Felice','Felicia','Felicity','Felicle','Felipa','Felisha','Felita','Feliza','Fenelia','Feodora','Ferdinanda','Ferdinande','Fern','Fernanda','Fernande','Fernandina','Ferne','Fey','Fiann','Fianna','Fidela','Fidelia','Fidelity','Fifi','Fifine','Filia','Filide','Filippa','Fina','Fiona','Fionna','Fionnula','Fiorenze','Fleur','Fleurette','Flo','Flor','Flora','Florance','Flore','Florella','Florence','Florencia','Florentia','Florenza','Florette','Flori','Floria','Florida','Florie','Florina','Florinda','Floris','Florri','Florrie','Florry','Flory','Flossi','Flossie','Flossy','Flss','Fran','Francene','Frances','Francesca','Francine','Francisca','Franciska','Francoise','Francyne','Frank','Frankie','Franky','Franni','Frannie','Franny','Frayda','Fred','Freda','Freddi','Freddie','Freddy','Fredelia','Frederica','Fredericka','Frederique','Fredi','Fredia','Fredra','Fredrika','Freida','Frieda','Friederike','Fulvia','Gabbey','Gabbi','Gabbie','Gabey','Gabi','Gabie','Gabriel','Gabriela','Gabriell','Gabriella','Gabrielle','Gabriellia','Gabrila','Gaby','Gae','Gael','Gail','Gale','Gale','Galina','Garland','Garnet','Garnette','Gates','Gavra','Gavrielle','Gay','Gaye','Gayel','Gayla','Gayle','Gayleen','Gaylene','Gaynor','Gelya','Gena','Gene','Geneva','Genevieve','Genevra','Genia','Genna','Genni','Gennie','Gennifer','Genny','Genovera','Genvieve','George','Georgeanna','Georgeanne
','Georgena','Georgeta','Georgetta','Georgette','Georgia','Georgiana','Georgianna','Georgianne','Georgie','Georgina','Georgine','Geralda','Geraldine','Gerda','Gerhardine','Geri','Gerianna','Gerianne','Gerladina','Germain','Germaine','Germana','Gerri','Gerrie','Gerrilee','Gerry','Gert','Gerta','Gerti','Gertie','Gertrud','Gertruda','Gertrude','Gertrudis','Gerty','Giacinta','Giana','Gianina','Gianna','Gigi','Gilberta','Gilberte','Gilbertina','Gilbertine','Gilda','Gilemette','Gill','Gillan','Gilli','Gillian','Gillie','Gilligan','Gilly','Gina','Ginelle','Ginevra','Ginger','Ginni','Ginnie','Ginnifer','Ginny','Giorgia','Giovanna','Gipsy','Giralda','Gisela','Gisele','Gisella','Giselle','Giuditta','Giulia','Giulietta','Giustina','Gizela','Glad','Gladi','Gladys','Gleda','Glen','Glenda','Glenine','Glenn','Glenna','Glennie','Glennis','Glori','Gloria','Gloriana','Gloriane','Glory','Glyn','Glynda','Glynis','Glynnis','Gnni','Godiva','Golda','Goldarina','Goldi','Goldia','Goldie','Goldina','Goldy','Grace','Gracia','Gracie','Grata','Gratia','Gratiana','Gray','Grayce','Grazia','Greer','Greta','Gretal','Gretchen','Grete','Gretel','Grethel','Gretna','Gretta','Grier','Griselda','Grissel','Guendolen','Guenevere','Guenna','Guglielma','Gui','Guillema','Guillemette','Guinevere','Guinna','Gunilla','Gus','Gusella','Gussi','Gussie','Gussy','Gusta','Gusti','Gustie','Gusty','Gwen','Gwendolen','Gwendolin','Gwendolyn','Gweneth','Gwenette','Gwenneth','Gwenni','Gwennie','Gwenny','Gwenora','Gwenore','Gwyn','Gwyneth','Gwynne','Gypsy','Hadria','Hailee','Haily','Haleigh','Halette','Haley','Hali','Halie','Halimeda','Halley','Halli','Hallie','Hally','Hana','Hanna','Hannah','Hanni','Hannie','Hannis','Hanny','Happy','Harlene','Harley','Harli','Harlie','Harmonia','Harmonie','Harmony','Harri','Harrie','Harriet','Harriett','Harrietta','Harriette','Harriot','Harriott','Hatti','Hattie','Hatty','Hayley','Hazel','Heath','Heather','Heda','Hedda','Heddi','Heddie','Hedi','Hedvig','Hedvige','Hedwig','Hedwiga','Hedy','Heida','Heidi','Heidie','Helaina','Helaine','Helen','Helen-Elizabeth','Helena','Helene','Helenka','Helga','Helge','Helli','Heloise','Helsa','Helyn','Hendrika','Henka','Henrie','Henrieta','Henrietta','Henriette','Henryetta','Hephzibah','Hermia','Hermina','Hermine','Herminia','Hermione','Herta','Hertha','Hester','Hesther','Hestia','Hetti','Hettie','Hetty','Hilary','Hilda','Hildagard','Hildagarde','Hilde','Hildegaard','Hildegarde','Hildy','Hillary','Hilliary','Hinda','Holli','Hollie','Holly','Holly-Anne','Hollyanne','Honey','Honor','Honoria','Hope','Horatia','Hortense','Hortensia','Hulda','Hyacinth','Hyacintha','Hyacinthe','Hyacinthia','Hyacinthie','Hynda','Ianthe','Ibbie','Ibby','Ida','Idalia','Idalina','Idaline','Idell','Idelle','Idette','Ileana','Ileane','Ilene','Ilise','Ilka','Illa','Ilsa','Ilse','Ilysa','Ilyse','Ilyssa','Imelda','Imogen','Imogene','Imojean','Ina','Indira','Ines','Inesita','Inessa','Inez','Inga','Ingaberg','Ingaborg','Inge','Ingeberg','Ingeborg','Inger','Ingrid','Ingunna','Inna','Iolande','Iolanthe','Iona','Iormina','Ira','Irena','Irene','Irina','Iris','Irita','Irma','Isa','Isabel','Isabelita','Isabella','Isabelle','Isadora','Isahella','Iseabal','Isidora','Isis','Isobel','Issi','Issie','Issy','Ivett','Ivette','Ivie','Ivonne','Ivory','Ivy','Izabel','Jacenta','Jacinda','Jacinta','Jacintha','Jacinthe','Jackelyn','Jacki','Jackie','Jacklin','Jacklyn','Jackquelin','Jackqueline','Jacky','Jaclin','Jaclyn','Jacquelin','Jacqueline','Jacquelyn','Jacquelynn','Jacquenetta','Jacquenette','Jacquetta','Jacquette','Jacqui','Jacqu
ie','Jacynth','Jada','Jade','Jaime','Jaimie','Jaine','Jami','Jamie','Jamima','Jammie','Jan','Jana','Janaya','Janaye','Jandy','Jane','Janean','Janeczka','Janeen','Janel','Janela','Janella','Janelle','Janene','Janenna','Janessa','Janet','Janeta','Janetta','Janette','Janeva','Janey','Jania','Janice','Janie','Janifer','Janina','Janine','Janis','Janith','Janka','Janna','Jannel','Jannelle','Janot','Jany','Jaquelin','Jaquelyn','Jaquenetta','Jaquenette','Jaquith','Jasmin','Jasmina','Jasmine','Jayme','Jaymee','Jayne','Jaynell','Jazmin','Jean','Jeana','Jeane','Jeanelle','Jeanette','Jeanie','Jeanine','Jeanna','Jeanne','Jeannette','Jeannie','Jeannine','Jehanna','Jelene','Jemie','Jemima','Jemimah','Jemmie','Jemmy','Jen','Jena','Jenda','Jenelle','Jeni','Jenica','Jeniece','Jenifer','Jeniffer','Jenilee','Jenine','Jenn','Jenna','Jennee','Jennette','Jenni','Jennica','Jennie','Jennifer','Jennilee','Jennine','Jenny','Jeralee','Jere','Jeri','Jermaine','Jerrie','Jerrilee','Jerrilyn','Jerrine','Jerry','Jerrylee','Jess','Jessa','Jessalin','Jessalyn','Jessamine','Jessamyn','Jesse','Jesselyn','Jessi','Jessica','Jessie','Jessika','Jessy','Jewel','Jewell','Jewelle','Jill','Jillana','Jillane','Jillayne','Jilleen','Jillene','Jilli','Jillian','Jillie','Jilly','Jinny','Jo','Jo Ann','Jo-Ann','Jo-Anne','Joan','Joana','Joane','Joanie','Joann','Joanna','Joanne','Joannes','Jobey','Jobi','Jobie','Jobina','Joby','Jobye','Jobyna','Jocelin','Joceline','Jocelyn','Jocelyne','Jodee','Jodi','Jodie','Jody','Joeann','Joela','Joelie','Joell','Joella','Joelle','Joellen','Joelly','Joellyn','Joelynn','Joete','Joey','Johanna','Johannah','Johna','Johnath','Johnette','Johnna','Joice','Jojo','Jolee','Joleen','Jolene','Joletta','Joli','Jolie','Joline','Joly','Jolyn','Jolynn','Jonell','Joni','Jonie','Jonis','Jordain','Jordan','Jordana','Jordanna','Jorey','Jori','Jorie','Jorrie','Jorry','Joscelin','Josee','Josefa','Josefina','Josepha','Josephina','Josephine','Josey','Josi','Josie','Josselyn','Josy','Jourdan','Joy','Joya','Joyan','Joyann','Joyce','Joycelin','Joye','Jsandye','Juana','Juanita','Judi','Judie','Judith','Juditha','Judy','Judye','Juieta','Julee','Juli','Julia','Juliana','Juliane','Juliann','Julianna','Julianne','Julie','Julienne','Juliet','Julieta','Julietta','Juliette','Julina','Juline','Julissa','Julita','June','Junette','Junia','Junie','Junina','Justina','Justine','Justinn','Jyoti','Kacey','Kacie','Kacy','Kaela','Kai','Kaia','Kaila','Kaile','Kailey','Kaitlin','Kaitlyn','Kaitlynn','Kaja','Kakalina','Kala','Kaleena','Kali','Kalie','Kalila','Kalina','Kalinda','Kalindi','Kalli','Kally','Kameko','Kamila','Kamilah','Kamillah','Kandace','Kandy','Kania','Kanya','Kara','Kara-Lynn','Karalee','Karalynn','Kare','Karee','Karel','Karen','Karena','Kari','Karia','Karie','Karil','Karilynn','Karin','Karina','Karine','Kariotta','Karisa','Karissa','Karita','Karla','Karlee','Karleen','Karlen','Karlene','Karlie','Karlotta','Karlotte','Karly','Karlyn','Karmen','Karna','Karol','Karola','Karole','Karolina','Karoline','Karoly','Karon','Karrah','Karrie','Karry','Kary','Karyl','Karylin','Karyn','Kasey','Kass','Kassandra','Kassey','Kassi','Kassia','Kassie','Kat','Kata','Katalin','Kate','Katee','Katerina','Katerine','Katey','Kath','Katha','Katharina','Katharine','Katharyn','Kathe','Katherina','Katherine','Katheryn','Kathi','Kathie','Kathleen','Kathlin','Kathrine','Kathryn','Kathryne','Kathy','Kathye','Kati','Katie','Katina','Katine','Katinka','Katleen','Katlin','Katrina','Katrine','Katrinka','Katti','Kattie','Katuscha','Katusha','Katy','Katya','Kay','Kaycee','Kaye
','Kayla','Kayle','Kaylee','Kayley','Kaylil','Kaylyn','Keeley','Keelia','Keely','Kelcey','Kelci','Kelcie','Kelcy','Kelila','Kellen','Kelley','Kelli','Kellia','Kellie','Kellina','Kellsie','Kelly','Kellyann','Kelsey','Kelsi','Kelsy','Kendra','Kendre','Kenna','Keri','Keriann','Kerianne','Kerri','Kerrie','Kerrill','Kerrin','Kerry','Kerstin','Kesley','Keslie','Kessia','Kessiah','Ketti','Kettie','Ketty','Kevina','Kevyn','Ki','Kiah','Kial','Kiele','Kiersten','Kikelia','Kiley','Kim','Kimberlee','Kimberley','Kimberli','Kimberly','Kimberlyn','Kimbra','Kimmi','Kimmie','Kimmy','Kinna','Kip','Kipp','Kippie','Kippy','Kira','Kirbee','Kirbie','Kirby','Kiri','Kirsten','Kirsteni','Kirsti','Kirstin','Kirstyn','Kissee','Kissiah','Kissie','Kit','Kitti','Kittie','Kitty','Kizzee','Kizzie','Klara','Klarika','Klarrisa','Konstance','Konstanze','Koo','Kora','Koral','Koralle','Kordula','Kore','Korella','Koren','Koressa','Kori','Korie','Korney','Korrie','Korry','Kris','Krissie','Krissy','Krista','Kristal','Kristan','Kriste','Kristel','Kristen','Kristi','Kristien','Kristin','Kristina','Kristine','Kristy','Kristyn','Krysta','Krystal','Krystalle','Krystle','Krystyna','Kyla','Kyle','Kylen','Kylie','Kylila','Kylynn','Kym','Kynthia','Kyrstin','La.Verne','Lacee','Lacey','Lacie','Lacy','Ladonna','Laetitia','Laina','Lainey','Lana','Lanae','Lane','Lanette','Laney','Lani','Lanie','Lanita','Lanna','Lanni','Lanny','Lara','Laraine','Lari','Larina','Larine','Larisa','Larissa','Lark','Laryssa','Latashia','Latia','Latisha','Latrena','Latrina','Laura','Lauraine','Laural','Lauralee','Laure','Lauree','Laureen','Laurel','Laurella','Lauren','Laurena','Laurene','Lauretta','Laurette','Lauri','Laurianne','Laurice','Laurie','Lauryn','Lavena','Laverna','Laverne','Lavina','Lavinia','Lavinie','Layla','Layne','Layney','Lea','Leah','Leandra','Leann','Leanna','Leanor','Leanora','Lebbie','Leda','Lee','Leeann','Leeanne','Leela','Leelah','Leena','Leesa','Leese','Legra','Leia','Leigh','Leigha','Leila','Leilah','Leisha','Lela','Lelah','Leland','Lelia','Lena','Lenee','Lenette','Lenka','Lenna','Lenora','Lenore','Leodora','Leoine','Leola','Leoline','Leona','Leonanie','Leone','Leonelle','Leonie','Leonora','Leonore','Leontine','Leontyne','Leora','Leshia','Lesley','Lesli','Leslie','Lesly','Lesya','Leta','Lethia','Leticia','Letisha','Letitia','Letizia','Letta','Letti','Lettie','Letty','Lexi','Lexie','Lexine','Lexis','Lexy','Leyla','Lezlie','Lia','Lian','Liana','Liane','Lianna','Lianne','Lib','Libbey','Libbi','Libbie','Libby','Licha','Lida','Lidia','Liesa','Lil','Lila','Lilah','Lilas','Lilia','Lilian','Liliane','Lilias','Lilith','Lilla','Lilli','Lillian','Lillis','Lilllie','Lilly','Lily','Lilyan','Lin','Lina','Lind','Linda','Lindi','Lindie','Lindsay','Lindsey','Lindsy','Lindy','Linea','Linell','Linet','Linette','Linn','Linnea','Linnell','Linnet','Linnie','Linzy','Lira','Lisa','Lisabeth','Lisbeth','Lise','Lisetta','Lisette','Lisha','Lishe','Lissa','Lissi','Lissie','Lissy','Lita','Liuka','Liv','Liva','Livia','Livvie','Livvy','Livvyy','Livy','Liz','Liza','Lizabeth','Lizbeth','Lizette','Lizzie','Lizzy','Loella','Lois','Loise','Lola','Loleta','Lolita','Lolly','Lona','Lonee','Loni','Lonna','Lonni','Lonnie','Lora','Lorain','Loraine','Loralee','Loralie','Loralyn','Loree','Loreen','Lorelei','Lorelle','Loren','Lorena','Lorene','Lorenza','Loretta','Lorette','Lori','Loria','Lorianna','Lorianne','Lorie','Lorilee','Lorilyn','Lorinda','Lorine','Lorita','Lorna','Lorne','Lorraine','Lorrayne','Lorri','Lorrie','Lorrin','Lorry','Lory','Lotta','Lotte','Lotti','Lottie','Lotty','Lou','
Louella','Louisa','Louise','Louisette','Loutitia','Lu','Luce','Luci','Lucia','Luciana','Lucie','Lucienne','Lucila','Lucilia','Lucille','Lucina','Lucinda','Lucine','Lucita','Lucky','Lucretia','Lucy','Ludovika','Luella','Luelle','Luisa','Luise','Lula','Lulita','Lulu','Lura','Lurette','Lurleen','Lurlene','Lurline','Lusa','Luz','Lyda','Lydia','Lydie','Lyn','Lynda','Lynde','Lyndel','Lyndell','Lyndsay','Lyndsey','Lyndsie','Lyndy','Lynea','Lynelle','Lynett','Lynette','Lynn','Lynna','Lynne','Lynnea','Lynnell','Lynnelle','Lynnet','Lynnett','Lynnette','Lynsey','Lyssa','Mab','Mabel','Mabelle','Mable','Mada','Madalena','Madalyn','Maddalena','Maddi','Maddie','Maddy','Madel','Madelaine','Madeleine','Madelena','Madelene','Madelin','Madelina','Madeline','Madella','Madelle','Madelon','Madelyn','Madge','Madlen','Madlin','Madonna','Mady','Mae','Maegan','Mag','Magda','Magdaia','Magdalen','Magdalena','Magdalene','Maggee','Maggi','Maggie','Maggy','Mahala','Mahalia','Maia','Maible','Maiga','Maighdiln','Mair','Maire','Maisey','Maisie','Maitilde','Mala','Malanie','Malena','Malia','Malina','Malinda','Malinde','Malissa','Malissia','Mallissa','Mallorie','Mallory','Malorie','Malory','Malva','Malvina','Malynda','Mame','Mamie','Manda','Mandi','Mandie','Mandy','Manon','Manya','Mara','Marabel','Marcela','Marcelia','Marcella','Marcelle','Marcellina','Marcelline','Marchelle','Marci','Marcia','Marcie','Marcile','Marcille','Marcy','Mareah','Maren','Marena','Maressa','Marga','Margalit','Margalo','Margaret','Margareta','Margarete','Margaretha','Margarethe','Margaretta','Margarette','Margarita','Margaux','Marge','Margeaux','Margery','Marget','Margette','Margi','Margie','Margit','Margo','Margot','Margret','Marguerite','Margy','Mari','Maria','Mariam','Marian','Mariana','Mariann','Marianna','Marianne','Maribel','Maribelle','Maribeth','Marice','Maridel','Marie','Marie-Ann','Marie-Jeanne','Marieann','Mariejeanne','Mariel','Mariele','Marielle','Mariellen','Marietta','Mariette','Marigold','Marijo','Marika','Marilee','Marilin','Marillin','Marilyn','Marin','Marina','Marinna','Marion','Mariquilla','Maris','Marisa','Mariska','Marissa','Marita','Maritsa','Mariya','Marj','Marja','Marje','Marji','Marjie','Marjorie','Marjory','Marjy','Marketa','Marla','Marlane','Marleah','Marlee','Marleen','Marlena','Marlene','Marley','Marlie','Marline','Marlo','Marlyn','Marna','Marne','Marney','Marni','Marnia','Marnie','Marquita','Marrilee','Marris','Marrissa','Marsha','Marsiella','Marta','Martelle','Martguerita','Martha','Marthe','Marthena','Marti','Martica','Martie','Martina','Martita','Marty','Martynne','Mary','Marya','Maryann','Maryanna','Maryanne','Marybelle','Marybeth','Maryellen','Maryjane','Maryjo','Maryl','Marylee','Marylin','Marylinda','Marylou','Marylynne','Maryrose','Marys','Marysa','Masha','Matelda','Mathilda','Mathilde','Matilda','Matilde','Matti','Mattie','Matty','Maud','Maude','Maudie','Maura','Maure','Maureen','Maureene','Maurene','Maurine','Maurise','Maurita','Maurizia','Mavis','Mavra','Max','Maxi','Maxie','Maxine','Maxy','May','Maybelle','Maye','Mead','Meade','Meagan','Meaghan','Meara','Mechelle','Meg','Megan','Megen','Meggi','Meggie','Meggy','Meghan','Meghann','Mehetabel','Mei','Mel','Mela','Melamie','Melania','Melanie','Melantha','Melany','Melba','Melesa','Melessa','Melicent','Melina','Melinda','Melinde','Melisa','Melisande','Melisandra','Melisenda','Melisent','Melissa','Melisse','Melita','Melitta','Mella','Melli','Mellicent','Mellie','Mellisa','Mellisent','Melloney','Melly','Melodee','Melodie','Melody','Melonie','Melony','Melosa','Melva',
'Mercedes','Merci','Mercie','Mercy','Meredith','Meredithe','Meridel','Meridith','Meriel','Merilee','Merilyn','Meris','Merissa','Merl','Merla','Merle','Merlina','Merline','Merna','Merola','Merralee','Merridie','Merrie','Merrielle','Merrile','Merrilee','Merrili','Merrill','Merrily','Merry','Mersey','Meryl','Meta','Mia','Micaela','Michaela','Michaelina','Michaeline','Michaella','Michal','Michel','Michele','Michelina','Micheline','Michell','Michelle','Micki','Mickie','Micky','Midge','Mignon','Mignonne','Miguela','Miguelita','Mikaela','Mil','Mildred','Mildrid','Milena','Milicent','Milissent','Milka','Milli','Millicent','Millie','Millisent','Milly','Milzie','Mimi','Min','Mina','Minda','Mindy','Minerva','Minetta','Minette','Minna','Minnaminnie','Minne','Minni','Minnie','Minnnie','Minny','Minta','Miof.Mela','Miquela','Mira','Mirabel','Mirabella','Mirabelle','Miran','Miranda','Mireielle','Mireille','Mirella','Mirelle','Miriam','Mirilla','Mirna','Misha','Missie','Missy','Misti','Misty','Mitzi','Modesta','Modestia','Modestine','Modesty','Moina','Moira','Moll','Mollee','Molli','Mollie','Molly','Mommy','Mona','Monah','Monica','Monika','Monique','Mora','Moreen','Morena','Morgan','Morgana','Morganica','Morganne','Morgen','Moria','Morissa','Morna','Moselle','Moyna','Moyra','Mozelle','Muffin','Mufi','Mufinella','Muire','Mureil','Murial','Muriel','Murielle','Myra','Myrah','Myranda','Myriam','Myrilla','Myrle','Myrlene','Myrna','Myrta','Myrtia','Myrtice','Myrtie','Myrtle','Nada','Nadean','Nadeen','Nadia','Nadine','Nadiya','Nady','Nadya','Nalani','Nan','Nana','Nananne','Nance','Nancee','Nancey','Nanci','Nancie','Nancy','Nanete','Nanette','Nani','Nanice','Nanine','Nannette','Nanni','Nannie','Nanny','Nanon','Naoma','Naomi','Nara','Nari','Nariko','Nat','Nata','Natala','Natalee','Natalie','Natalina','Nataline','Natalya','Natasha','Natassia','Nathalia','Nathalie','Natividad','Natka','Natty','Neala','Neda','Nedda','Nedi','Neely','Neila','Neile','Neilla','Neille','Nelia','Nelie','Nell','Nelle','Nelli','Nellie','Nelly','Nerissa','Nerita','Nert','Nerta','Nerte','Nerti','Nertie','Nerty','Nessa','Nessi','Nessie','Nessy','Nesta','Netta','Netti','Nettie','Nettle','Netty','Nevsa','Neysa','Nichol','Nichole','Nicholle','Nicki','Nickie','Nicky','Nicol','Nicola','Nicole','Nicolea','Nicolette','Nicoli','Nicolina','Nicoline','Nicolle','Nikaniki','Nike','Niki','Nikki','Nikkie','Nikoletta','Nikolia','Nina','Ninetta','Ninette','Ninnetta','Ninnette','Ninon','Nissa','Nisse','Nissie','Nissy','Nita','Nixie','Noami','Noel','Noelani','Noell','Noella','Noelle','Noellyn','Noelyn','Noemi','Nola','Nolana','Nolie','Nollie','Nomi','Nona','Nonah','Noni','Nonie','Nonna','Nonnah','Nora','Norah','Norean','Noreen','Norene','Norina','Norine','Norma','Norri','Norrie','Norry','Novelia','Nydia','Nyssa','Octavia','Odele','Odelia','Odelinda','Odella','Odelle','Odessa','Odetta','Odette','Odilia','Odille','Ofelia','Ofella','Ofilia','Ola','Olenka','Olga','Olia','Olimpia','Olive','Olivette','Olivia','Olivie','Oliy','Ollie','Olly','Olva','Olwen','Olympe','Olympia','Olympie','Ondrea','Oneida','Onida','Oona','Opal','Opalina','Opaline','Ophelia','Ophelie','Ora','Oralee','Oralia','Oralie','Oralla','Oralle','Orel','Orelee','Orelia','Orelie','Orella','Orelle','Oriana','Orly','Orsa','Orsola','Ortensia','Otha','Othelia','Othella','Othilia','Othilie','Ottilie','Page','Paige','Paloma','Pam','Pamela','Pamelina','Pamella','Pammi','Pammie','Pammy','Pandora','Pansie','Pansy','Paola','Paolina','Papagena','Pat','Patience','Patrica','Patrice','Patricia','Patrizia','Patsy','Patt
i','Pattie','Patty','Paula','Paule','Pauletta','Paulette','Pauli','Paulie','Paulina','Pauline','Paulita','Pauly','Pavia','Pavla','Pearl','Pearla','Pearle','Pearline','Peg','Pegeen','Peggi','Peggie','Peggy','Pen','Penelopa','Penelope','Penni','Pennie','Penny','Pepi','Pepita','Peri','Peria','Perl','Perla','Perle','Perri','Perrine','Perry','Persis','Pet','Peta','Petra','Petrina','Petronella','Petronia','Petronilla','Petronille','Petunia','Phaedra','Phaidra','Phebe','Phedra','Phelia','Phil','Philipa','Philippa','Philippe','Philippine','Philis','Phillida','Phillie','Phillis','Philly','Philomena','Phoebe','Phylis','Phyllida','Phyllis','Phyllys','Phylys','Pia','Pier','Pierette','Pierrette','Pietra','Piper','Pippa','Pippy','Polly','Pollyanna','Pooh','Poppy','Portia','Pris','Prisca','Priscella','Priscilla','Prissie','Pru','Prudence','Prudi','Prudy','Prue','Queenie','Quentin','Querida','Quinn','Quinta','Quintana','Quintilla','Quintina','Rachael','Rachel','Rachele','Rachelle','Rae','Raeann','Raf','Rafa','Rafaela','Rafaelia','Rafaelita','Rahal','Rahel','Raina','Raine','Rakel','Ralina','Ramona','Ramonda','Rana','Randa','Randee','Randene','Randi','Randie','Randy','Ranee','Rani','Rania','Ranice','Ranique','Ranna','Raphaela','Raquel','Raquela','Rasia','Rasla','Raven','Ray','Raychel','Raye','Rayna','Raynell','Rayshell','Rea','Reba','Rebbecca','Rebe','Rebeca','Rebecca','Rebecka','Rebeka','Rebekah','Rebekkah','Ree','Reeba','Reena','Reeta','Reeva','Regan','Reggi','Reggie','Regina','Regine','Reiko','Reina','Reine','Remy','Rena','Renae','Renata','Renate','Rene','Renee','Renell','Renelle','Renie','Rennie','Reta','Retha','Revkah','Rey','Reyna','Rhea','Rheba','Rheta','Rhetta','Rhiamon','Rhianna','Rhianon','Rhoda','Rhodia','Rhodie','Rhody','Rhona','Rhonda','Riane','Riannon','Rianon','Rica','Ricca','Rici','Ricki','Rickie','Ricky','Riki','Rikki','Rina','Risa','Rita','Riva','Rivalee','Rivi','Rivkah','Rivy','Roana','Roanna','Roanne','Robbi','Robbie','Robbin','Robby','Robbyn','Robena','Robenia','Roberta','Robin','Robina','Robinet','Robinett','Robinetta','Robinette','Robinia','Roby','Robyn','Roch','Rochell','Rochella','Rochelle','Rochette','Roda','Rodi','Rodie','Rodina','Rois','Romola','Romona','Romonda','Romy','Rona','Ronalda','Ronda','Ronica','Ronna','Ronni','Ronnica','Ronnie','Ronny','Roobbie','Rora','Rori','Rorie','Rory','Ros','Rosa','Rosabel','Rosabella','Rosabelle','Rosaleen','Rosalia','Rosalie','Rosalind','Rosalinda','Rosalinde','Rosaline','Rosalyn','Rosalynd','Rosamond','Rosamund','Rosana','Rosanna','Rosanne','Rose','Roseann','Roseanna','Roseanne','Roselia','Roselin','Roseline','Rosella','Roselle','Rosemaria','Rosemarie','Rosemary','Rosemonde','Rosene','Rosetta','Rosette','Roshelle','Rosie','Rosina','Rosita','Roslyn','Rosmunda','Rosy','Row','Rowe','Rowena','Roxana','Roxane','Roxanna','Roxanne','Roxi','Roxie','Roxine','Roxy','Roz','Rozalie','Rozalin','Rozamond','Rozanna','Rozanne','Roze','Rozele','Rozella','Rozelle','Rozina','Rubetta','Rubi','Rubia','Rubie','Rubina','Ruby','Ruperta','Ruth','Ruthann','Ruthanne','Ruthe','Ruthi','Ruthie','Ruthy','Ryann','Rycca','Saba','Sabina','Sabine','Sabra','Sabrina','Sacha','Sada','Sadella','Sadie','Sadye','Saidee','Sal','Salaidh','Sallee','Salli','Sallie','Sally','Sallyann','Sallyanne','Saloma','Salome','Salomi','Sam','Samantha','Samara','Samaria','Sammy','Sande','Sandi','Sandie','Sandra','Sandy','Sandye','Sapphira','Sapphire','Sara','Sara-Ann','Saraann','Sarah','Sarajane','Saree','Sarena','Sarene','Sarette','Sari','Sarina','Sarine','Sarita','Sascha','Sasha','Sashenka','Saudra','
Saundra','Savina','Sayre','Scarlet','Scarlett','Sean','Seana','Seka','Sela','Selena','Selene','Selestina','Selia','Selie','Selina','Selinda','Seline','Sella','Selle','Selma','Sena','Sephira','Serena','Serene','Shae','Shaina','Shaine','Shalna','Shalne','Shana','Shanda','Shandee','Shandeigh','Shandie','Shandra','Shandy','Shane','Shani','Shanie','Shanna','Shannah','Shannen','Shannon','Shanon','Shanta','Shantee','Shara','Sharai','Shari','Sharia','Sharity','Sharl','Sharla','Sharleen','Sharlene','Sharline','Sharon','Sharona','Sharron','Sharyl','Shaun','Shauna','Shawn','Shawna','Shawnee','Shay','Shayla','Shaylah','Shaylyn','Shaylynn','Shayna','Shayne','Shea','Sheba','Sheela','Sheelagh','Sheelah','Sheena','Sheeree','Sheila','Sheila-Kathryn','Sheilah','Shel','Shela','Shelagh','Shelba','Shelbi','Shelby','Shelia','Shell','Shelley','Shelli','Shellie','Shelly','Shena','Sher','Sheree','Sheri','Sherie','Sherill','Sherilyn','Sherline','Sherri','Sherrie','Sherry','Sherye','Sheryl','Shina','Shir','Shirl','Shirlee','Shirleen','Shirlene','Shirley','Shirline','Shoshana','Shoshanna','Siana','Sianna','Sib','Sibbie','Sibby','Sibeal','Sibel','Sibella','Sibelle','Sibilla','Sibley','Sibyl','Sibylla','Sibylle','Sidoney','Sidonia','Sidonnie','Sigrid','Sile','Sileas','Silva','Silvana','Silvia','Silvie','Simona','Simone','Simonette','Simonne','Sindee','Siobhan','Sioux','Siouxie','Sisely','Sisile','Sissie','Sissy','Siusan','Sofia','Sofie','Sondra','Sonia','Sonja','Sonni','Sonnie','Sonnnie','Sonny','Sonya','Sophey','Sophi','Sophia','Sophie','Sophronia','Sorcha','Sosanna','Stace','Stacee','Stacey','Staci','Stacia','Stacie','Stacy','Stafani','Star','Starla','Starlene','Starlin','Starr','Stefa','Stefania','Stefanie','Steffane','Steffi','Steffie','Stella','Stepha','Stephana','Stephani','Stephanie','Stephannie','Stephenie','Stephi','Stephie','Stephine','Stesha','Stevana','Stevena','Stoddard','Storm','Stormi','Stormie','Stormy','Sue','Suellen','Sukey','Suki','Sula','Sunny','Sunshine','Susan','Susana','Susanetta','Susann','Susanna','Susannah','Susanne','Susette','Susi','Susie','Susy','Suzann','Suzanna','Suzanne','Suzette','Suzi','Suzie','Suzy','Sybil','Sybila','Sybilla','Sybille','Sybyl','Sydel','Sydelle','Sydney','Sylvia','Tabatha','Tabbatha','Tabbi','Tabbie','Tabbitha','Tabby','Tabina','Tabitha','Taffy','Talia','Tallia','Tallie','Tallou','Tallulah','Tally','Talya','Talyah','Tamar','Tamara','Tamarah','Tamarra','Tamera','Tami','Tamiko','Tamma','Tammara','Tammi','Tammie','Tammy','Tamqrah','Tamra','Tana','Tandi','Tandie','Tandy','Tanhya','Tani','Tania','Tanitansy','Tansy','Tanya','Tara','Tarah','Tarra','Tarrah','Taryn','Tasha','Tasia','Tate','Tatiana','Tatiania','Tatum','Tawnya','Tawsha','Ted','Tedda','Teddi','Teddie','Teddy','Tedi','Tedra','Teena','TEirtza','Teodora','Tera','Teresa','Terese','Teresina','Teresita','Teressa','Teri','Teriann','Terra','Terri','Terrie','Terrijo','Terry','Terrye','Tersina','Terza','Tess','Tessa','Tessi','Tessie','Tessy','Thalia','Thea','Theadora','Theda','Thekla','Thelma','Theo','Theodora','Theodosia','Theresa','Therese','Theresina','Theresita','Theressa','Therine','Thia','Thomasa','Thomasin','Thomasina','Thomasine','Tiena','Tierney','Tiertza','Tiff','Tiffani','Tiffanie','Tiffany','Tiffi','Tiffie','Tiffy','Tilda','Tildi','Tildie','Tildy','Tillie','Tilly','Tim','Timi','Timmi','Timmie','Timmy','Timothea','Tina','Tine','Tiphani','Tiphanie','Tiphany','Tish','Tisha','Tobe','Tobey','Tobi','Toby','Tobye','Toinette','Toma','Tomasina','Tomasine','Tomi','Tommi','Tommie','Tommy','Toni','Tonia','Tonie','Tony','Tony
a','Tonye','Tootsie','Torey','Tori','Torie','Torrie','Tory','Tova','Tove','Tracee','Tracey','Traci','Tracie','Tracy','Trenna','Tresa','Trescha','Tressa','Tricia','Trina','Trish','Trisha','Trista','Trix','Trixi','Trixie','Trixy','Truda','Trude','Trudey','Trudi','Trudie','Trudy','Trula','Tuesday','Twila','Twyla','Tybi','Tybie','Tyne','Ula','Ulla','Ulrica','Ulrika','Ulrikaumeko','Ulrike','Umeko','Una','Ursa','Ursala','Ursola','Ursula','Ursulina','Ursuline','Uta','Val','Valaree','Valaria','Vale','Valeda','Valencia','Valene','Valenka','Valentia','Valentina','Valentine','Valera','Valeria','Valerie','Valery','Valerye','Valida','Valina','Valli','Vallie','Vally','Valma','Valry','Van','Vanda','Vanessa','Vania','Vanna','Vanni','Vannie','Vanny','Vanya','Veda','Velma','Velvet','Venita','Venus','Vera','Veradis','Vere','Verena','Verene','Veriee','Verile','Verina','Verine','Verla','Verna','Vernice','Veronica','Veronika','Veronike','Veronique','Vevay','Vi','Vicki','Vickie','Vicky','Victoria','Vida','Viki','Vikki','Vikky','Vilhelmina','Vilma','Vin','Vina','Vinita','Vinni','Vinnie','Vinny','Viola','Violante','Viole','Violet','Violetta','Violette','Virgie','Virgina','Virginia','Virginie','Vita','Vitia','Vitoria','Vittoria','Viv','Viva','Vivi','Vivia','Vivian','Viviana','Vivianna','Vivianne','Vivie','Vivien','Viviene','Vivienne','Viviyan','Vivyan','Vivyanne','Vonni','Vonnie','Vonny','Vyky','Wallie','Wallis','Walliw','Wally','Waly','Wanda','Wandie','Wandis','Waneta','Wanids','Wenda','Wendeline','Wendi','Wendie','Wendy','Wendye','Wenona','Wenonah','Whitney','Wileen','Wilhelmina','Wilhelmine','Wilie','Willa','Willabella','Willamina','Willetta','Willette','Willi','Willie','Willow','Willy','Willyt','Wilma','Wilmette','Wilona','Wilone','Wilow','Windy','Wini','Winifred','Winna','Winnah','Winne','Winni','Winnie','Winnifred','Winny','Winona','Winonah','Wren','Wrennie','Wylma','Wynn','Wynne','Wynnie','Wynny','Xaviera','Xena','Xenia','Xylia','Xylina','Yalonda','Yasmeen','Yasmin','Yelena','Yetta','Yettie','Yetty','Yevette','Ynes','Ynez','Yoko','Yolanda','Yolande','Yolane','Yolanthe','Yoshi','Yoshiko','Yovonnda','Ysabel','Yvette','Yvonne','Zabrina','Zahara','Zandra','Zaneta','Zara','Zarah','Zaria','Zarla','Zea','Zelda','Zelma','Zena','Zenia','Zia','Zilvia','Zita','Zitella','Zoe','Zola','Zonda','Zondra','Zonnya','Zora','Zorah','Zorana','Zorina','Zorine','Zsa Zsa','Zsazsa','Zulema','Zuzana']
surnames = ["Aaberg","Aalst","Aara","Aaren","Aarika","Aaron","Aaronson","Ab","Aba","Abad","Abagael","Abagail","Abana","Abate","Abba","Abbate","Abbe","Abbey","Abbi","Abbie","Abbot","Abbotsen","Abbotson","Abbotsun","Abbott","Abbottson","Abby","Abbye","Abdel","Abdella","Abdu","Abdul","Abdulla","Abe","Abebi","Abel","Abelard","Abell","Abercromby","Abernathy","Abernon","Abert","Abeu","Abey","Abie","Abigael","Abigail","Abigale","Abijah","Abisha","Abisia","Abixah","Abner","Aborn","Abott","Abra","Abraham","Abrahams","Abrahamsen","Abrahan","Abram","Abramo","Abrams","Abramson","Abran","Abroms","Absa","Absalom","Abshier","Acacia","Acalia","Accalia","Ace","Acey","Acherman","Achilles","Achorn","Acie","Acima","Acker","Ackerley","Ackerman","Ackler","Ackley","Acquah","Acus","Ad","Ada","Adabel","Adabelle","Adachi","Adah","Adaha","Adai","Adaiha","Adair","Adal","Adala","Adalai","Adalard","Adalbert","Adalheid","Adali","Adalia","Adaliah","Adalie","Adaline","Adall","Adallard","Adam","Adama","Adamec","Adamek","Adamik","Adamina","Adaminah","Adamis","Adamo","Adamok","Adams","Adamsen","Adamski","Adamson","Adamsun","Adan","Adao","Adar","Adara","Adaurd","Aday","Adda","Addam","Addi","Addia","Addie","Addiego","Addiel","Addis","Addison","Addy","Ade","Adebayo","Adel","Adela","Adelaida","Adelaide","Adelaja","Adelbert","Adele","Adelheid","Adelia","Adelice","Adelina","Adelind","Adeline","Adella","Adelle","Adelpho","Adelric","Adena","Ader","Adest","Adey","Adham","Adhamh","Adhern","Adi","Adiana","Adiel","Adiell","Adigun","Adila","Adim","Adin","Adina","Adine","Adis","Adkins","Adlai","Adlar","Adlare","Adlay","Adlee","Adlei","Adler","Adley","Adna","Adnah","Adne","Adnopoz","Ado","Adolf","Adolfo","Adolph","Adolphe","Adolpho","Adolphus","Adon","Adonis","Adora","Adore","Adoree","Adorl","Adorne","Adrea","Adrell","Adria","Adriaens","Adrial","Adrian","Adriana","Adriane","Adrianna","Adrianne","Adriano","Adriel","Adriell","Adrien","Adriena","Adriene","Adrienne","Adur","Aekerly","Aelber","Aenea","Aeneas","Aeneus","Aeniah","Aenneea","Aeriel","Aeriela","Aeriell","Affer","Affra","Affrica","Afra","Africa","Africah","Afrika","Afrikah","Afton","Ag","Agace","Agamemnon","Agan","Agata","Agate","Agatha","Agathe","Agathy","Agbogla","Agee","Aggappe","Aggappera","Aggappora","Aggarwal","Aggi","Aggie","Aggri","Aggy","Agle","Agler","Agna","Agnella","Agnes","Agnese","Agnesse","Agneta","Agnew","Agnola","Agostino","Agosto","Agretha","Agripina","Agrippina","Aguayo","Agueda","Aguie","Aguste","Agustin","Ahab","Aharon","Ahasuerus","Ahders","Ahearn","Ahern","Ahl","Ahlgren","Ahmad","Ahmar","Ahmed","Ahola","Aholah","Aholla","Ahoufe","Ahouh","Ahrendt","Ahrens","Ahron","Aia","Aida","Aidan","Aiden","Aiello","Aigneis","Aiken","Aila","Ailbert","Aile","Ailee","Aileen","Ailene","Ailey","Aili","Ailin","Ailina","Ailis","Ailsa","Ailssa","Ailsun","Ailyn","Aime","Aimee","Aimil","Aimo","Aindrea","Ainslee","Ainsley","Ainslie","Ainsworth","Airel","Aires","Airla","Airlee","Airlia","Airliah","Airlie","Aisha","Ajani","Ajax","Ajay","Ajit","Akanke","Akel","Akela","Aker","Akerboom","Akerley","Akers","Akeyla","Akeylah","Akili","Akim","Akin","Akins","Akira","Aklog","Aksel","Aksoyn","Al","Alabaster","Alage","Alain","Alaine","Alair","Alake","Alameda","Alan","Alana","Alanah","Aland","Alane","Alanna","Alano","Alansen","Alanson","Alard","Alaric","Alarice","Alarick","Alarise","Alasdair","Alastair","Alasteir","Alaster","Alatea","Alathia","Alayne","Alba","Alban","Albarran","Albemarle","Alben","Alber","Alberic","Alberik","Albers","Albert","Alberta","Albertina","Albertine","Alberto","Albertson","Al
bie","Albin","Albina","Albion","Alboran","Albrecht","Albric","Albright","Albur","Alburg","Alburga","Alby","Alcina","Alcine","Alcinia","Alcock","Alcot","Alcott","Alcus","Alda","Aldarcie","Aldarcy","Aldas","Alded","Alden","Aldercy","Alderman","Alderson","Aldin","Aldis","Aldo","Aldon","Aldora","Aldos","Aldous","Aldred","Aldredge","Aldric","Aldrich","Aldridge","Alduino","Aldus","Aldwin","Aldwon","Alec","Alecia","Aleck","Aleda","Aleece","Aleedis","Aleen","Aleetha","Alegre","Alejandra","Alejandrina","Alejandro","Alejo","Alejoa","Alek","Aleksandr","Alena","Alene","Alenson","Aleras","Aleris","Aleron","Alesandrini","Alessandra","Alessandro","Aleta","Aletha","Alethea","Alethia","Aletta","Alex","Alexa","Alexander","Alexandr","Alexandra","Alexandre","Alexandria","Alexandrina","Alexandro","Alexandros","Alexei","Alexi","Alexia","Alexina","Alexine","Alexio","Alexis","Aley","Aleydis","Alf","Alfeus","Alfi","Alfie","Alfons","Alfonse","Alfonso","Alfonzo","Alford","Alfred","Alfreda","Alfredo","Alfy","Algar","Alger","Algernon","Algie","Alguire","Algy","Ali","Alia","Aliber","Alic","Alica","Alice","Alicea","Alicia","Alick","Alida","Alidia","Alidis","Alidus","Alie","Alika","Alikee","Alina","Aline","Alinna","Alis","Alisa","Alisan","Alisander","Alisen","Alisha","Alisia","Alison","Alissa","Alistair","Alister","Alisun","Alita","Alitha","Alithea","Alithia","Alitta","Alius","Alix","Aliza","Alla","Allain","Allan","Allana","Allanson","Allard","Allare","Allayne","Allbee","Allcot","Alleen","Allegra","Allen","Allene","Alleras","Allerie","Alleris","Allerus","Alley","Alleyn","Alleyne","Alli","Allianora","Alliber","Allie","Allin","Allina","Allis","Allisan","Allison","Allissa","Allista","Allister","Allistir","Allix","Allmon","Allred","Allrud","Allsopp","Allsun","Allveta","Allwein","Allx","Ally","Allyce","Allyn","Allys","Allyson","Alma","Almallah","Almeda","Almeeta","Almeida","Almena","Almeria","Almeta","Almira","Almire","Almita","Almond","Almund","Alo","Alodee","Alodi","Alodie","Aloin","Aloise","Aloisia","Aloisius","Aloke","Alon","Alonso","Alonzo","Aloysia","Aloysius","Alper","Alpers","Alpert","Alphard","Alpheus","Alphonsa","Alphonse","Alphonsine","Alphonso","AlrZc","Alric","Alrich","Alrick","Alroi","Alroy","Also","Alston","Alsworth","Alta","Altaf","Alten","Althea","Althee","Altheta","Altis","Altman","Alton","Aluin","Aluino","Alurd","Alurta","Alva","Alvan","Alvar","Alvarez","Alver","Alvera","Alverson","Alverta","Alves","Alveta","Alviani","Alvie","Alvin","Alvina","Alvinia","Alvira","Alvis","Alvita","Alvord","Alvy","Alwin","Alwitt","Alwyn","Alyce","Alyda","Alyose","Alyosha","Alys","Alysa","Alyse","Alysia","Alyson","Alysoun","Alyss","Alyssa","Alyworth","Ama","Amabel","Amabelle","Amabil","Amadas","Amadeo","Amadeus","Amadis","Amado","Amador","Amadus","Amal","Amalbena","Amalberga","Amalbergas","Amalburga","Amalea","Amalee","Amaleta","Amalia","Amalie","Amalita","Amalle","Aman","Amand","Amanda","Amandi","Amandie","Amando","Amandy","Amann","Amar","Amara","Amaral","Amaras","Amarette","Amargo","Amari","Amarillas","Amarillis","Amaris","Amary","Amaryl","Amaryllis","Amasa","Amata","Amathist","Amathiste","Amati","Amato","Amatruda","Amaty","Amber","Amberly","Ambert","Ambie","Amble","Ambler","Ambrogino","Ambrogio","Ambros","Ambrosane","Ambrose","Ambrosi","Ambrosia","Ambrosine","Ambrosio","Ambrosius","Ambur","Amby","Ame","Amedeo","Amelia","Amelie","Amelina","Ameline","Amelita","Amena","Amend","Amerigo","Amero","Amersham","Amery","Ames","Amethist","Amethyst","Ami","Amias","Amice","Amick","Amie","Amiel","Amieva","Amii","Amil","Amin","Aminta","Amir"
,"Amitie","Amity","Amling","Ammadas","Ammadis","Ammamaria","Ammann","Ammon","Amoakuh","Amor","Amora","Amoreta","Amorete","Amorette","Amorita","Amoritta","Amory","Amos","Amr","Amrita","Amsden","Amund","Amy","Amyas","Amye","An","Ana","Anabal","Anabel","Anabella","Anabelle","Anagnos","Analiese","Analise","Anallese","Anallise","Anana","Ananna","Anastas","Anastase","Anastasia","Anastasie","Anastasio","Anastasius","Anastassia","Anastatius","Anastice","Anastos","Anatol","Anatola","Anatole","Anatolio","Anatollo","Ancalin","Ancel","Ancelin","Anceline","Ancell","Anchie","Ancier","Ancilin","Andee","Andeee","Andel","Ander","Anderea","Anderegg","Anderer","Anders","Andersen","Anderson","Andert","Andi","Andie","Andonis","Andra","Andrade","Andras","Andre","Andrea","Andreana","Andreas","Andree","Andrei","Andrej","Andrel","Andres","Andrew","Andrews","Andrey","Andri","Andria","Andriana","Andrien","Andriette","Andris","Andromache","Andromada","Andromeda","Andromede","Andros","Androw","Andrus","Andryc","Andy","Anestassia","Anet","Anett","Anetta","Anette","Aney","Angadreme","Angadresma","Ange","Angel","Angela","Angele","Angeli","Angelia","Angelica","Angelico","Angelika","Angelina","Angeline","Angelique","Angelis","Angelita","Angell","Angelle","Angelo","Angi","Angie","Angil","Angle","Anglim","Anglo","Angrist","Angus","Angy","Anh","Ania","Aniakudo","Anica","Aniela","Anil","Anis","Anissa","Anita","Anitra","Aniweta","Anjali","Anjanette","Anjela","Ankeny","Ankney","Ann","Ann-Marie","Anna","Anna-Diana","Anna-Diane","Anna-Maria","Annabal","Annabel","Annabela","Annabell","Annabella","Annabelle","Annadiana","Annadiane","Annalee","Annaliese","Annalise","Annamaria","Annamarie","Anne","Anne-Corinne","Anne-Marie","Annecorinne","Anneliese","Annelise","Annemarie","Annetta","Annette","Anni","Annia","Annice","Annie","Anniken","Annis","Annissa","Annmaria","Annmarie","Annnora","Annora","Annorah","Annunciata","Anny","Anora","Anse","Ansel","Ansela","Ansell","Anselm","Anselma","Anselme","Anselmi","Anselmo","Ansilma","Ansilme","Ansley","Anson","Anstice","Anstus","Antebi","Anthe","Anthea","Anthia","Anthiathia","Anthony","Antin","Antipas","Antipus","Antoine","Antoinetta","Antoinette","Anton","Antone","Antonella","Antonetta","Antoni","Antonia","Antonie","Antonietta","Antonin","Antonina","Antonino","Antonio","Antonius","Antons","Antony","Antrim","Anurag","Anuska","Any","Anya","Anyah","Anzovin","Apfel","Apfelstadt","Apgar","Aphra","Aphrodite","Apicella","Apollo","Apollus","Apostles","Appel","Apple","Appleby","Appledorf","Applegate","Appleton","Appolonia","Apps","April","Aprile","Aprilette","Apthorp","Apul","Ara","Arabeila","Arabel","Arabela","Arabele","Arabella","Arabelle","Arad","Arakawa","Araldo","Aramanta","Aramen","Aramenta","Araminta","Aran","Arand","Arathorn","Arbe","Arber","Arbuckle","Arch","Archaimbaud","Archambault","Archangel","Archer","Archibald","Archibaldo","Archibold","Archie","Archle","Archy","Ard","Arda","Ardath","Arde","Ardeen","Ardeha","Ardehs","Ardel","Ardelia","Ardelis","Ardell","Ardella","Ardelle","Arden","Ardene","Ardenia","Ardeth","Ardie","Ardin","Ardine","Ardis","Ardisj","Ardith","Ardme","Ardolino","Ardra","Ardrey","Ardussi","Ardy","Ardyce","Ardys","Ardyth","Arel","Arela","Arella","Arelus","Aret","Areta","Aretha","Aretina","Aretta","Arette","Arezzini","Argent","Argile","Argus","Argyle","Argyres","Arhna","Ari","Aria","Ariadne","Ariana","Ariane","Arianie","Arianna","Arianne","Aribold","Aric","Arica","Arick","Aridatha","Arie","Ariel","Ariela","Ariella","Arielle","Ariew","Arin","Ario","Arissa","Aristotle","Arita","Arjan
","Arjun","Ark","Arlan","Arlana","Arlee","Arleen","Arlen","Arlena","Arlene","Arleta","Arlette","Arley","Arleyne","Arlie","Arliene","Arlin","Arlina","Arlinda","Arline","Arlo","Arlon","Arluene","Arly","Arlyn","Arlyne","Arlynne","Armalda","Armalla","Armallas","Arman","Armand","Armanda","Armando","Armbrecht","Armbruster","Armelda","Armil","Armilda","Armilla","Armillas","Armillda","Armillia","Armin","Armington","Armitage","Armond","Armstrong","Armyn","Arnaldo","Arnaud","Arndt","Arne","Arnelle","Arney","Arni","Arnie","Arno","Arnold","Arnoldo","Arnon","Arnst","Arnuad","Arnulfo","Arny","Arola","Aron","Arondel","Arondell","Aronoff","Aronow","Aronson","Arquit","Arratoon","Arri","Arria","Arrio","Arron","Arst","Art","Arta","Artair","Artamas","Arte","Artema","Artemas","Artemis","Artemisa","Artemisia","Artemus","Arther","Arthur","Artie","Artima","Artimas","Artina","Artur","Arturo","Artus","Arty","Aruabea","Arun","Arundel","Arundell","Arv","Arva","Arvad","Arvell","Arvid","Arvie","Arvin","Arvind","Arvo","Arvonio","Arvy","Ary","Aryn","As","Asa","Asabi","Asante","Asaph","Asare","Aschim","Ase","Asel","Ash","Asha","Ashbaugh","Ashbey","Ashby","Ashelman","Ashely","Asher","Ashford","Ashia","Ashien","Ashil","Ashjian","Ashla","Ashlan","Ashlee","Ashleigh","Ashlen","Ashley","Ashli","Ashlie","Ashlin","Ashling","Ashly","Ashman","Ashmead","Ashok","Ashraf","Ashti","Ashton","Ashwell","Ashwin","Asia","Askari","Askwith","Aslam","Asp","Aspa","Aspasia","Aspia","Asquith","Assisi","Asta","Astera","Asteria","Astor","Astra","Astraea","Astrahan","Astrea","Astred","Astri","Astrid","Astrix","Astto","Asuncion","Atal","Atalanta","Atalante","Atalanti","Atalaya","Atalayah","Atalee","Ataliah","Atalie","Atalya","Atcliffe","Athal","Athalee","Athalia","Athalie","Athalla","Athallia","Athelstan","Athena","Athene","Athenian","Athey","Athiste","Atiana","Atkins","Atkinson","Atlanta","Atlante","Atlas","Atlee","Atonsah","Atrice","Atronna","Attah","Attalanta","Attalie","Attenborough","Attenweiler","Atterbury","Atthia","Attlee","Attwood","Atul","Atwater","Atwekk","Atwood","Atworth","Au","Aubarta","Aube","Auberbach","Auberon","Aubert","Auberta","Aubigny","Aubin","Aubine","Aubree","Aubreir","Aubrette","Aubrey","Aubrie","Aubry","Auburn","Auburta","Aubyn","Audette","Audi","Audie","Audley","Audly","Audra","Audras","Audre","Audres","Audrey","Audri","Audrie","Audris","Audrit","Audry","Audrye","Audsley","Audun","Audwen","Audwin","Audy","Auerbach","Aufmann","Augie","August","Augusta","Auguste","Augustin","Augustina","Augustine","Augusto","Augustus","Augy","Aulea","Auliffe","Aun","Aundrea","Aunson","Aura","Aurea","Aurel","Aurelea","Aurelia","Aurelie","Aurelio","Aurelius","Auria","Auric","Aurie","Aurilia","Aurita","Aurlie","Auroora","Aurora","Aurore","Aurthur","Ause","Austen","Austin","Austina","Austine","Auston","Australia","Austreng","Autrey","Autry","Autum","Autumn","Auvil","Av","Ava","Avan","Avaria","Ave","Avelin","Aveline","Avera","Averell","Averi","Averil","Averill","Averir","Avery","Averyl","Avi","Avictor","Avie","Avigdor","Avilla","Avis","Avitzur","Aviv","Aviva","Avivah","Avner","Avra","Avraham","Avram","Avril","Avrit","Avrom","Avron","Avruch","Awad","Ax","Axe","Axel","Aylmar","Aylmer","Aylsworth","Aylward","Aymer","Ayn","Aynat","Ayo","Ayres","Azal","Azalea","Azaleah","Azar","Azarcon","Azaria","Azarria","Azelea","Azeria","Aziza","Azpurua","Azral","Azriel","Baal","Baalbeer","Baalman","Bab","Babara","Babb","Babbette","Babbie","Babby","Babcock","Babette","Babita","Babs","Bac","Bacchus","Bach","Bachman","Backer","Backler","Bacon","Badger","Badr","Baecher",
"Bael","Baelbeer","Baer","Baerl","Baerman","Baese","Bagger","Baggett","Baggott","Baggs","Bagley","Bahner","Bahr","Baiel","Bail","Bailar","Bailey","Bailie","Baillie","Baillieu","Baily","Bain","Bainbridge","Bainbrudge","Bainter","Baird","Baiss","Bajaj","Bak","Bakeman","Bakemeier","Baker","Bakerman","Bakki","Bal","Bala","Balas","Balbinder","Balbur","Balcer","Balch","Balcke","Bald","Baldridge","Balduin","Baldwin","Bale","Baler","Balf","Balfore","Balfour","Balkin","Ball","Ballard","Balliett","Balling","Ballinger","Balliol","Ballman","Ballou","Balmuth","Balough","Balsam","Balthasar","Balthazar","Bamberger","Bambi","Bambie","Bamby","Bamford","Ban","Bancroft","Bandeen","Bander","Bandler","Bandur","Banebrudge","Banerjee","Bang","Bank","Banks","Banky","Banna","Bannasch","Bannerman","Bannister","Bannon","Banquer","Banwell","Baptist","Baptista","Baptiste","Baptlsta","Bar","Bara","Barabas","Barabbas","Baram","Baras","Barayon","Barb","Barbabas","Barbabra","Barbara","Barbara-Anne","Barbaraanne","Barbarese","Barbaresi","Barbe","Barbee","Barber","Barbette","Barbey","Barbi","Barbie","Barbour","Barboza","Barbra","Barbur","Barbuto","Barby","Barcellona","Barclay","Barcot","Barcroft","Barcus","Bard","Barde","Barden","Bardo","Barfuss","Barger","Bari","Barimah","Barina","Barker","Barkley","Barling","Barlow","Barmen","Barn","Barna","Barnaba","Barnabas","Barnabe","Barnaby","Barnard","Barncard","Barnebas","Barnes","Barnet","Barnett","Barney","Barnie","Barnum","Barny","Barolet","Baron","Barr","Barra","Barrada","Barram","Barraza","Barren","Barret","Barrett","Barri","Barrie","Barrington","Barris","Barron","Barrow","Barrus","Barry","Barsky","Barstow","Bart","Barta","Bartel","Barth","Barthel","Barthelemy","Barthol","Barthold","Bartholemy","Bartholomeo","Bartholomeus","Bartholomew","Bartie","Bartko","Bartle","Bartlet","Bartlett","Bartley","Bartolemo","Bartolome","Bartolomeo","Barton","Bartosch","Bartram","Barty","Baruch","Barvick","Bary","Baryram","Bascio","Bascomb","Base","Baseler","Basham","Bashee","Bashemath","Bashemeth","Bashuk","Basia","Basil","Basile","Basilio","Basilius","Basir","Baskett","Bass","Basset","Bassett","Basso","Bast","Bastian","Bastien","Bat","Batchelor","Bate","Baten","Bates","Batha","Bathelda","Bathesda","Bathilda","Batholomew","Bathsheb","Bathsheba","Bathsheeb","Bathulda","Batish","Batista","Batory","Batruk","Batsheva","Battat","Battista","Battiste","Batty","Baudelaire","Baudin","Baudoin","Bauer","Baugh","Baum","Baumann","Baumbaugh","Baun","Bausch","Bauske","Bautista","Bautram","Bax","Baxie","Baxter","Baxy","Bay","Bayard","Bayer","Bayless","Baylor","Bayly","Baynebridge","Bazar","Bazil","Bazluke","Bea","Beach","Beacham","Beal","Beale","Beall","Bealle","Bean","Beane","Beaner","Bear","Bearce","Beard","Beare","Bearnard","Beasley","Beaston","Beata","Beatrice","Beatrisa","Beatrix","Beatriz","Beattie","Beatty","Beau","Beauchamp","Beaudoin","Beaufert","Beaufort","Beaulieu","Beaumont","Beauregard","Beauvais","Beaver","Bebe","Beberg","Becca","Bechler","Becht","Beck","Becka","Becker","Beckerman","Becket","Beckett","Becki","Beckie","Beckman","Becky","Bedad","Bedelia","Bedell","Bedwell","Bee","Beebe","Beeck","Beedon","Beekman","Beera","Beesley","Beeson","Beetner","Beffrey","Bega","Begga","Beghtol","Behah","Behka","Behl","Behlau","Behlke","Behm","Behn","Behnken","Behre","Behrens","Beichner","Beilul","Bein","Beisel","Beitch","Beitnes","Beitris","Beitz","Beka","Bekah","Bekelja","Beker","Bekha","Bekki","Bel","Bela","Belak","Belamy","Belanger","Belayneh","Belcher","Belda","Belden","Belding","Belen","Belford","Belia","B
elicia","Belier","Belinda","Belita","Bell","Bella","Bellamy","Bellanca","Bellaude","Bellda","Belldame","Belldas","Belle","Beller","Bellew","Bellina","Bellis","Bello","Belloir","Belmonte","Belshin","Belsky","Belter","Beltran","Belva","Belvia","Ben","Bena","Bencion","Benco","Bender","Bendick","Bendicta","Bendicty","Bendite","Bendix","Benedetta","Benedetto","Benedic","Benedick","Benedict","Benedicta","Benedicto","Benedikt","Benedikta","Benedix","Benenson","Benetta","Benge","Bengt","Benia","Beniamino","Benil","Benilda","Benildas","Benildis","Benioff","Benis","Benisch","Benita","Benito","Benjamen","Benjamin","Benji","Benjie","Benjy","Benkley","Benn","Bennet","Bennett","Benni","Bennie","Bennink","Bennion","Bennir","Benny","Benoit","Benoite","Bensen","Bensky","Benson","Bent","Bentlee","Bentley","Bently","Benton","Benyamin","Benzel","Beora","Beore","Ber","Berard","Berardo","Berck","Berenice","Beret","Berey","Berfield","Berg","Berga","Bergeman","Bergen","Berger","Bergerac","Bergeron","Bergess","Berget","Bergh","Berghoff","Bergin","Berglund","Bergman","Bergmann","Bergmans","Bergquist","Bergren","Bergstein","Bergstrom","Bergwall","Berhley","Berk","Berke","Berkeley","Berkie","Berkin","Berkley","Berkly","Berkman","Berkow","Berkshire","Berky","Berl","Berlauda","Berlin","Berlinda","Berliner","Berlyn","Berman","Bern","Berna","Bernadene","Bernadette","Bernadina","Bernadine","Bernard","Bernardi","Bernardina","Bernardine","Bernardo","Bernarr","Bernat","Berne","Bernelle","Berner","Berners","Berneta","Bernete","Bernetta","Bernette","Bernhard","Berni","Bernice","Bernie","Bernita","Bernj","Berns","Bernstein","Bernt","Berny","Berri","Berrie","Berriman","Berry","Berstine","Bert","Berta","Bertasi","Berte","Bertelli","Bertero","Bertha","Berthe","Berthold","Berthoud","Berti","Bertie","Bertila","Bertilla","Bertina","Bertine","Bertle","Bertold","Bertolde","Berton","Bertram","Bertrand","Bertrando","Bertsche","Berty","Berwick","Beryl","Beryle","Beshore","Besnard","Bess","Besse","Bessie","Bessy","Best","Beth","Bethanne","Bethany","Bethel","Bethena","Bethesda","Bethesde","Bethezel","Bethina","Betsey","Betsy","Betta","Bette","Bette-Ann","Betteann","Betteanne","Bettencourt","Betthel","Betthezel","Betthezul","Betti","Bettina","Bettine","Betty","Bettye","Bettzel","Betz","Beulah","Beuthel","Beutler","Beutner","Bev","Bevan","Bevash","Bever","Beverie","Beverle","Beverlee","Beverley","Beverlie","Beverly","Bevers","Bevin","Bevis","Bevon","Bevus","Bevvy","Beyer","Bezanson","Bhatt","Bhayani","Biagi","Biagio","Biamonte","Bianca","Biancha","Bianchi","Bianka","Bibbie","Bibby","Bibbye","Bibeau","Bibi","Bible","Bick","Bickart","Bicknell","Biddick","Biddie","Biddle","Biddy","Bidget","Bidle","Biebel","Biegel","Bierman","Biernat","Bigelow","Bigford","Bigg","Biggs","Bigler","Bigner","Bigod","Bigot","Bik","Bikales","Bil","Bilbe","Bilek","Biles","Bili","Bilicki","Bill","Billat","Bille","Billen","Billi","Billie","Billmyre","Bills","Billy","Billye","Bilow","Bilski","Bina","Binah","Bindman","Binetta","Binette","Bing","Bink","Binky","Binni","Binnie","Binnings","Binny","Biondo","Birch","Birchard","Birck","Bird","Birdella","Birdie","Birdt","Birecree","Birgit","Birgitta","Birk","Birkett","Birkle","Birkner","Birmingham","Biron","Bish","Bishop","Bissell","Bisset","Bithia","Bittencourt","Bitthia","Bittner","Bivins","Bixby","Bixler","Bjork","Bjorn","Black","Blackburn","Blackington","Blackman","Blackmore","Blackmun","Blackstock","Blackwell","Blader","Blain","Blaine","Blainey","Blair","Blaire","Blaise","Blake","Blakelee","Blakeley","Blakely","Blalock","Blanc
","Blanca","Blanch","Blancha","Blanchard","Blanche","Blanchette","Bland","Blandina","Blanding","Blane","Blank","Blanka","Blankenship","Blas","Blase","Blaseio","Blasien","Blasius","Blatman","Blatt","Blau","Blayne","Blayze","Blaze","Bledsoe","Bleier","Blen","Blessington","Blight","Blim","Blinni","Blinnie","Blinny","Bliss","Blisse","Blithe","Bloch","Block","Blockus","Blodget","Blodgett","Bloem","Blondell","Blondelle","Blondie","Blondy","Blood","Bloom","Bloomer","Blossom","Blount","Bloxberg","Bluefarb","Bluefield","Bluh","Bluhm","Blum","Bluma","Blumenfeld","Blumenthal","Blunk","Blunt","Blus","Blynn","Blythe","Bo","Boak","Boar","Boardman","Boarer","Boaten","Boatwright","Bob","Bobbe","Bobbee","Bobbette","Bobbi","Bobbie","Bobby","Bobbye","Bobette","Bobina","Bobine","Bobinette","Bobker","Bobseine","Bock","Bocock","Bodi","Bodkin","Bodnar","Bodrogi","Bodwell","Body","Boehike","Boehmer","Boeke","Boelter","Boesch","Boeschen","Boff","Boffa","Bogart","Bogey","Boggers","Boggs","Bogie","Bogoch","Bogosian","Bogusz","Bohannon","Bohaty","Bohi","Bohlen","Bohlin","Bohman","Bohner","Bohon","Bohrer","Bohs","Bohun","Boice","Boigie","Boiney","Bois","Bolan","Boland","Bolanger","Bolen","Boles","Boleslaw","Boleyn","Bolger","Bolitho","Bollay","Bollen","Bolling","Bollinger","Bolme","Bolt","Bolte","Bolten","Bolton","Bomke","Bonacci","Bonaparte","Bonar","Bond","Bondie","Bondon","Bondy","Bone","Boni","Boniface","Bonilla","Bonina","Bonine","Bonis","Bonita","Bonn","Bonne","Bonneau","Bonnee","Bonnell","Bonner","Bonnes","Bonnette","Bonney","Bonni","Bonnibelle","Bonnice","Bonnie","Bonns","Bonny","Bonucci","Booker","Booma","Boone","Boonie","Boony","Boor","Boorer","Boorman","Boot","Boote","Booth","Boothe","Boothman","Booze","Bopp","Bor","Bora","Borchers","Borchert","Bord","Borden","Bordie","Bordiuk","Bordy","Bore","Borek","Borer","Bores","Borg","Borgeson","Boris","Bork","Borlase","Borlow","Borman","Born","Bornie","Bornstein","Borras","Borrell","Borreri","Borries","Borroff","Borszcz","Bortman","Bortz","Boru","Bosch","Bose","Boser","Bosson","Bostow","Boswall","Boswell","Botnick","Botsford","Bottali","Botti","Botzow","Bouchard","Boucher","Bouchier","Boudreaux","Bough","Boulanger","Bouldon","Bouley","Bound","Bounds","Bourgeois","Bourke","Bourn","Bourne","Bourque","Boutis","Bouton","Bouzoun","Bove","Bovill","Bow","Bowden","Bowe","Bowen","Bower","Bowerman","Bowers","Bowes","Bowie","Bowlds","Bowler","Bowles","Bowman","Bowne","Bowra","Bowrah","Bowyer","Box","Boy","Boyce","Boycey","Boycie","Boyd","Boyden","Boyer","Boyes","Boykins","Boylan","Boylston","Boynton","Boys","Boyse","Boyt","Bozovich","Bozuwa","Braasch","Brabazon","Braca","Bracci","Brace","Brackely","Brackett","Brad","Bradan","Brade","Braden","Bradeord","Brader","Bradford","Bradlee","Bradleigh","Bradley","Bradly","Bradman","Bradney","Bradshaw","Bradski","Bradstreet","Bradway","Bradwell","Brady","Braeunig","Brag","Brahear","Brainard","Bram","Bramwell","Bran","Brana","Branca","Branch","Brand","Brandais","Brande","Brandea","Branden","Brandenburg","Brander","Brandes","Brandi","Brandice","Brandie","Brandise","Brandon","Brandt","Brandtr","Brandwein","Brandy","Brandyn","Branen","Branham","Brannon","Branscum","Brant","Brantley","Brasca","Brass","Braswell","Brathwaite","Bratton","Braun","Braunstein","Brause","Bravar","Bravin","Brawley","Brawner","Bray","Braynard","Brazee","Breana","Breanne","Brear","Breban","Brebner","Brecher","Brechtel","Bred","Bree","Breech","Breed","Breen","Breena","Breeze","Breger","Brelje","Bremble","Bremen","Bremer","Bremser","Bren","Brena","Brenan","Brenda","Brendan
","Brenden","Brendin","Brendis","Brendon","Brenk","Brenn","Brenna","Brennan","Brennen","Brenner","Brent","Brenton","Brentt","Brenza","Bresee","Breskin","Brest","Bret","Brett","Brew","Brewer","Brewster","Brey","Brezin","Bria","Brian","Briana","Brianna","Brianne","Briano","Briant","Brice","Brick","Bricker","Bride","Bridge","Bridges","Bridget","Bridgette","Bridgid","Bridie","Bridwell","Brie","Brien","Brier","Brieta","Brietta","Brig","Brigette","Brigg","Briggs","Brigham","Bright","Brightman","Brighton","Brigid","Brigida","Brigit","Brigitta","Brigitte","Brill","Brina","Brindell","Brindle","Brine","Briney","Bringhurst","Brink","Brinkema","Brinn","Brinna","Brinson","Briny","Brion","Briscoe","Bristow","Brit","Brita","Britney","Britni","Britt","Britta","Brittain","Brittan","Brittaney","Brittani","Brittany","Britte","Britteny","Brittne","Brittnee","Brittney","Brittni","Britton","Brnaba","Brnaby","Broadbent","Brock","Brockie","Brocklin","Brockwell","Brocky","Brod","Broddie","Broddy","Brodench","Broder","Broderic","Broderick","Brodeur","Brodie","Brodsky","Brody","Broeder","Broek","Broeker","Brogle","Broida","Brok","Brom","Bromleigh","Bromley","Bron","Bronder","Bronez","Bronk","Bronnie","Bronny","Bronson","Bronwen","Bronwyn","Brook","Brooke","Brookes","Brookhouse","Brooking","Brookner","Brooks","Broome","Brose","Brosine","Brost","Brosy","Brote","Brothers","Brotherson","Brott","Brottman","Broucek","Brout","Brouwer","Brower","Brown","Browne","Browning","Brownley","Brownson","Brozak","Brubaker","Bruce","Brucie","Bruckner","Bruell","Brufsky","Bruis","Brunell","Brunella","Brunelle","Bruner","Brunhild","Brunhilda","Brunhilde","Bruni","Bruning","Brunk","Brunn","Bruno","Bruns","Bruyn","Bryan","Bryana","Bryant","Bryanty","Bryce","Bryn","Bryna","Bryner","Brynn","Brynna","Brynne","Bryon","Buatti","Bubalo","Bubb","Bucella","Buchalter","Buchanan","Buchbinder","Bucher","Buchheim","Buck","Buckden","Buckels","Buckie","Buckingham","Buckler","Buckley","Bucky","Bud","Budd","Budde","Buddie","Budding","Buddy","Buderus","Budge","Budwig","Budworth","Buehler","Buehrer","Buell","Buerger","Bueschel","Buff","Buffo","Buffum","Buffy","Buford","Bugbee","Buhler","Bui","Buine","Buiron","Buke","Bull","Bullard","Bullen","Buller","Bulley","Bullion","Bullis","Bullivant","Bullock","Bullough","Bully","Bultman","Bum","Bumgardner","Buna","Bunce","Bunch","Bunde","Bunder","Bundy","Bunker","Bunni","Bunnie","Bunns","Bunny","Bunow","Bunting","Buonomo","Buote","Burack","Burbank","Burch","Burchett","Burck","Burd","Burdelle","Burdett","Burford","Burg","Burgener","Burger","Burgess","Burget","Burgwell","Burhans","Burk","Burke","Burkhard","Burkhardt","Burkhart","Burkitt","Burkle","Burkley","Burl","Burleigh","Burley","Burlie","Burman","Burn","Burnaby","Burnard","Burne","Burner","Burnett","Burney","Burnham","Burnie","Burnight","Burnley","Burns","Burnsed","Burnside","Burny","Buroker","Burr","Burra","Burrell","Burrill","Burris","Burroughs","Burrow","Burrows","Burrton","Burrus","Burt","Burta","Burtie","Burtis","Burton","Burty","Burwell","Bury","Busby","Busch","Buschi","Buseck","Busey","Bush","Bushey","Bushore","Bushweller","Busiek","Buskirk","Buskus","Bussey","Bussy","Bust","Butch","Butcher","Butler","Butta","Buttaro","Butte","Butterfield","Butterworth","Button","Buxton","Buyer","Buyers","Buyse","Buzz","Buzzell","Byers","Byler","Byram","Byran","Byrann","Byrd","Byrdie","Byrle","Byrn","Byrne","Byrom","Byron","Bysshe","Bywaters","Bywoods","Cacia","Cacie","Cacilia","Cacilie","Cacka","Cad","Cadal","Caddaric","Caddric","Cade","Cadel","Cadell","Cadman","Cadmann","C
admar","Cadmarr","Caesar","Caesaria","Caffrey","Cagle","Cahan","Cahilly","Cahn","Cahra","Cai","Caia","Caiaphas","Cailean","Cailly","Cain","Caine","Caines","Cairistiona","Cairns","Caitlin","Caitrin","Cal","Calabrese","Calabresi","Calan","Calandra","Calandria","Calbert","Caldeira","Calder","Caldera","Calderon","Caldwell","Cale","Caleb","Calen","Calendra","Calendre","Calesta","Calhoun","Calia","Calica","Calida","Calie","Calisa","Calise","Calista","Call","Calla","Callahan","Callan","Callas","Calle","Callean","Callery","Calley","Calli","Callida","Callie","Callista","Calloway","Callum","Cally","Calmas","Calondra","Calore","Calv","Calva","Calvano","Calvert","Calvin","Calvina","Calvinna","Calvo","Calypso","Calysta","Cam","Camala","Camarata","Camden","Camel","Camella","Camellia","Cameron","Camey","Camfort","Cami","Camila","Camile","Camilia","Camilla","Camille","Camilo","Camm","Cammi","Cammie","Cammy","Camp","Campagna","Campball","Campbell","Campman","Campney","Campos","Campy","Camus","Can","Canada","Canale","Cand","Candace","Candi","Candice","Candida","Candide","Candie","Candis","Candless","Candra","Candy","Candyce","Caneghem","Canfield","Canica","Canice","Caniff","Cann","Cannell","Cannice","Canning","Cannon","Canon","Canotas","Canter","Cantlon","Cantone","Cantu","Canty","Canute","Capello","Caplan","Capon","Capone","Capp","Cappella","Cappello","Capps","Caprice","Capriola","Caputo","Caputto","Capwell","Car","Cara","Caralie","Caras","Caravette","Caraviello","Carberry","Carbo","Carbone","Carboni","Carbrey","Carce","Card","Carder","Cardew","Cardie","Cardinal","Cardon","Cardwell","Care","Careaga","Caren","Carena","Caresa","Caressa","Caresse","Carew","Carey","Cargian","Carhart","Cari","Caria","Carie","Caril","Carilla","Carilyn","Carin","Carina","Carine","Cariotta","Carisa","Carissa","Carita","Caritta","Carl","Carla","Carlee","Carleen","Carlen","Carlene","Carleton","Carley","Carli","Carlick","Carlie","Carlile","Carlin","Carlina","Carline","Carling","Carlisle","Carlita","Carlo","Carlock","Carlos","Carlota","Carlotta","Carlson","Carlstrom","Carlton","Carly","Carlye","Carlyle","Carlyn","Carlynn","Carlynne","Carma","Carman","Carmel","Carmela","Carmelia","Carmelina","Carmelita","Carmella","Carmelle","Carmelo","Carmen","Carmena","Carmencita","Carmina","Carmine","Carmita","Carmon","Carn","Carnahan","Carnay","Carnes","Carney","Carny","Caro","Carol","Carol-Jean","Carola","Carolan","Carolann","Carole","Carolee","Carolin","Carolina","Caroline","Carolle","Carolus","Carolyn","Carolyne","Carolynn","Carolynne","Caron","Carothers","Carpenter","Carper","Carpet","Carpio","Carr","Carree","Carrel","Carrelli","Carrew","Carri","Carrick","Carrie","Carrillo","Carrington","Carrissa","Carrnan","Carrol","Carroll","Carry","Carson","Cart","Cartan","Carter","Carthy","Cartie","Cartwell","Cartwright","Caruso","Carver","Carvey","Cary","Caryl","Caryn","Cas","Casabonne","Casady","Casaleggio","Casandra","Casanova","Casar","Casavant","Case","Casey","Cash","Casi","Casia","Casie","Casilda","Casilde","Casimir","Casimire","Casmey","Caspar","Casper","Cass","Cassady","Cassandra","Cassandre","Cassandry","Cassaundra","Cassell","Cassella","Cassey","Cassi","Cassiani","Cassidy","Cassie","Cassil","Cassilda","Cassius","Cassondra","Cassy","Casta","Castara","Casteel","Castera","Castillo","Castle","Castor","Castora","Castorina","Castra","Castro","Caswell","Cataldo","Catarina","Cate","Caterina","Cates","Cath","Catha","Catharina","Catharine","Cathe","Cathee","Catherin","Catherina","Catherine","Cathey","Cathi","Cathie","Cathleen","Cathlene","Cathrin","Cathrine"
,"Cathryn","Cathy","Cathyleen","Cati","Catie","Catima","Catina","Catlaina","Catlee","Catlin","Cato","Caton","Catrina","Catriona","Catt","Cattan","Cattier","Cattima","Catto","Catton","Caty","Caughey","Caundra","Cavallaro","Cavan","Cavanagh","Cavanaugh","Cave","Caves","Cavil","Cavill","Cavit","Cavuoto","Cawley","Caye","Cayla","Caylor","Cayser","Caz","Cazzie","Cchaddie","Cece","Cecelia","Cecil","Cecile","Ceciley","Cecilia","Cecilio","Cecilius","Cecilla","Cecily","Ced","Cedar","Cedell","Cedric","Ceevah","Ceil","Cele","Celene","Celeski","Celesta","Celeste","Celestia","Celestina","Celestine","Celestyn","Celestyna","Celia","Celie","Celik","Celin","Celina","Celinda","Celine","Celinka","Celio","Celisse","Celka","Celle","Cello","Celtic","Cenac","Cence","Centeno","Center","Centonze","Ceporah","Cerallua","Cerelia","Cerell","Cerellia","Cerelly","Cerf","Cerracchio","Certie","Cerveny","Cerys","Cesar","Cesare","Cesaria","Cesaro","Cestar","Cesya","Cha","Chabot","Chace","Chad","Chadabe","Chadbourne","Chadburn","Chadd","Chaddie","Chaddy","Chader","Chadwick","Chae","Chafee","Chaffee","Chaffin","Chaffinch","Chaiken","Chaille","Chaim","Chainey","Chaing","Chak","Chaker","Chally","Chalmer","Chalmers","Chamberlain","Chamberlin","Chambers","Chamkis","Champ","Champagne","Champaigne","Chan","Chance","Chancellor","Chancelor","Chancey","Chanda","Chandal","Chandler","Chandless","Chandos","Chandra","Chane","Chaney","Chang","Changaris","Channa","Channing","Chansoo","Chantal","Chantalle","Chao","Chap","Chapa","Chapel","Chapell","Chapen","Chapin","Chapland","Chapman","Chapnick","Chappelka","Chappell","Chappie","Chappy","Chara","Charbonneau","Charbonnier","Chard","Chari","Charie","Charil","Charin","Chariot","Charis","Charissa","Charisse","Charita","Charity","Charla","Charlean","Charleen","Charlena","Charlene","Charles","Charlet","Charleton","Charley","Charlie","Charline","Charlot","Charlotta","Charlotte","Charlton","Charmain","Charmaine","Charmane","Charmian","Charmine","Charmion","Charo","Charpentier","Charron","Charry","Charteris","Charters","Charyl","Chas","Chase","Chasse","Chassin","Chastain","Chastity","Chatav","Chatterjee","Chatwin","Chaudoin","Chaunce","Chauncey","Chavaree","Chaves","Chavey","Chavez","Chaworth","Che","Cheadle","Cheatham","Checani","Chee","Cheffetz","Cheke","Chellman","Chelsae","Chelsea","Chelsey","Chelsie","Chelsy","Chelton","Chem","Chema","Chemar","Chemaram","Chemarin","Chemash","Chemesh","Chemosh","Chemush","Chen","Chenay","Chenee","Cheney","Cheng","Cher","Chere","Cherey","Cheri","Cheria","Cherian","Cherianne","Cherice","Cherida","Cherie","Cherilyn","Cherilynn","Cherin","Cherise","Cherish","Cherlyn","Chernow","Cherri","Cherrita","Cherry","Chery","Cherye","Cheryl","Ches","Cheshire","Cheslie","Chesna","Chesney","Chesnut","Chessa","Chessy","Chester","Cheston","Chet","Cheung","Chev","Chevalier","Chevy","Chew","Cheyne","Cheyney","Chi","Chiaki","Chiang","Chiarra","Chic","Chick","Chickie","Chicky","Chico","Chicoine","Chien","Chil","Chilcote","Child","Childers","Childs","Chiles","Chill","Chilson","Chilt","Chilton","Chimene","Chin","China","Ching","Chinua","Chiou","Chip","Chipman","Chiquia","Chiquita","Chirlin","Chisholm","Chita","Chitkara","Chivers","Chladek","Chlo","Chloe","Chloette","Chloras","Chlores","Chlori","Chloris","Cho","Chobot","Chon","Chong","Choo","Choong","Chor","Chouest","Chow","Chretien","Chris","Chrisman","Chrisoula","Chrissa","Chrisse","Chrissie","Chrissy","Christa","Christabel","Christabella","Christabelle","Christal","Christalle","Christan","Christean","Christel","Christen","Christensen","
Christenson","Christi","Christian","Christiana","Christiane","Christianity","Christianna","Christiano","Christiansen","Christianson","Christie","Christin","Christina","Christine","Christis","Christmann","Christmas","Christoffer","Christoforo","Christoper","Christoph","Christophe","Christopher","Christos","Christy","Christye","Christyna","Chrisy","Chrotoem","Chrysa","Chrysler","Chrystal","Chryste","Chrystel","Chu","Chuah","Chubb","Chuch","Chucho","Chuck","Chud","Chui","Chuipek","Chun","Chung","Chura","Church","Churchill","Chute","Chuu","Chyou","Cia","Cianca","Ciapas","Ciapha","Ciaphus","Cibis","Ciccia","Cicely","Cicenia","Cicero","Cichocki","Cicily","Cid","Cida","Ciel","Cila","Cilka","Cilla","Cilo","Cilurzo","Cima","Cimah","Cimbura","Cinda","Cindee","Cindelyn","Cinderella","Cindi","Cindie","Cindra","Cindy","Cinelli","Cini","Cinnamon","Cioban","Cioffred","Ciprian","Circosta","Ciri","Cirilla","Cirillo","Cirilo","Ciro","Cirone","Cirri","Cis","Cissie","Cissiee","Cissy","Cita","Citarella","Citron","Clabo","Claiborn","Claiborne","Clair","Claire","Claman","Clance","Clancy","Clapp","Clapper","Clara","Clarabelle","Clarance","Clardy","Clare","Clarence","Claresta","Clareta","Claretta","Clarette","Clarey","Clarhe","Clari","Claribel","Clarice","Clarie","Clarinda","Clarine","Clarisa","Clarise","Clarissa","Clarisse","Clarita","Clark","Clarke","Clarkin","Clarkson","Clary","Claud","Clauddetta","Claude","Claudell","Claudelle","Claudetta","Claudette","Claudia","Claudian","Claudianus","Claudie","Claudina","Claudine","Claudio","Claudius","Claudy","Claus","Clausen","Clava","Clawson","Clay","Clayberg","Clayborn","Clayborne","Claybourne","Clayson","Clayton","Clea","Cleary","Cleasta","Cleave","Cleaves","Cleavland","Clein","Cleland","Clellan","Clem","Clemen","Clemence","Clemens","Clement","Clementas","Clemente","Clementi","Clementia","Clementina","Clementine","Clementis","Clementius","Clements","Clemmie","Clemmy","Cleo","Cleodal","Cleodel","Cleodell","Cleon","Cleopatra","Cleopatre","Clerc","Clercq","Clere","Cleres","Clerissa","Clerk","Cleti","Cletis","Cletus","Cleve","Cleveland","Clevey","Clevie","Clie","Cliff","Cliffes","Clifford","Clift","Clifton","Clim","Cline","Clint","Clintock","Clinton","Clio","Clippard","Clite","Clive","Clo","Cloe","Cloots","Clorinda","Clorinde","Cloris","Close","Clothilde","Clotilda","Clotilde","Clough","Clougher","Cloutman","Clova","Clovah","Clover","Clovis","Clower","Clute","Cly","Clyde","Clymer","Clynes","Clyte","Clyve","Clywd","Cnut","Coad","Coady","Coates","Coats","Cob","Cobb","Cobbie","Cobby","Coben","Cochard","Cochran","Cochrane","Cock","Cockburn","Cocke","Cocks","Coco","Codd","Codding","Codee","Codel","Codi","Codie","Cody","Coe","Coffee","Coffeng","Coffey","Coffin","Cofsky","Cogan","Cogen","Cogswell","Coh","Cohbath","Cohberg","Cohbert","Cohby","Cohdwell","Cohe","Coheman","Cohen","Cohette","Cohin","Cohl","Cohla","Cohleen","Cohlette","Cohlier","Cohligan","Cohn","Cointon","Coit","Coke","Col","Colan","Colas","Colb","Colbert","Colburn","Colby","Colbye","Cole","Coleen","Coleman","Colene","Colet","Coletta","Colette","Coleville","Colfin","Colier","Colin","Colinson","Colis","Collar","Collayer","Collbaith","Colleen","Collen","Collete","Collette","Colley","Collie","Collier","Colligan","Collimore","Collin","Colline","Collins","Collis","Collum","Colly","Collyer","Colman","Colner","Colombi","Colon","Colp","Colpin","Colson","Colston","Colt","Coltin","Colton","Coltson","Coltun","Columba","Columbine","Columbus","Columbyne","Colver","Colvert","Colville","Colvin","Colwell","Colwen","Colwin","Colyer","Co
mbe","Combes","Combs","Comfort","Compte","Comptom","Compton","Comras","Comstock","Comyns","Con","Conah","Conal","Conall","Conan","Conant","Conard","Concepcion","Concettina","Concha","Conchita","Concoff","Concordia","Condon","Coney","Congdon","Conger","Coniah","Conias","Conlan","Conlee","Conlen","Conley","Conlin","Conlon","Conn","Connel","Connell","Connelley","Connelly","Conner","Conners","Connett","Conney","Conni","Connie","Connolly","Connor","Connors","Conny","Conover","Conrad","Conrade","Conrado","Conroy","Consalve","Consolata","Constance","Constancia","Constancy","Constant","Constanta","Constantia","Constantin","Constantina","Constantine","Constantino","Consuela","Consuelo","Conte","Conti","Converse","Convery","Conway","Cony","Conyers","Cooe","Cook","Cooke","Cookie","Cooley","Coombs","Coonan","Coop","Cooper","Cooperman","Coopersmith","Cooperstein","Cope","Copeland","Copland","Coplin","Copp","Coppinger","Coppins","Coppock","Coppola","Cora","Corabel","Corabella","Corabelle","Coral","Coralie","Coraline","Coralyn","Coray","Corbet","Corbett","Corbie","Corbin","Corby","Cord","Cordalia","Cordeelia","Cordelia","Cordelie","Cordell","Corder","Cordey","Cordi","Cordie","Cordier","Cordle","Cordova","Cordula","Cordy","Coreen","Corel","Corell","Corella","Corena","Corenda","Corene","Coretta","Corette","Corey","Cori","Coridon","Corie","Corilla","Corin","Corina","Corine","Corinna","Corinne","Coriss","Corissa","Corkhill","Corley","Corliss","Corly","Cormac","Cormack","Cormick","Cormier","Cornall","Corneille","Cornel","Cornela","Cornelia","Cornelie","Cornelius","Cornell","Cornelle","Cornew","Corney","Cornia","Cornie","Cornish","Cornwall","Cornwell","Corny","Corotto","Correna","Correy","Corri","Corrianne","Corrie","Corrina","Corrine","Corrinne","Corron","Corry","Corsetti","Corsiglia","Corso","Corson","Cort","Cortie","Cortney","Corty","Corvese","Corvin","Corwin","Corwun","Cory","Coryden","Corydon","Cos","Cosenza","Cosetta","Cosette","Coshow","Cosimo","Cosma","Cosme","Cosmo","Cost","Costa","Costanza","Costanzia","Costello","Coster","Costin","Cote","Cotsen","Cott","Cotter","Cotterell","Cottle","Cottrell","Coucher","Couchman","Coughlin","Coulombe","Coulson","Coulter","Coumas","Countess","Courcy","Court","Courtenay","Courtland","Courtnay","Courtney","Courtund","Cousin","Cousins","Coussoule","Couture","Covell","Coveney","Cowan","Coward","Cowden","Cowen","Cower","Cowey","Cowie","Cowles","Cowley","Cown","Cox","Coy","Coyle","Cozmo","Cozza","Crabb","Craddock","Craggie","Craggy","Craig","Crain","Cralg","Cram","Cramer","Cran","Crandale","Crandall","Crandell","Crane","Craner","Cranford","Cranston","Crary","Craven","Craw","Crawford","Crawley","Creamer","Crean","Creath","Creedon","Creigh","Creight","Creighton","Crelin","Crellen","Crenshaw","Cresa","Crescantia","Crescen","Crescentia","Crescin","Crescint","Cresida","Crespi","Crespo","Cressi","Cressida","Cressler","Cressy","Crichton","Crifasi","Crim","Crin","Cris","Crisey","Crispa","Crispas","Crispen","Crispin","Crissie","Crissy","Crist","Crista","Cristabel","Cristal","Cristen","Cristi","Cristian","Cristiano","Cristie","Cristin","Cristina","Cristine","Cristiona","Cristionna","Cristobal","Cristoforo","Cristy","Criswell","Critchfield","Critta","Crocker","Crockett","Crofoot","Croft","Crofton","Croix","Crompton","Cromwell","Croner","Cronin","Crooks","Croom","Crosby","Crosley","Cross","Crosse","Croteau","Crotty","Crow","Crowe","Crowell","Crowley","Crowns","Croydon","Cruce","Crudden","Cruickshank","Crutcher","Cruz","Cryan","Crysta","Crystal","Crystie","Cthrine","Cuda","Cudlip","Culb
erson","Culbert","Culbertson","Culhert","Cull","Cullan","Cullen","Culley","Cullie","Cullin","Culliton","Cully","Culosio","Culver","Cumine","Cumings","Cummine","Cummings","Cummins","Cung","Cunningham","Cupo","Curcio","Curhan","Curkell","Curley","Curnin","Curr","Curran","Curren","Currey","Currie","Currier","Curry","Curson","Curt","Curtice","Curtis","Curzon","Cusack","Cusick","Custer","Cut","Cutcheon","Cutcliffe","Cuthbert","Cuthbertson","Cuthburt","Cutler","Cutlerr","Cutlip","Cutlor","Cutter","Cuttie","Cuttler","Cutty","Cuyler","Cy","Cyb","Cybil","Cybill","Cychosz","Cyd","Cykana","Cyler","Cyma","Cymbre","Cyn","Cyna","Cynar","Cynara","Cynarra","Cynde","Cyndi","Cyndia","Cyndie","Cyndy","Cynera","Cynth","Cynthea","Cynthia","Cynthie","Cynthla","Cynthy","Cyprian","Cyprio","Cypro","Cyprus","Cyrano","Cyrie","Cyril","Cyrill","Cyrilla","Cyrille","Cyrillus","Cyrus","Czarra","D'Arcy","Dabbs","Daberath","Dabney","Dace","Dacey","Dachi","Dachia","Dachy","Dacia","Dacie","Dacy","Daegal","Dael","Daffi","Daffie","Daffodil","Daffy","Dafna","Dafodil","Dag","Dagall","Daggett","Daggna","Dagley","Dagmar","Dagna","Dagnah","Dagney","Dagny","Dahl","Dahle","Dahlia","Dahlstrom","Daigle","Dail","Daile","Dailey","Daisey","Daisi","Daisie","Daisy","Daitzman","Dal","Dale","Dalenna","Daley","Dalia","Dalila","Dalis","Dall","Dallas","Dalli","Dallis","Dallman","Dallon","Daloris","Dalpe","Dalston","Dalt","Dalton","Dalury","Daly","Dam","Damal","Damalas","Damales","Damali","Damalis","Damalus","Damara","Damaris","Damarra","Dambro","Dame","Damek","Damian","Damiani","Damiano","Damick","Damicke","Damien","Damita","Damle","Damon","Damour","Dan","Dana","Danae","Danaher","Danais","Danas","Danby","Danczyk","Dane","Danell","Danella","Danelle","Danete","Danette","Daney","Danforth","Dang","Dani","Dania","Daniala","Danialah","Danica","Danice","Danie","Daniel","Daniela","Daniele","Daniell","Daniella","Danielle","Daniels","Danielson","Danieu","Danika","Danila","Danit","Danita","Daniyal","Dann","Danna","Dannel","Danni","Dannica","Dannie","Dannon","Danny","Dannye","Dante","Danuloff","Danya","Danyelle","Danyette","Danyluk","Danzig","Danziger","Dao","Daph","Daphene","Daphie","Daphna","Daphne","Dar","Dara","Darach","Darb","Darbee","Darbie","Darby","Darce","Darcee","Darcey","Darci","Darcia","Darcie","Darcy","Darda","Dardani","Dare","Dareece","Dareen","Darees","Darell","Darelle","Daren","Dari","Daria","Darian","Darice","Darill","Darin","Dario","Darius","Darken","Darla","Darleen","Darlene","Darline","Darlleen","Darmit","Darn","Darnall","Darnell","Daron","Darooge","Darra","Darrel","Darrell","Darrelle","Darren","Darrey","Darrick","Darrill","Darrin","Darrow","Darryl","Darryn","Darsey","Darsie","Dart","Darton","Darwen","Darwin","Darya","Daryl","Daryle","Daryn","Dash","Dasha","Dasi","Dasie","Dasteel","Dasya","Datha","Datnow","Daub","Daugherty","Daughtry","Daukas","Daune","Dav","Dave","Daveda","Daveen","Daven","Davena","Davenport","Daveta","Davey","David","Davida","Davidde","Davide","Davidoff","Davidson","Davie","Davies","Davilman","Davin","Davina","Davine","Davis","Davison","Davita","Davon","Davy","Dawes","Dawkins","Dawn","Dawna","Dawson","Day","Daye","Dayle","Dayna","Ddene","De","DeWitt","Deach","Deacon","Deadman","Dean","Deana","Deane","Deaner","Deanna","Deanne","Dearborn","Dearden","Dearman","Dearr","Deb","Debarath","Debbee","Debbi","Debbie","Debbra","Debby","Debee","Debera","Debi","Debor","Debora","Deborah","Deborath","Debra","Decamp","Decato","Decca","December","Decima","Deck","Decker","Deckert","Declan","Dede","Deden","Dedie","Dedra","Dedric","Dedrick"
,"Dee","DeeDee","DeeAnn","Deeann","Deeanne","Deedee","Deegan","Deena","Deenya","Deer","Deerdre","Deering","Deery","Deeyn","Defant","Dehlia","Dehnel","Deibel","Deidre","Deina","Deirdra","Deirdre","Dekeles","Dekow","Del","Dela","Delacourt","Delaine","Delainey","Delamare","Deland","Delaney","Delanie","Delano","Delanos","Delanty","Delaryd","Delastre","Delbert","Delcina","Delcine","Delfeena","Delfine","Delgado","Delia","Delija","Delila","Delilah","Delinda","Delisle","Dell","Della","Delle","Dellora","Delly","Delmar","Delmer","Delmor","Delmore","Delogu","Delora","Delorenzo","Delores","Deloria","Deloris","Delos","Delp","Delphina","Delphine","Delphinia","Delsman","Delwin","Delwyn","Demaggio","Demakis","Demaria","Demb","Demeter","Demetra","Demetre","Demetri","Demetria","Demetris","Demetrius","Demeyer","Deming","Demitria","Demmer","Demmy","Demodena","Demona","Demott","Demp","Dempsey","Dempster","Dempstor","Demy","Den","Dena","Denae","Denbrook","Denby","Dene","Deni","Denice","Denie","Denis","Denise","Denison","Denman","Denn","Denna","Dennard","Dennet","Dennett","Denney","Denni","Dennie","Dennis","Dennison","Denny","Denoting","Dent","Denten","Denton","Denver","Deny","Denys","Denyse","Denzil","Deonne","Depoliti","Deppy","Der","Deragon","Derayne","Derby","Dercy","Derek","Derian","Derick","Derina","Derinna","Derk","Derman","Dermot","Dermott","Derna","Deron","Deroo","Derr","Derrek","Derrick","Derriey","Derrik","Derril","Derron","Derry","Derte","Derward","Derwin","Derwon","Derwood","Deryl","Derzon","Des","Desai","Desberg","Descombes","Desdamona","Desdamonna","Desdee","Desdemona","Desi","Desimone","Desirae","Desirea","Desireah","Desiree","Desiri","Desma","Desmond","Desmund","Dessma","Desta","Deste","Destinee","Deth","Dett","Detta","Dettmer","Deuno","Deutsch","Dev","Deva","Devan","Devaney","Dever","Devi","Devin","Devina","Devine","Devinna","Devinne","Devitt","Devland","Devlen","Devlin","Devol","Devon","Devona","Devondra","Devonna","Devonne","Devora","Devy","Dew","Dewain","Dewar","Dewayne","Dewees","Dewey","Dewhirst","Dewhurst","Dewie","Dewitt","Dex","Dexter","Dey","Dhar","Dhiman","Dhiren","Dhruv","Dhu","Dhumma","Di","Diahann","Diamante","Diamond","Dian","Diana","Diandra","Diandre","Diane","Diane-Marie","Dianemarie","Diann","Dianna","Dianne","Diannne","Diantha","Dianthe","Diao","Diarmid","Diarmit","Diarmuid","Diaz","Dib","Diba","Dibb","Dibbell","Dibbrun","Dibri","Dibrin","Dibru","Dich","Dichy","Dick","Dickens","Dickenson","Dickerson","Dickey","Dickie","Dickinson","Dickman","Dicks","Dickson","Dicky","Didi","Didier","Dido","Dieball","Diego","Diehl","Diella","Dielle","Dielu","Diena","Dierdre","Dierolf","Diet","Dieter","Dieterich","Dietrich","Dietsche","Dietz","Dikmen","Dilan","Diley","Dilisio","Dilks","Dill","Dillie","Dillon","Dilly","Dimitri","Dimitris","Dimitry","Dimmick","Dimond","Dimphia","Dina","Dinah","Dinan","Dincolo","Dine","Dinerman","Dinesh","Dinin","Dinnage","Dinnie","Dinny","Dino","Dinsdale","Dinse","Dinsmore","Diogenes","Dion","Dione","Dionis","Dionisio","Dionne","Dionysus","Dippold","Dira","Dirk","Disario","Disharoon","Disini","Diskin","Diskson","Disraeli","Dita","Ditmore","Ditter","Dittman","Dituri","Ditzel","Diver","Divine","Dix","Dixie","Dixil","Dixon","Dmitri","Dniren","Doak","Doane","Dobb","Dobbins","Doble","Dobrinsky","Dobson","Docia","Docila","Docile","Docilla","Docilu","Dodd","Dodds","Dode","Dodge","Dodi","Dodie","Dodson","Dodwell","Dody","Doe","Doehne","Doelling","Doerrer","Doersten","Doggett","Dogs","Doherty","Doi","Doig","Dola","Dolan","Dole","Doley","Dolf","Dolhenty","Doll","Dollar","Doll
ey","Dolli","Dollie","Dolloff","Dolly","Dolora","Dolores","Dolorita","Doloritas","Dolph","Dolphin","Dom","Domash","Dombrowski","Domel","Domela","Domella","Domenech","Domenic","Domenico","Domeniga","Domineca","Dominga","Domingo","Domini","Dominic","Dominica","Dominick","Dominik","Dominique","Dominus","Dominy","Domonic","Domph","Don","Dona","Donadee","Donaghue","Donahoe","Donahue","Donal","Donald","Donaldson","Donall","Donalt","Donata","Donatelli","Donaugh","Donavon","Donegan","Donela","Donell","Donella","Donelle","Donelson","Donelu","Doner","Donetta","Dong","Donia","Donica","Donielle","Donn","Donna","Donnamarie","Donnell","Donnelly","Donnenfeld","Donni","Donnie","Donny","Donoghue","Donoho","Donohue","Donough","Donovan","Doolittle","Doone","Dopp","Dora","Doralia","Doralin","Doralyn","Doralynn","Doralynne","Doran","Dorca","Dorcas","Dorcea","Dorcia","Dorcus","Dorcy","Dore","Doreen","Dorelia","Dorella","Dorelle","Dorena","Dorene","Doretta","Dorette","Dorey","Dorfman","Dori","Doria","Dorian","Dorice","Dorie","Dorin","Dorina","Dorinda","Dorine","Dorion","Doris","Dorisa","Dorise","Dorison","Dorita","Dorkas","Dorkus","Dorlisa","Dorman","Dorn","Doro","Dorolice","Dorolisa","Dorotea","Doroteya","Dorothea","Dorothee","Dorothi","Dorothy","Dorr","Dorran","Dorree","Dorren","Dorri","Dorrie","Dorris","Dorry","Dorsey","Dorsman","Dorsy","Dorthea","Dorthy","Dorweiler","Dorwin","Dory","Doscher","Dosh","Dosi","Dosia","Doss","Dot","Doti","Dotson","Dott","Dotti","Dottie","Dotty","Doty","Doubler","Doug","Dougal","Dougald","Dougall","Dougherty","Doughman","Doughty","Dougie","Douglas","Douglass","Dougy","Douty","Douville","Dov","Dove","Dovev","Dow","Dowd","Dowdell","Dowell","Dowlen","Dowling","Down","Downall","Downe","Downes","Downey","Downing","Downs","Dowski","Dowzall","Doxia","Doy","Doykos","Doyle","Drabeck","Dragelin","Dragon","Dragone","Dragoon","Drain","Drais","Drake","Drandell","Drape","Draper","Dray","Dre","Dream","Dreda","Dreddy","Dredi","Dreeda","Dreher","Dremann","Drescher","Dressel","Dressler","Drew","Drewett","Drews","Drexler","Dreyer","Dric","Drice","Drida","Dripps","Driscoll","Driskill","Drisko","Drislane","Drobman","Drogin","Drolet","Drona","Dronski","Drooff","Dru","Druce","Druci","Drucie","Drucill","Drucilla","Drucy","Drud","Drue","Drugge","Drugi","Drummond","Drus","Drusi","Drusie","Drusilla","Drusus","Drusy","Dry","Dryden","Drye","Dryfoos","DuBois","Duane","Duarte","Duax","Dubenko","Dublin","Ducan","Duck","Dud","Dudden","Dudley","Duer","Duester","Duff","Duffie","Duffy","Dugaid","Dugald","Dugan","Dugas","Duggan","Duhl","Duke","Dukey","Dukie","Duky","Dulce","Dulcea","Dulci","Dulcia","Dulciana","Dulcie","Dulcine","Dulcinea","Dulcle","Dulcy","Duleba","Dulla","Dulsea","Duma","Dumah","Dumanian","Dumas","Dumm","Dumond","Dun","Dunaville","Dunc","Duncan","Dunham","Dunkin","Dunlavy","Dunn","Dunning","Dunseath","Dunson","Dunstan","Dunston","Dunton","Duntson","Duong","Dupaix","Dupin","Dupre","Dupuis","Dupuy","Duquette","Dur","Durand","Durant","Durante","Durarte","Durer","Durgy","Durham","Durkee","Durkin","Durman","Durnan","Durning","Durno","Durr","Durrace","Durrell","Durrett","Durst","Durstin","Durston","Durtschi","Durward","Durware","Durwin","Durwood","Durwyn","Dusa","Dusen","Dust","Dustan","Duster","Dustie","Dustin","Dustman","Duston","Dusty","Dusza","Dutch","Dutchman","Duthie","Duval","Duvall","Duwalt","Duwe","Duyne","Dwain","Dwaine","Dwan","Dwane","Dwayne","Dweck","Dwight","Dwinnell","Dworman","Dwyer","Dyal","Dyan","Dyana","Dyane","Dyann","Dyanna","Dyanne","Dyche","Dyer","Dygal","Dygall","Dygert","Dyke","Dyl
","Dylan","Dylana","Dylane","Dymoke","Dympha","Dymphia","Dyna","Dynah","Dysart","Dyson","Dyun","Dzoba","Eachelle","Eachern","Eada","Eade","Eadie","Eadith","Eadmund","Eads","Eadwina","Eadwine","Eagle","Eal","Ealasaid","Eamon","Eanore","Earl","Earla","Earle","Earleen","Earlene","Earley","Earlie","Early","Eartha","Earvin","East","Easter","Eastlake","Eastman","Easton","Eaton","Eatton","Eaves","Eb","Eba","Ebarta","Ebba","Ebbarta","Ebberta","Ebbie","Ebby","Eben","Ebeneser","Ebenezer","Eberhard","Eberhart","Eberle","Eberly","Ebert","Eberta","Eberto","Ebner","Ebneter","Eboh","Ebonee","Ebony","Ebsen","Echikson","Echo","Eckardt","Eckart","Eckblad","Eckel","Eckhardt","Eckmann","Econah","Ed","Eda","Edan","Edana","Edbert","Edd","Edda","Eddana","Eddi","Eddie","Eddina","Eddra","Eddy","Ede","Edea","Edee","Edeline","Edelman","Edelson","Edelstein","Edelsten","Eden","Edette","Edgar","Edgard","Edgardo","Edge","Edgell","Edgerton","Edholm","Edi","Edie","Edik","Edin","Edina","Edison","Edita","Edith","Editha","Edithe","Ediva","Edla","Edlin","Edlun","Edlyn","Edmanda","Edme","Edmea","Edmead","Edmee","Edmon","Edmond","Edmonda","Edmondo","Edmonds","Edmund","Edmunda","Edna","Edny","Edora","Edouard","Edra","Edrea","Edrei","Edric","Edrick","Edris","Edrock","Edroi","Edsel","Edson","Eduard","Eduardo","Eduino","Edva","Edvard","Edveh","Edward","Edwards","Edwin","Edwina","Edwine","Edwyna","Edy","Edyth","Edythe","Effie","Effy","Efram","Efrem","Efren","Efron","Efthim","Egan","Egarton","Egbert","Egerton","Eggett","Eggleston","Egide","Egidio","Egidius","Egin","Eglanteen","Eglantine","Egon","Egor","Egwan","Egwin","Ehling","Ehlke","Ehman","Ehr","Ehrenberg","Ehrlich","Ehrman","Ehrsam","Ehud","Ehudd","Eichman","Eidson","Eiger","Eileen","Eilis","Eimile","Einberger","Einhorn","Eipper","Eirena","Eirene","Eisele","Eisen","Eisenberg","Eisenhart","Eisenstark","Eiser","Eisinger","Eisler","Eiten","Ekaterina","El","Ela","Elah","Elaina","Elaine","Elana","Elane","Elata","Elatia","Elayne","Elazaro","Elbart","Elberfeld","Elbert","Elberta","Elbertina","Elbertine","Elboa","Elbring","Elburr","Elburt","Elconin","Elda","Elden","Elder","Eldin","Eldon","Eldora","Eldorado","Eldoree","Eldoria","Eldred","Eldreda","Eldredge","Eldreeda","Eldrid","Eldrida","Eldridge","Eldwen","Eldwin","Eldwon","Eldwun","Eleanor","Eleanora","Eleanore","Eleazar","Electra","Eleen","Elena","Elene","Eleni","Elenore","Eleonora","Eleonore","Eleph","Elephus","Elery","Elexa","Elfie","Elfont","Elfreda","Elfrida","Elfrieda","Elfstan","Elga","Elgar","Eli","Elia","Eliades","Elianora","Elianore","Elias","Eliason","Eliath","Eliathan","Eliathas","Elicia","Elidad","Elie","Eliezer","Eliga","Elihu","Elijah","Elinor","Elinore","Eliot","Eliott","Elisa","Elisabet","Elisabeth","Elisabetta","Elise","Elisee","Eliseo","Elish","Elisha","Elison","Elissa","Elita","Eliza","Elizabet","Elizabeth","Elka","Elke","Elkin","Ella","Elladine","Ellan","Ellard","Ellary","Ellata","Elle","Ellen","Ellene","Ellerd","Ellerey","Ellersick","Ellery","Ellett","Ellette","Ellga","Elli","Ellicott","Ellie","Ellinger","Ellingston","Elliot","Elliott","Ellis","Ellison","Ellissa","Ellita","Ellmyer","Ellon","Ellora","Ellord","Ellswerth","Ellsworth","Ellwood","Elly","Ellyn","Ellynn","Elma","Elmajian","Elmaleh","Elman","Elmer","Elmina","Elmira","Elmo","Elmore","Elna","Elnar","Elnora","Elnore","Elo","Elodea","Elodia","Elodie","Eloisa","Eloise","Elon","Elonore","Elora","Elreath","Elrod","Elroy","Els","Elsa","Elsbeth","Else","Elset","Elsey","Elsi","Elsie","Elsinore","Elson","Elspet","Elspeth","Elstan","Elston","Elsworth","Elsy","Elton","E
lum","Elurd","Elva","Elvah","Elvera","Elvia","Elvie","Elvin","Elvina","Elvira","Elvis","Elvyn","Elwaine","Elwee","Elwin","Elwina","Elwira","Elwood","Elwyn","Ely","Elyn","Elyse","Elysee","Elysha","Elysia","Elyssa","Em","Ema","Emad","Emalee","Emalia","Emanuel","Emanuela","Emanuele","Emarie","Embry","Emee","Emelda","Emelen","Emelia","Emelin","Emelina","Emeline","Emelita","Emelun","Emelyne","Emera","Emerald","Emeric","Emerick","Emersen","Emerson","Emery","Emie","Emil","Emile","Emilee","Emili","Emilia","Emilie","Emiline","Emilio","Emily","Emina","Emlen","Emlin","Emlyn","Emlynn","Emlynne","Emma","Emmalee","Emmaline","Emmalyn","Emmalynn","Emmalynne","Emmanuel","Emmeline","Emmer","Emmeram","Emmerich","Emmerie","Emmery","Emmet","Emmett","Emmey","Emmi","Emmie","Emmit","Emmons","Emmott","Emmuela","Emmy","Emmye","Emogene","Emory","Emrich","Emsmus","Emyle","Emylee","Enalda","Encrata","Encratia","Encratis","End","Ender","Endo","Endor","Endora","Endres","Enenstein","Eng","Engdahl","Engeddi","Engedi","Engedus","Engel","Engelbert","Engelhart","Engen","Engenia","England","Engle","Englebert","Engleman","Englis","English","Engracia","Engud","Engvall","Enid","Ennis","Eno","Enoch","Enos","Enrica","Enrichetta","Enrico","Enrika","Enrique","Enriqueta","Ensign","Ensoll","Entwistle","Enyedy","Eoin","Eolanda","Eolande","Eph","Ephraim","Ephram","Ephrayim","Ephrem","Epifano","Epner","Epp","Epperson","Eppes","Eppie","Epps","Epstein","Er","Eradis","Eran","Eras","Erasme","Erasmo","Erasmus","Erastatus","Eraste","Erastes","Erastus","Erb","Erbe","Erbes","Erda","Erdah","Erdda","Erde","Erdei","Erdman","Erdrich","Erek","Erelia","Erena","Erfert","Ergener","Erhard","Erhart","Eri","Eric","Erica","Erich","Ericha","Erick","Ericka","Ericksen","Erickson","Erida","Erie","Eriha","Erik","Erika","Erikson","Erin","Erina","Erine","Erinn","Erinna","Erkan","Erl","Erland","Erlandson","Erle","Erleena","Erlene","Erlewine","Erlin","Erlina","Erline","Erlinna","Erlond","Erma","Ermanno","Erme","Ermeena","Ermengarde","Ermentrude","Ermey","Ermin","Ermina","Ermine","Erminia","Erminie","Erminna","Ern","Erna","Ernald","Ernaldus","Ernaline","Ernest","Ernesta","Ernestine","Ernesto","Ernestus","Ernie","Ernst","Erny","Errecart","Errick","Errol","Erroll","Erskine","Ertha","Erund","Erv","ErvIn","Ervin","Ervine","Erving","Erwin","Eryn","Esau","Esbensen","Esbenshade","Esch","Esdras","Eshelman","Eshman","Eskil","Eskill","Esma","Esmaria","Esme","Esmeralda","Esmerelda","Esmerolda","Esmond","Espy","Esra","Essa","Essam","Essex","Essie","Essinger","Essy","Esta","Estas","Esteban","Estel","Estele","Estell","Estella","Estelle","Esten","Ester","Estes","Estevan","Estey","Esther","Estis","Estrella","Estrellita","Estren","Estrin","Estus","Eta","Etam","Etan","Etana","Etem","Ethan","Ethban","Ethben","Ethbin","Ethbinium","Ethbun","Ethe","Ethel","Ethelbert","Ethelda","Ethelin","Ethelind","Ethelinda","Etheline","Ethelred","Ethelstan","Ethelyn","Ethyl","Etienne","Etka","Etoile","Etom","Etra","Etrem","Etta","Ettari","Etti","Ettie","Ettinger","Ettore","Etty","Etz","Eudo","Eudoca","Eudocia","Eudora","Eudosia","Eudoxia","Euell","Eugen","Eugene","Eugenia","Eugenides","Eugenie","Eugenio","Eugenius","Eugeniusz","Eugenle","Eugine","Euh","Eula","Eulalee","Eulalia","Eulaliah","Eulalie","Eulau","Eunice","Eupheemia","Euphemia","Euphemiah","Euphemie","Euridice","Eurydice","Eusebio","Eustace","Eustache","Eustacia","Eustashe","Eustasius","Eustatius","Eustazio","Eustis","Euton","Ev","Eva","Evadne","Evadnee","Evaleen","Evalyn","Evan","Evander","Evangelia","Evangelin","Evangelina","Evangeline","Evan
gelist","Evania","Evanne","Evannia","Evans","Evante","Evanthe","Evars","Eve","Eveleen","Evelin","Evelina","Eveline","Evelinn","Evelunn","Evelyn","Even","Everara","Everard","Evered","Everest","Everett","Everick","Everrs","Evers","Eversole","Everson","Evetta","Evette","Evey","Evie","Evin","Evita","Evonne","Evoy","Evslin","Evvie","Evvy","Evy","Evyn","Ewald","Ewall","Ewan","Eward","Ewart","Ewell","Ewen","Ewens","Ewer","Ewold","Eyde","Eydie","Eyeleen","Eyla","Ez","Ezana","Ezar","Ezara","Ezaria","Ezarra","Ezarras","Ezechiel","Ezekiel","Ezequiel","Eziechiele","Ezmeralda","Ezra","Ezri","Ezzo","Fabe","Faber","Fabi","Fabian","Fabiano","Fabien","Fabio","Fabiola","Fabiolas","Fablan","Fabozzi","Fabri","Fabria","Fabriane","Fabrianna","Fabrianne","Fabrice","Fabrienne","Fabrin","Fabron","Fabyola","Fachan","Fachanan","Fachini","Fadden","Faden","Fadil","Fadiman","Fae","Fagaly","Fagan","Fagen","Fagin","Fahey","Fahland","Fahy","Fai","Faina","Fair","Fairbanks","Faires","Fairfax","Fairfield","Fairleigh","Fairley","Fairlie","Fairman","Fairweather","Faith","Fakieh","Falcone","Falconer","Falda","Faletti","Faline","Falito","Falk","Falkner","Fallon","Faludi","Falzetta","Fan","Fanchan","Fanchet","Fanchette","Fanchie","Fanchon","Fancie","Fancy","Fanechka","Fanestil","Fang","Fania","Fanni","Fannie","Fanning","Fanny","Fantasia","Fante","Fanya","Far","Fara","Farah","Farand","Farant","Farhi","Fari","Faria","Farica","Farika","Fariss","Farkas","Farl","Farland","Farlay","Farlee","Farleigh","Farley","Farlie","Farly","Farman","Farmann","Farmelo","Farmer","Farnham","Farnsworth","Farny","Faro","Farr","Farra","Farrah","Farrand","Farrar","Farrel","Farrell","Farrica","Farrington","Farris","Farrish","Farrison","Farro","Farron","Farrow","Faruq","Farver","Farwell","Fasano","Faso","Fassold","Fast","Fasta","Fasto","Fates","Fatima","Fatimah","Fatma","Fattal","Faubert","Faubion","Fauch","Faucher","Faulkner","Fauman","Faun","Faunia","Faunie","Faus","Faust","Fausta","Faustena","Faustina","Faustine","Faustus","Fauver","Faux","Favata","Favian","Favianus","Favien","Favin","Favrot","Fawcett","Fawcette","Fawn","Fawna","Fawne","Fawnia","Fax","Faxan","Faxen","Faxon","Faxun","Fay","Faydra","Faye","Fayette","Fayina","Fayola","Fayre","Fayth","Faythe","Fazeli","Fe","Featherstone","February","Fechter","Fedak","Federica","Federico","Fedirko","Fedora","Fee","Feeley","Feeney","Feer","Feigin","Feil","Fein","Feinberg","Feingold","Feinleib","Feinstein","Feld","Felder","Feldman","Feldstein","Feldt","Felecia","Feledy","Felic","Felicdad","Felice","Felicia","Felicidad","Felicie","Felicio","Felicity","Felicle","Felike","Feliks","Felipa","Felipe","Felise","Felisha","Felita","Felix","Feliza","Felizio","Fellner","Fellows","Felske","Felt","Felten","Feltie","Felton","Felty","Fem","Femi","Femmine","Fen","Fendig","Fenelia","Fenella","Fenn","Fennell","Fennelly","Fenner","Fennessy","Fennie","Fenny","Fenton","Fenwick","Feodor","Feodora","Feodore","Feola","Ferd","Ferde","Ferdie","Ferdinana","Ferdinand","Ferdinanda","Ferdinande","Ferdy","Fergus","Ferguson","Feriga","Ferino","Fermin","Fern","Ferna","Fernald","Fernand","Fernanda","Fernande","Fernandes","Fernandez","Fernandina","Fernando","Fernas","Ferne","Ferneau","Fernyak","Ferrand","Ferreby","Ferree","Ferrel","Ferrell","Ferren","Ferretti","Ferri","Ferrick","Ferrigno","Ferris","Ferriter","Ferro","Ferullo","Ferwerda","Festa","Festatus","Festus","Feucht","Feune","Fevre","Fey","Fi","Fia","Fiann","Fianna","Fidel","Fidela","Fidelas","Fidele","Fidelia","Fidelio","Fidelis","Fidelity","Fidellas","Fidellia","Fiden","Fidole","Fiedler","
Fiedling","Field","Fielding","Fields","Fiertz","Fiester","Fife","Fifi","Fifine","Figge","Figone","Figueroa","Filbert","Filberte","Filberto","Filemon","Files","Filia","Filiano","Filide","Filip","Filipe","Filippa","Filippo","Fillander","Fillbert","Fillender","Filler","Fillian","Filmer","Filmore","Filomena","Fin","Fina","Finbar","Finbur","Findlay","Findley","Fine","Fineberg","Finegan","Finella","Fineman","Finer","Fini","Fink","Finkelstein","Finlay","Finley","Finn","Finnegan","Finnie","Finnigan","Finny","Finstad","Finzer","Fiona","Fionna","Fionnula","Fiora","Fiore","Fiorenza","Fiorenze","Firestone","Firman","Firmin","Firooc","Fisch","Fischer","Fish","Fishback","Fishbein","Fisher","Fishman","Fisk","Fiske","Fisken","Fitting","Fitton","Fitts","Fitz","Fitzger","Fitzgerald","Fitzhugh","Fitzpatrick","Fitzsimmons","Flagler","Flaherty","Flam","Flan","Flanagan","Flanders","Flanigan","Flann","Flanna","Flannery","Flatto","Flavia","Flavian","Flavio","Flavius","Fleck","Fleda","Fleece","Fleeman","Fleeta","Fleischer","Fleisher","Fleisig","Flem","Fleming","Flemings","Flemming","Flessel","Fleta","Fletch","Fletcher","Fleur","Fleurette","Flieger","Flight","Flin","Flinn","Flint","Flip","Flita","Flo","Floeter","Flor","Flora","Florance","Flore","Florella","Florence","Florencia","Florentia","Florenza","Florette","Flori","Floria","Florian","Florida","Floridia","Florie","Florin","Florina","Florinda","Florine","Florio","Floris","Floro","Florri","Florrie","Florry","Flory","Flosi","Floss","Flosser","Flossi","Flossie","Flossy","Flower","Flowers","Floyd","Flss","Flyn","Flynn","Foah","Fogarty","Fogel","Fogg","Fokos","Folberth","Foley","Folger","Follansbee","Follmer","Folly","Folsom","Fonda","Fondea","Fong","Fons","Fonseca","Fonsie","Fontana","Fontes","Fonville","Fonz","Fonzie","Foote","Forbes","Forcier","Ford","Fording","Forelli","Forest","Forester","Forkey","Forland","Forlini","Formenti","Formica","Fornof","Forras","Forrer","Forrest","Forrester","Forsta","Forster","Forsyth","Forta","Fortier","Fortin","Fortna","Fortuna","Fortunato","Fortune","Fortunia","Fortunio","Fortunna","Forward","Foscalina","Fosdick","Foskett","Fosque","Foss","Foster","Fotina","Fotinas","Fougere","Foulk","Four","Foushee","Fowkes","Fowle","Fowler","Fox","Foy","Fraase","Fradin","Frager","Frame","Fran","France","Francene","Frances","Francesca","Francesco","Franchot","Franci","Francie","Francine","Francis","Francisca","Franciscka","Francisco","Franciska","Franciskus","Franck","Francklin","Francklyn","Franckot","Francois","Francoise","Francyne","Franek","Frangos","Frank","Frankel","Frankhouse","Frankie","Franklin","Franklyn","Franky","Franni","Frannie","Franny","Frans","Fransen","Fransis","Fransisco","Frants","Frantz","Franz","Franza","Franzen","Franzoni","Frasch","Frasco","Fraser","Frasier","Frasquito","Fraya","Frayda","Frayne","Fraze","Frazer","Frazier","Frear","Freberg","Frech","Frechette","Fred","Freda","Freddi","Freddie","Freddy","Fredek","Fredel","Fredela","Fredelia","Fredella","Fredenburg","Frederic","Frederica","Frederich","Frederick","Fredericka","Frederico","Frederigo","Frederik","Frederiksen","Frederique","Fredette","Fredi","Fredia","Fredie","Fredkin","Fredra","Fredric","Fredrick","Fredrika","Free","Freeborn","Freed","Freedman","Freeland","Freeman","Freemon","Fregger","Freida","Freiman","Fremont","French","Frendel","Frentz","Frere","Frerichs","Fretwell","Freud","Freudberg","Frey","Freya","Freyah","Freytag","Frick","Fricke","Frida","Friday","Fridell","Fridlund","Fried","Frieda","Friedberg","Friede","Frieder","Friederike","Friedland","Friedlander","
Friedly","Friedman","Friedrich","Friedrick","Friend","Frierson","Fries","Frisse","Frissell","Fritts","Fritz","Fritze","Fritzie","Fritzsche","Frodeen","Frodi","Frodin","Frodina","Frodine","Froehlich","Froemming","Froh","Frohman","Frohne","Frolick","Froma","Fromma","Fronia","Fronnia","Fronniah","Frost","Fruin","Frulla","Frum","Fruma","Fry","Fryd","Frydman","Frye","Frymire","Fu","Fuchs","Fugate","Fugazy","Fugere","Fuhrman","Fujio","Ful","Fulbert","Fulbright","Fulcher","Fuld","Fulks","Fuller","Fullerton","Fulmer","Fulmis","Fulton","Fulvi","Fulvia","Fulviah","Funch","Funda","Funk","Furey","Furgeson","Furie","Furiya","Furlani","Furlong","Furmark","Furnary","Furr","Furtek","Fusco","Gaal","Gabbert","Gabbey","Gabbi","Gabbie","Gabby","Gabe","Gabel","Gabey","Gabi","Gabie","Gable","Gabler","Gabor","Gabriel","Gabriela","Gabriele","Gabriell","Gabriella","Gabrielle","Gabrielli","Gabriellia","Gabriello","Gabrielson","Gabrila","Gaby","Gad","Gaddi","Gader","Gadmann","Gadmon","Gae","Gael","Gaelan","Gaeta","Gage","Gagliano","Gagne","Gagnon","Gahan","Gahl","Gaidano","Gaige","Gail","Gaile","Gaillard","Gainer","Gainor","Gaiser","Gaither","Gaivn","Gal","Gala","Galan","Galang","Galanti","Galasyn","Galatea","Galateah","Galatia","Gale","Galen","Galer","Galina","Galitea","Gall","Gallager","Gallagher","Gallard","Gallenz","Galliett","Galligan","Galloway","Gally","Galvan","Galven","Galvin","Gamages","Gamal","Gamali","Gamaliel","Gambell","Gamber","Gambrell","Gambrill","Gamin","Gan","Ganiats","Ganley","Gannes","Gannie","Gannon","Ganny","Gans","Gant","Gapin","Gar","Garald","Garate","Garaway","Garbe","Garber","Garbers","Garceau","Garcia","Garcon","Gard","Garda","Gardal","Gardas","Gardel","Gardell","Gardener","Gardia","Gardie","Gardiner","Gardner","Gardol","Gardy","Gare","Garek","Gareri","Gareth","Garett","Garey","Garfield","Garfinkel","Gargan","Garges","Garibald","Garibold","Garibull","Gariepy","Garik","Garin","Garlaand","Garlan","Garland","Garlanda","Garlen","Garlinda","Garling","Garmaise","Garneau","Garner","Garnes","Garnet","Garnett","Garnette","Garold","Garrard","Garratt","Garrek","Garret","Garreth","Garretson","Garrett","Garrick","Garrik","Garris","Garrison","Garrity","Garrot","Garrott","Garry","Garson","Garth","Garv","Garvey","Garvin","Garvy","Garwin","Garwood","Gary","Garzon","Gascony","Gaskill","Gaskin","Gaskins","Gaspar","Gaspard","Gasparo","Gasper","Gasperoni","Gass","Gasser","Gassman","Gastineau","Gaston","Gates","Gathard","Gathers","Gati","Gatian","Gatias","Gaudet","Gaudette","Gaughan","Gaul","Gauldin","Gaulin","Gault","Gaultiero","Gauntlett","Gausman","Gaut","Gautea","Gauthier","Gautier","Gautious","Gav","Gavan","Gaven","Gavette","Gavin","Gavini","Gavra","Gavrah","Gavriella","Gavrielle","Gavrila","Gavrilla","Gaw","Gawain","Gawen","Gawlas","Gay","Gaye","Gayel","Gayelord","Gayl","Gayla","Gayle","Gayleen","Gaylene","Gayler","Gaylor","Gaylord","Gayn","Gayner","Gaynor","Gazo","Gazzo","Geaghan","Gean","Geanine","Gearalt","Gearard","Gearhart","Gebelein","Gebhardt","Gebler","Geddes","Gee","Geehan","Geer","Geerts","Geesey","Gefell","Gefen","Geffner","Gehlbach","Gehman","Geibel","Geier","Geiger","Geilich","Geis","Geiss","Geithner","Gelasias","Gelasius","Gelb","Geldens","Gelhar","Geller","Gellman","Gelman","Gelya","Gemina","Gemini","Geminian","Geminius","Gemma","Gemmell","Gemoets","Gemperle","Gen","Gena","Genaro","Gene","Genesa","Genesia","Genet","Geneva","Genevieve","Genevra","Genia","Genie","Genisia","Genna","Gennaro","Genni","Gennie","Gennifer","Genny","Geno","Genovera","Gensler","Gensmer","Gent","Gentes","Gentilis","Ge
ntille","Gentry","Genvieve","Geof","Geoff","Geoffrey","Geoffry","Georas","Geordie","Georg","George","Georgeanna","Georgeanne","Georgena","Georges","Georgeta","Georgetta","Georgette","Georgi","Georgia","Georgiana","Georgianna","Georgianne","Georgie","Georgina","Georgine","Georglana","Georgy","Ger","Geraint","Gerald","Geralda","Geraldina","Geraldine","Gerard","Gerardo","Geraud","Gerbold","Gerda","Gerdeen","Gerdi","Gerdy","Gere","Gerek","Gereld","Gereron","Gerfen","Gerge","Gerger","Gerhan","Gerhard","Gerhardine","Gerhardt","Geri","Gerianna","Gerianne","Gerick","Gerik","Gerita","Gerius","Gerkman","Gerlac","Gerladina","Germain","Germaine","German","Germana","Germann","Germano","Germaun","Germayne","Germin","Gernhard","Gerome","Gerrald","Gerrard","Gerri","Gerrie","Gerrilee","Gerrit","Gerry","Gersham","Gershom","Gershon","Gerson","Gerstein","Gerstner","Gert","Gerta","Gerti","Gertie","Gertrud","Gertruda","Gertrude","Gertrudis","Gerty","Gervais","Gervase","Gery","Gesner","Gessner","Getraer","Getter","Gettings","Gewirtz","Ghassan","Gherardi","Gherardo","Gherlein","Ghiselin","Giacamo","Giacinta","Giacobo","Giacomo","Giacopo","Giaimo","Giamo","Gian","Giana","Gianina","Gianna","Gianni","Giannini","Giarla","Giavani","Gib","Gibb","Gibbeon","Gibbie","Gibbon","Gibbons","Gibbs","Gibby","Gibe","Gibeon","Gibert","Gibrian","Gibson","Gibun","Giddings","Gide","Gideon","Giefer","Gies","Giesecke","Giess","Giesser","Giff","Giffard","Giffer","Gifferd","Giffie","Gifford","Giffy","Gigi","Giglio","Gignac","Giguere","Gil","Gilba","Gilbart","Gilbert","Gilberta","Gilberte","Gilbertina","Gilbertine","Gilberto","Gilbertson","Gilboa","Gilburt","Gilbye","Gilchrist","Gilcrest","Gilda","Gildas","Gildea","Gilder","Gildus","Gile","Gilead","Gilemette","Giles","Gilford","Gilges","Giliana","Giliane","Gill","Gillan","Gillead","Gilleod","Gilles","Gillespie","Gillett","Gilletta","Gillette","Gilli","Gilliam","Gillian","Gillie","Gilliette","Gilligan","Gillman","Gillmore","Gilly","Gilman","Gilmer","Gilmore","Gilmour","Gilpin","Gilroy","Gilson","Giltzow","Gilud","Gilus","Gimble","Gimpel","Gina","Ginder","Gine","Ginelle","Ginevra","Ginger","Gingras","Ginni","Ginnie","Ginnifer","Ginny","Gino","Ginsberg","Ginsburg","Gintz","Ginzburg","Gio","Giordano","Giorgi","Giorgia","Giorgio","Giovanna","Giovanni","Gipps","Gipson","Gipsy","Giralda","Giraldo","Girand","Girard","Girardi","Girardo","Giraud","Girhiny","Girish","Girovard","Girvin","Gisela","Giselbert","Gisele","Gisella","Giselle","Gish","Gisser","Gitel","Githens","Gitlow","Gitt","Gittel","Gittle","Giuditta","Giule","Giulia","Giuliana","Giulietta","Giulio","Giuseppe","Giustina","Giustino","Giusto","Given","Giverin","Giza","Gizela","Glaab","Glad","Gladdie","Gladdy","Gladi","Gladine","Gladis","Gladstone","Gladwin","Gladys","Glanti","Glantz","Glanville","Glarum","Glaser","Glasgo","Glass","Glassco","Glassman","Glaudia","Glavin","Gleason","Gleda","Gleeson","Gleich","Glen","Glenda","Glenden","Glendon","Glenine","Glenn","Glenna","Glennie","Glennis","Glennon","Glialentn","Glick","Glimp","Glinys","Glogau","Glori","Gloria","Gloriana","Gloriane","Glorianna","Glory","Glover","Glovsky","Gluck","Glyn","Glynas","Glynda","Glynias","Glynis","Glynn","Glynnis","Gmur","Gnni","Goar","Goat","Gobert","God","Goda","Godard","Godart","Godbeare","Godber","Goddard","Goddart","Godden","Godderd","Godding","Goddord","Godewyn","Godfree","Godfrey","Godfry","Godiva","Godliman","Godred","Godric","Godrich","Godspeed","Godwin","Goebel","Goeger","Goer","Goerke","Goeselt","Goetz","Goff","Goggin","Goines","Gokey","Golanka","Gold","Gold
a","Goldarina","Goldberg","Golden","Goldenberg","Goldfarb","Goldfinch","Goldi","Goldia","Goldie","Goldin","Goldina","Golding","Goldman","Goldner","Goldshell","Goldshlag","Goldsmith","Goldstein","Goldston","Goldsworthy","Goldwin","Goldy","Goles","Golightly","Gollin","Golliner","Golter","Goltz","Golub","Gomar","Gombach","Gombosi","Gomer","Gomez","Gona","Gonagle","Gone","Gonick","Gonnella","Gonroff","Gonsalve","Gonta","Gonyea","Gonzales","Gonzalez","Gonzalo","Goober","Good","Goodard","Goodden","Goode","Goodhen","Goodill","Goodkin","Goodman","Goodrich","Goodrow","Goodson","Goodspeed","Goodwin","Goody","Goodyear","Googins","Gora","Goran","Goraud","Gord","Gordan","Gorden","Gordie","Gordon","Gordy","Gore","Goren","Gorey","Gorga","Gorges","Gorlicki","Gorlin","Gorman","Gorrian","Gorrono","Gorski","Gorton","Gosnell","Gosney","Goss","Gosselin","Gosser","Gotcher","Goth","Gothar","Gothard","Gothart","Gothurd","Goto","Gottfried","Gotthard","Gotthelf","Gottlieb","Gottuard","Gottwald","Gough","Gould","Goulden","Goulder","Goulet","Goulette","Gove","Gow","Gower","Gowon","Gowrie","Graaf","Grace","Graces","Gracia","Gracie","Gracye","Gradeigh","Gradey","Grados","Grady","Grae","Graehl","Graehme","Graeme","Graf","Graff","Graham","Graig","Grail","Gram","Gran","Grand","Grane","Graner","Granese","Grange","Granger","Grani","Grania","Graniah","Graniela","Granlund","Grannia","Granniah","Grannias","Grannie","Granny","Granoff","Grant","Grantham","Granthem","Grantland","Grantley","Granville","Grassi","Grata","Grath","Grati","Gratia","Gratiana","Gratianna","Gratt","Graubert","Gravante","Graves","Gray","Graybill","Grayce","Grayson","Grazia","Greabe","Grearson","Gredel","Greeley","Green","Greenberg","Greenburg","Greene","Greenebaum","Greenes","Greenfield","Greenland","Greenleaf","Greenlee","Greenman","Greenquist","Greenstein","Greenwald","Greenwell","Greenwood","Greer","Greerson","Greeson","Grefe","Grefer","Greff","Greg","Grega","Gregg","Greggory","Greggs","Gregoire","Gregoor","Gregor","Gregorio","Gregorius","Gregory","Gregrory","Gregson","Greiner","Grekin","Grenier","Grenville","Gresham","Greta","Gretal","Gretchen","Grete","Gretel","Grethel","Gretna","Gretta","Grevera","Grew","Grewitz","Grey","Greyso","Greyson","Greysun","Grider","Gridley","Grier","Grieve","Griff","Griffie","Griffin","Griffis","Griffith","Griffiths","Griffy","Griggs","Grigson","Grim","Grimaldi","Grimaud","Grimbal","Grimbald","Grimbly","Grimes","Grimona","Grimonia","Grindlay","Grindle","Grinnell","Gris","Griselda","Griseldis","Grishilda","Grishilde","Grissel","Grissom","Gristede","Griswold","Griz","Grizel","Grizelda","Groark","Grobe","Grochow","Grodin","Grof","Grogan","Groh","Gromme","Grondin","Gronseth","Groome","Groos","Groot","Grory","Grosberg","Groscr","Grose","Grosmark","Gross","Grossman","Grosvenor","Grosz","Grote","Grounds","Grous","Grove","Groveman","Grover","Groves","Grubb","Grube","Gruber","Grubman","Gruchot","Grunberg","Grunenwald","Grussing","Gruver","Gschu","Guadalupe","Gualterio","Gualtiero","Guarino","Gudren","Gudrin","Gudrun","Guendolen","Guenevere","Guenna","Guenzi","Guerin","Guerra","Guevara","Guglielma","Guglielmo","Gui","Guibert","Guido","Guidotti","Guilbert","Guild","Guildroy","Guillaume","Guillema","Guillemette","Guillermo","Guimar","Guimond","Guinevere","Guinn","Guinna","Guise","Gujral","Gula","Gulgee","Gulick","Gun","Gunar","Gunas","Gundry","Gunilla","Gunn","Gunnar","Gunner","Gunning","Guntar","Gunter","Gunthar","Gunther","Gunzburg","Gupta","Gurango","Gurevich","Guria","Gurias","Gurl","Gurney","Gurolinick","Gurtner","Gus","Gusba","Gus
ella","Guss","Gussi","Gussie","Gussman","Gussy","Gusta","Gustaf","Gustafson","Gustafsson","Gustav","Gustave","Gustavo","Gustavus","Gusti","Gustie","Gustin","Gusty","Gut","Guthrey","Guthrie","Guthry","Gutow","Guttery","Guy","Guyer","Guyon","Guzel","Gwen","Gwendolen","Gwendolin","Gwendolyn","Gweneth","Gwenette","Gwenn","Gwenneth","Gwenni","Gwennie","Gwenny","Gwenora","Gwenore","Gwyn","Gwyneth","Gwynne","Gyasi","Gyatt","Gyimah","Gylys","Gypsie","Gypsy","Gytle","Ha","Haag","Haakon","Haas","Haase","Haberman","Hach","Hachman","Hachmann","Hachmin","Hackathorn","Hacker","Hackett","Hackney","Had","Haddad","Hadden","Haden","Hadik","Hadlee","Hadleigh","Hadley","Hadria","Hadrian","Hadsall","Hadwin","Hadwyn","Haeckel","Haerle","Haerr","Haff","Hafler","Hagai","Hagan","Hagar","Hagen","Hagerman","Haggai","Haggar","Haggerty","Haggi","Hagi","Hagood","Hahn","Hahnert","Hahnke","Haida","Haig","Haile","Hailee","Hailey","Haily","Haim","Haimes","Haines","Hak","Hakan","Hake","Hakeem","Hakim","Hako","Hakon","Hal","Haland","Halbeib","Halbert","Halda","Haldan","Haldane","Haldas","Haldeman","Halden","Haldes","Haldi","Haldis","Hale","Haleigh","Haletky","Haletta","Halette","Haley","Halfdan","Halfon","Halford","Hali","Halie","Halima","Halimeda","Hall","Halla","Hallagan","Hallam","Halland","Halle","Hallee","Hallerson","Hallett","Hallette","Halley","Halli","Halliday","Hallie","Hallock","Hallsy","Hallvard","Hally","Halona","Halonna","Halpern","Halsey","Halstead","Halsted","Halsy","Halvaard","Halverson","Ham","Hama","Hamachi","Hamal","Haman","Hamann","Hambley","Hamburger","Hamel","Hamer","Hamford","Hamforrd","Hamfurd","Hamid","Hamil","Hamilton","Hamish","Hamlani","Hamlen","Hamlet","Hamlin","Hammad","Hammel","Hammer","Hammerskjold","Hammock","Hammond","Hamner","Hamnet","Hamo","Hamon","Hampton","Hamrah","Hamrnand","Han","Hana","Hanae","Hanafee","Hanako","Hanan","Hance","Hancock","Handal","Handbook","Handel","Handler","Hands","Handy","Haney","Hanford","Hanforrd","Hanfurd","Hank","Hankins","Hanleigh","Hanley","Hanna","Hannah","Hannan","Hanni","Hannibal","Hannie","Hannis","Hannon","Hannover","Hannus","Hanny","Hanover","Hans","Hanschen","Hansel","Hanselka","Hansen","Hanser","Hanshaw","Hansiain","Hanson","Hanus","Hanway","Hanzelin","Happ","Happy","Hapte","Hara","Harald","Harbard","Harberd","Harbert","Harbird","Harbison","Harbot","Harbour","Harcourt","Hardan","Harday","Hardden","Hardej","Harden","Hardi","Hardie","Hardigg","Hardin","Harding","Hardman","Hardner","Hardunn","Hardwick","Hardy","Hare","Harelda","Harewood","Harhay","Harilda","Harim","Harl","Harlamert","Harlan","Harland","Harle","Harleigh","Harlen","Harlene","Harley","Harli","Harlie","Harlin","Harlow","Harman","Harmaning","Harmon","Harmonia","Harmonie","Harmony","Harms","Harned","Harneen","Harness","Harod","Harold","Harolda","Haroldson","Haroun","Harp","Harper","Harpole","Harpp","Harragan","Harrell","Harri","Harrie","Harriet","Harriett","Harrietta","Harriette","Harriman","Harrington","Harriot","Harriott","Harris","Harrison","Harrod","Harrow","Harrus","Harry","Harshman","Harsho","Hart","Harte","Hartfield","Hartill","Hartley","Hartman","Hartmann","Hartmunn","Hartnett","Harts","Hartwell","Harty","Hartzel","Hartzell","Hartzke","Harv","Harvard","Harve","Harvey","Harvie","Harvison","Harwell","Harwill","Harwilll","Harwin","Hasan","Hasen","Hasheem","Hashim","Hashimoto","Hashum","Hasin","Haskel","Haskell","Haskins","Haslam","Haslett","Hasseman","Hassett","Hassi","Hassin","Hastie","Hastings","Hasty","Haswell","Hatch","Hatcher","Hatfield","Hathaway","Hathcock","Hatti","Hattie","Hatty",
"Hau","Hauck","Hauge","Haugen","Hauger","Haughay","Haukom","Hauser","Hausmann","Hausner","Havard","Havelock","Haveman","Haven","Havener","Havens","Havstad","Hawger","Hawk","Hawken","Hawker","Hawkie","Hawkins","Hawley","Hawthorn","Hax","Hay","Haya","Hayashi","Hayden","Haydon","Haye","Hayes","Hayley","Hayman","Haymes","Haymo","Hayne","Haynes","Haynor","Hayott","Hays","Hayse","Hayton","Hayward","Haywood","Hayyim","Hazaki","Hazard","Haze","Hazeghi","Hazel","Hazelton","Hazem","Hazen","Hazlett","Hazlip","Head","Heady","Healey","Healion","Heall","Healy","Heaps","Hearn","Hearsh","Heater","Heath","Heathcote","Heather","Hebbe","Hebe","Hebel","Heber","Hebert","Hebner","Hebrew","Hecht","Heck","Hecker","Hecklau","Hector","Heda","Hedberg","Hedda","Heddi","Heddie","Heddy","Hedelman","Hedgcock","Hedges","Hedi","Hedley","Hedva","Hedvah","Hedve","Hedveh","Hedvig","Hedvige","Hedwig","Hedwiga","Hedy","Heeley","Heer","Heffron","Hefter","Hegarty","Hege","Heger","Hegyera","Hehre","Heid","Heida","Heidi","Heidie","Heidt","Heidy","Heigho","Heigl","Heilman","Heilner","Heim","Heimer","Heimlich","Hein","Heindrick","Heiner","Heiney","Heinrich","Heinrick","Heinrik","Heinrike","Heins","Heintz","Heise","Heisel","Heiskell","Heisser","Hekker","Hekking","Helaina","Helaine","Helali","Helban","Helbon","Helbona","Helbonia","Helbonna","Helbonnah","Helbonnas","Held","Helen","Helena","Helene","Helenka","Helfand","Helfant","Helga","Helge","Helgeson","Hellene","Heller","Helli","Hellman","Helm","Helman","Helmer","Helms","Helmut","Heloise","Helprin","Helsa","Helse","Helsell","Helsie","Helve","Helyn","Heman","Hembree","Hemingway","Hemminger","Hemphill","Hen","Hendel","Henden","Henderson","Hendon","Hendren","Hendrick","Hendricks","Hendrickson","Hendrik","Hendrika","Hendrix","Hendry","Henebry","Heng","Hengel","Henghold","Henig","Henigman","Henka","Henke","Henleigh","Henley","Henn","Hennahane","Hennebery","Hennessey","Hennessy","Henni","Hennie","Henning","Henri","Henricks","Henrie","Henrieta","Henrietta","Henriette","Henriha","Henrik","Henrion","Henrique","Henriques","Henry","Henryetta","Henryk","Henryson","Henson","Hentrich","Hephzibah","Hephzipa","Hephzipah","Heppman","Hepsiba","Hepsibah","Hepza","Hepzi","Hera","Herald","Herb","Herbert","Herbie","Herbst","Herby","Herc","Hercule","Hercules","Herculie","Hereld","Heriberto","Heringer","Herm","Herman","Hermann","Hermes","Hermia","Hermie","Hermina","Hermine","Herminia","Hermione","Hermon","Hermosa","Hermy","Hernandez","Hernando","Hernardo","Herod","Herodias","Herold","Heron","Herr","Herra","Herrah","Herrera","Herrick","Herries","Herring","Herrington","Herriott","Herrle","Herrmann","Herrod","Hersch","Herschel","Hersh","Hershel","Hershell","Herson","Herstein","Herta","Hertberg","Hertha","Hertz","Hertzfeld","Hertzog","Herv","Herve","Hervey","Herwick","Herwig","Herwin","Herzberg","Herzel","Herzen","Herzig","Herzog","Hescock","Heshum","Hesketh","Hesky","Hesler","Hesper","Hess","Hessler","Hessney","Hesta","Hester","Hesther","Hestia","Heti","Hett","Hetti","Hettie","Hetty","Heurlin","Heuser","Hew","Hewart","Hewe","Hewes","Hewet","Hewett","Hewie","Hewitt","Hey","Heyde","Heydon","Heyer","Heyes","Heyman","Heymann","Heyward","Heywood","Hezekiah","Hi","Hibben","Hibbert","Hibbitts","Hibbs","Hickey","Hickie","Hicks","Hidie","Hieronymus","Hiett","Higbee","Higginbotham","Higgins","Higginson","Higgs","High","Highams","Hightower","Higinbotham","Higley","Hijoung","Hike","Hilaire","Hilar","Hilaria","Hilario","Hilarius","Hilary","Hilbert","Hild","Hilda","Hildagard","Hildagarde","Hilde","Hildebrandt","Hildegaard","
Hildegard","Hildegarde","Hildick","Hildie","Hildy","Hilel","Hill","Hillard","Hillari","Hillary","Hilleary","Hillegass","Hillel","Hillell","Hiller","Hillery","Hillhouse","Hilliard","Hilliary","Hillie","Hillier","Hillinck","Hillman","Hills","Hilly","Hillyer","Hiltan","Hilten","Hiltner","Hilton","Him","Hime","Himelman","Hinch","Hinckley","Hinda","Hindorff","Hindu","Hines","Hinkel","Hinkle","Hinman","Hinson","Hintze","Hinze","Hippel","Hirai","Hiram","Hirasuna","Hiro","Hiroko","Hiroshi","Hirsch","Hirschfeld","Hirsh","Hirst","Hirz","Hirza","Hisbe","Hitchcock","Hite","Hitoshi","Hitt","Hittel","Hizar","Hjerpe","Hluchy","Ho","Hoag","Hoagland","Hoang","Hoashis","Hoban","Hobard","Hobart","Hobbie","Hobbs","Hobey","Hobie","Hochman","Hock","Hocker","Hodess","Hodge","Hodges","Hodgkinson","Hodgson","Hodosh","Hoebart","Hoeg","Hoehne","Hoem","Hoenack","Hoes","Hoeve","Hoffarth","Hoffer","Hoffert","Hoffman","Hoffmann","Hofmann","Hofstetter","Hogan","Hogarth","Hogen","Hogg","Hogle","Hogue","Hoi","Hoisch","Hokanson","Hola","Holbrook","Holbrooke","Holcman","Holcomb","Holden","Holder","Holds","Hole","Holey","Holladay","Hollah","Holland","Hollander","Holle","Hollenbeck","Holleran","Hollerman","Holli","Hollie","Hollinger","Hollingsworth","Hollington","Hollis","Hollister","Holloway","Holly","Holly-Anne","Hollyanne","Holman","Holmann","Holmen","Holmes","Holms","Holmun","Holna","Holofernes","Holsworth","Holt","Holton","Holtorf","Holtz","Holub","Holzman","Homans","Home","Homer","Homere","Homerus","Homovec","Honan","Honebein","Honey","Honeyman","Honeywell","Hong","Honig","Honna","Honniball","Honor","Honora","Honoria","Honorine","Hoo","Hooge","Hook","Hooke","Hooker","Hoon","Hoopen","Hooper","Hoopes","Hootman","Hoover","Hope","Hopfinger","Hopkins","Hoppe","Hopper","Horace","Horacio","Horan","Horatia","Horatio","Horatius","Horbal","Horgan","Horick","Horlacher","Horn","Horne","Horner","Hornstein","Horodko","Horowitz","Horsey","Horst","Hort","Horten","Hortensa","Hortense","Hortensia","Horter","Horton","Horvitz","Horwath","Horwitz","Hosbein","Hose","Hosea","Hoseia","Hosfmann","Hoshi","Hoskinson","Hospers","Hotchkiss","Hotze","Hough","Houghton","Houlberg","Hound","Hourigan","Hourihan","Housen","Houser","Houston","Housum","Hovey","How","Howard","Howarth","Howe","Howell","Howenstein","Howes","Howey","Howie","Howlan","Howland","Howlend","Howlond","Howlyn","Howund","Howzell","Hoxie","Hoxsie","Hoy","Hoye","Hoyt","Hrutkay","Hsu","Hu","Huai","Huan","Huang","Huba","Hubbard","Hubble","Hube","Huber","Huberman","Hubert","Huberto","Huberty","Hubey","Hubie","Hubing","Hubsher","Huckaby","Huda","Hudgens","Hudis","Hudnut","Hudson","Huebner","Huei","Huesman","Hueston","Huey","Huff","Hufnagel","Huggins","Hugh","Hughes","Hughett","Hughie","Hughmanick","Hugibert","Hugo","Hugon","Hugues","Hui","Hujsak","Hukill","Hulbard","Hulbert","Hulbig","Hulburt","Hulda","Huldah","Hulen","Hull","Hullda","Hultgren","Hultin","Hulton","Hum","Humbert","Humberto","Humble","Hume","Humfrey","Humfrid","Humfried","Hummel","Humo","Hump","Humpage","Humph","Humphrey","Hun","Hunfredo","Hung","Hungarian","Hunger","Hunley","Hunsinger","Hunt","Hunter","Huntingdon","Huntington","Huntlee","Huntley","Huoh","Huppert","Hurd","Hurff","Hurlbut","Hurlee","Hurleigh","Hurless","Hurley","Hurlow","Hurst","Hurty","Hurwit","Hurwitz","Husain","Husch","Husein","Husha","Huskamp","Huskey","Hussar","Hussein","Hussey","Huston","Hut","Hutchings","Hutchins","Hutchinson","Hutchison","Hutner","Hutson","Hutt","Huttan","Hutton","Hux","Huxham","Huxley","Hwang","Hwu","Hy","Hyacinth","Hyacintha","Hyacinthe
","Hyacinthia","Hyacinthie","Hyams","Hyatt","Hyde","Hylan","Hyland","Hylton","Hyman","Hymen","Hymie","Hynda","Hynes","Hyo","Hyozo","Hyps","Hyrup","Iago","Iain","Iams","Ian","Iand","Ianteen","Ianthe","Iaria","Iaverne","Ib","Ibbetson","Ibbie","Ibbison","Ibby","Ibrahim","Ibson","Ichabod","Icken","Id","Ida","Idalia","Idalina","Idaline","Idalla","Idden","Iddo","Ide","Idel","Idelia","Idell","Idelle","Idelson","Iden","Idette","Idleman","Idola","Idolah","Idolla","Idona","Idonah","Idonna","Idou","Idoux","Idzik","Iene","Ier","Ierna","Ieso","Ietta","Iey","Ifill","Igal","Igenia","Iggie","Iggy","Iglesias","Ignace","Ignacia","Ignacio","Ignacius","Ignatia","Ignatius","Ignatz","Ignatzia","Ignaz","Ignazio","Igor","Ihab","Iiette","Iila","Iinde","Iinden","Iives","Ike","Ikeda","Ikey","Ikkela","Ilaire","Ilan","Ilana","Ilario","Ilarrold","Ilbert","Ileana","Ileane","Ilene","Iline","Ilise","Ilka","Ilke","Illa","Illene","Illona","Illyes","Ilona","Ilonka","Ilowell","Ilsa","Ilse","Ilwain","Ilysa","Ilyse","Ilyssa","Im","Ima","Imalda","Iman","Imelda","Imelida","Imena","Immanuel","Imogen","Imogene","Imojean","Imray","Imre","Imtiaz","Ina","Incrocci","Indihar","Indira","Inerney","Ines","Inesita","Ineslta","Inessa","Inez","Infeld","Infield","Ing","Inga","Ingaberg","Ingaborg","Ingalls","Ingamar","Ingar","Inge","Ingeberg","Ingeborg","Ingelbert","Ingemar","Inger","Ingham","Inglebert","Ingles","Inglis","Ingmar","Ingold","Ingra","Ingraham","Ingram","Ingrid","Ingrim","Ingunna","Ingvar","Inigo","Inkster","Inman","Inna","Innes","Inness","Innis","Inoue","Intisar","Intosh","Intyre","Inverson","Iny","Ioab","Iolande","Iolanthe","Iolenta","Ion","Iona","Iong","Iorgo","Iorgos","Iorio","Iormina","Iosep","Ioved","Iover","Ioves","Iow","Ioyal","Iphagenia","Iphigenia","Iphigeniah","Iphlgenia","Ira","Iran","Irby","Iredale","Ireland","Irena","Irene","Irfan","Iridis","Iridissa","Irina","Iris","Irisa","Irish","Irita","Irma","Irme","Irmgard","Irmina","Irmine","Irra","Irv","Irvin","Irvine","Irving","Irwin","Irwinn","Isa","Isaac","Isaacs","Isaacson","Isaak","Isabea","Isabeau","Isabel","Isabelita","Isabella","Isabelle","Isac","Isacco","Isador","Isadora","Isadore","Isahella","Isaiah","Isak","Isbel","Isbella","Isborne","Iseabal","Isherwood","Ishii","Ishmael","Ishmul","Isia","Isiah","Isiahi","Isidor","Isidora","Isidore","Isidoro","Isidro","Isis","Isla","Islaen","Island","Isle","Islean","Isleana","Isleen","Islek","Isma","Isman","Isobel","Isola","Isolda","Isolde","Isolt","Israel","Israeli","Issi","Issiah","Issie","Issy","Ita","Itagaki","Itch","Ithaman","Ithnan","Itin","Iva","Ivah","Ivan","Ivana","Ivanah","Ivanna","Ivar","Ivatts","Ive","Ivens","Iver","Ivers","Iverson","Ives","Iveson","Ivett","Ivette","Ivetts","Ivey","Ivie","Ivo","Ivon","Ivonne","Ivor","Ivory","Ivy","Iy","Iyre","Iz","Izaak","Izabel","Izak","Izawa","Izy","Izzy","Ja","Jaal","Jaala","Jaan","Jaban","Jabe","Jabez","Jabin","Jablon","Jabon","Jac","Jacenta","Jacey","Jacie","Jacinda","Jacinta","Jacintha","Jacinthe","Jacinto","Jack","Jackelyn","Jacki","Jackie","Jacklin","Jacklyn","Jackquelin","Jackqueline","Jackson","Jacky","Jaclin","Jaclyn","Jaco","Jacob","Jacoba","Jacobah","Jacobba","Jacobina","Jacobine","Jacobo","Jacobs","Jacobsen","Jacobsohn","Jacobson","Jacoby","Jacquelin","Jacqueline","Jacquelyn","Jacquelynn","Jacquenetta","Jacquenette","Jacques","Jacquet","Jacquetta","Jacquette","Jacqui","Jacquie","Jacy","Jacynth","Jada","Jadd","Jadda","Jaddan","Jaddo","Jade","Jadwiga","Jae","Jaeger","Jaehne","Jael","Jaela","Jaella","Jaenicke","Jaf","Jaffe","Jagir","Jago","Jahdai","Jahdal","Jahdiel","Jahdol","
Jahn","Jahncke","Jaime","Jaime","Jaimie","Jain","Jaine","Jair","Jairia","Jake","Jakie","Jakob","Jakoba","Jala","Jalbert","Jallier","Jamaal","Jamal","Jamel","James","Jameson","Jamesy","Jamey","Jami","Jamie","Jamieson","Jamil","Jamila","Jamill","Jamilla","Jamille","Jamima","Jamin","Jamison","Jammal","Jammie","Jammin","Jamnes","Jamnis","Jan","Jana","Janaya","Janaye","Jandel","Jandy","Jane","Janean","Janeczka","Janeen","Janek","Janel","Janela","Janella","Janelle","Janene","Janenna","Janerich","Janessa","Janet","Janeta","Janetta","Janette","Janeva","Janey","Jangro","Jania","Janice","Janicki","Janie","Janifer","Janik","Janina","Janine","Janis","Janith","Janiuszck","Janka","Jankell","Jankey","Jann","Janna","Jannel","Jannelle","Jannery","Janos","Janot","Jansen","Jansson","Januarius","January","Januisz","Janus","Jany","Janyte","Japeth","Japha","Japheth","Jaqitsch","Jaquelin","Jaquelyn","Jaquenetta","Jaquenette","Jaquiss","Jaquith","Jara","Jarad","Jard","Jardena","Jareb","Jared","Jarek","Jaret","Jari","Jariah","Jarib","Jarid","Jarietta","Jarita","Jarl","Jarlath","Jarlathus","Jarlen","Jarnagin","Jarrad","Jarred","Jarrell","Jarret","Jarrett","Jarrid","Jarrod","Jarrow","Jarv","Jarvey","Jarvis","Jary","Jase","Jasen","Jasik","Jasisa","Jasmin","Jasmina","Jasmine","Jason","Jasper","Jasun","Jauch","Jaunita","Javed","Javier","Javler","Jaworski","Jay","Jaycee","Jaye","Jaylene","Jayme","Jaymee","Jaymie","Jayne","Jaynell","Jaynes","Jayson","Jazmin","Jdavie","Jea","Jean","Jean-Claude","Jeana","Jeane","Jeanelle","Jeanette","Jeanie","Jeanine","Jeanna","Jeanne","Jeannette","Jeannie","Jeannine","Jeavons","Jeaz","Jeb","Jecho","Jecoa","Jecon","Jeconiah","Jed","Jedd","Jeddy","Jedediah","Jedidiah","Jedlicka","Jedthus","Jeff","Jeffcott","Jefferey","Jeffers","Jefferson","Jeffery","Jeffie","Jeffrey","Jeffries","Jeffry","Jeffy","Jegar","Jeggar","Jegger","Jehanna","Jehiah","Jehial","Jehias","Jehiel","Jehius","Jehoash","Jehovah","Jehu","Jelena","Jelene","Jelks","Jelle","Jelsma","Jem","Jemena","Jemie","Jemima","Jemimah","Jemina","Jeminah","Jemine","Jemma","Jemmie","Jemmy","Jempty","Jemy","Jen","Jena","Jenda","Jenei","Jenelle","Jenesia","Jenette","Jeni","Jenica","Jeniece","Jenifer","Jeniffer","Jenilee","Jenine","Jenkel","Jenkins","Jenks","Jenn","Jenna","Jenne","Jennee","Jenness","Jennette","Jenni","Jennica","Jennie","Jennifer","Jennilee","Jennine","Jennings","Jenny","Jeno","Jens","Jensen","Jentoft","Jephthah","Jephum","Jepson","Jepum","Jer","Jerad","Jerald","Jeraldine","Jeralee","Jeramey","Jeramie","Jere","Jereld","Jereme","Jeremiah","Jeremias","Jeremie","Jeremy","Jeri","Jeritah","Jermain","Jermaine","Jerman","Jermayne","Jermyn","Jerol","Jerold","Jeroma","Jerome","Jeromy","Jerri","Jerrie","Jerrilee","Jerrilyn","Jerrine","Jerrol","Jerrold","Jerroll","Jerrome","Jerry","Jerrylee","Jerusalem","Jervis","Jerz","Jesh","Jesher","Jess","Jessa","Jessabell","Jessalin","Jessalyn","Jessamine","Jessamyn","Jesse","Jessee","Jesselyn","Jessen","Jessey","Jessi","Jessica","Jessie","Jessika","Jessy","Jestude","Jesus","Jeth","Jethro","Jeu","Jeunesse","Jeuz","Jevon","Jew","Jewel","Jewell","Jewelle","Jewett","Jews","Jez","Jezabel","Jezabella","Jezabelle","Jezebel","Jezreel","Ji","Jill","Jillana","Jillane","Jillayne","Jilleen","Jillene","Jilli","Jillian","Jillie","Jilly","Jim","Jimmie","Jimmy","Jinny","Jit","Jo","JoAnn","Jo-Ann","Jo-Anne","JoAnn","JoAnne","Joab","Joachim","Joachima","Joacima","Joacimah","Joan","Joana","Joane","Joanie","Joann","Joanna","Joanne","Joannes","Joao","Joappa","Joaquin","Joash","Joashus","Job","Jobe","Jobey","Jobi","Jobie","Jo
bina","Joby","Jobye","Jobyna","Jocelin","Joceline","Jocelyn","Jocelyne","Jochbed","Jochebed","Jock","Jocko","Jodee","Jodi","Jodie","Jodoin","Jody","Joe","Joeann","Joed","Joel","Joela","Joelie","Joell","Joella","Joelle","Joellen","Joelly","Joellyn","Joelynn","Joerg","Joete","Joette","Joey","Joh","Johan","Johanan","Johann","Johanna","Johannah","Johannes","Johannessen","Johansen","Johathan","Johen","Johiah","Johm","John","Johna","Johnath","Johnathan","Johnathon","Johnette","Johnna","Johnnie","Johnny","Johns","Johnson","Johnsson","Johnsten","Johnston","Johnstone","Johny","Johppa","Johppah","Johst","Joice","Joiner","Jojo","Joktan","Jola","Jolanta","Jolda","Jolee","Joleen","Jolene","Jolenta","Joletta","Joli","Jolie","Joliet","Joline","Jollanta","Jollenta","Joly","Jolyn","Jolynn","Jon","Jona","Jonah","Jonas","Jonathan","Jonathon","Jonati","Jone","Jonell","Jones","Jonette","Joni","Jonie","Jonina","Jonis","Jonme","Jonna","Jonny","Joo","Joon","Joost","Jopa","Jordain","Jordan","Jordana","Jordanna","Jordans","Jordanson","Jordison","Jordon","Jorey","Jorgan","Jorge","Jorgensen","Jorgenson","Jori","Jorie","Jorin","Joris","Jorrie","Jorry","Jory","Jos","Joscelin","Jose","Josee","Josefa","Josefina","Joseito","Joselow","Joselyn","Joseph","Josepha","Josephina","Josephine","Josephson","Joses","Josey","Josh","Joshi","Joshia","Joshua","Joshuah","Josi","Josiah","Josias","Josie","Josler","Joslyn","Josselyn","Josy","Jotham","Joub","Joung","Jourdain","Jourdan","Jovi","Jovia","Jovita","Jovitah","Jovitta","Jowett","Joy","Joya","Joyan","Joyann","Joyce","Joycelin","Joye","Jozef","Jsandye","Juan","Juana","Juanita","Juanne","Juback","Jud","Judah","Judas","Judd","Jude","Judenberg","Judi","Judie","Judith","Juditha","Judon","Judsen","Judson","Judus","Judy","Judye","Jueta","Juetta","Juieta","Jule","Julee","Jules","Juley","Juli","Julia","Julian","Juliana","Juliane","Juliann","Julianna","Julianne","Juliano","Julide","Julie","Julienne","Juliet","Julieta","Julietta","Juliette","Julina","Juline","Julio","Julis","Julissa","Julita","Julius","Jumbala","Jump","Jun","Juna","June","Junette","Jung","Juni","Junia","Junie","Junieta","Junina","Junius","Junji","Junko","Junna","Junno","Juno","Jurdi","Jurgen","Jurkoic","Just","Justen","Juster","Justicz","Justin","Justina","Justine","Justinian","Justinn","Justino","Justis","Justus","Juta","Jutta","Juxon","Jyoti","Kablesh","Kacerek","Kacey","Kachine","Kacie","Kacy","Kaczer","Kaden","Kadner","Kado","Kaela","Kaenel","Kaete","Kafka","Kahaleel","Kahl","Kahle","Kahler","Kahlil","Kahn","Kai","Kaia","Kaila","Kaile","Kailey","Kain","Kaine","Kaiser","Kaitlin","Kaitlyn","Kaitlynn","Kaiulani","Kaja","Kajdan","Kakalina","Kal","Kala","Kalagher","Kalasky","Kalb","Kalbli","Kale","Kaleb","Kaleena","Kalfas","Kali","Kalie","Kalikow","Kalil","Kalila","Kalin","Kalina","Kalinda","Kalindi","Kaliope","Kaliski","Kalk","Kall","Kalle","Kalli","Kallick","Kallista","Kallman","Kally","Kalman","Kalmick","Kaltman","Kalvin","Kalvn","Kam","Kama","Kamal","Kamaria","Kamat","Kameko","Kamerman","Kamila","Kamilah","Kamillah","Kamin","Kammerer","Kamp","Kampmann","Kampmeier","Kan","Kanal","Kancler","Kandace","Kandy","Kane","Kania","Kannan","Kannry","Kano","Kant","Kanter","Kantor","Kantos","Kanya","Kape","Kaplan","Kapoor","Kapor","Kappel","Kappenne","Kara","Kara-Lynn","Karalee","Karalynn","Karame","Karas","Karb","Kare","Karee","Kareem","Karel","Karen","Karena","Kari","Karia","Karie","Karil","Karilla","Karilynn","Karim","Karin","Karina","Karine","Kariotta","Karisa","Karissa","Karita","Karl","Karla","Karlan","Karlee","Karleen","Karlen","K
arlene","Karlens","Karli","Karlie","Karlik","Karlin","Karlis","Karlise","Karlotta","Karlotte","Karlow","Karly","Karlyn","Karmen","Karna","Karney","Karol","Karola","Karole","Karolina","Karoline","Karoly","Karolyn","Karon","Karp","Karr","Karrah","Karrie","Karry","Karsten","Kartis","Karwan","Kary","Karyl","Karylin","Karyn","Kasevich","Kasey","Kashden","Kask","Kaslik","Kaspar","Kasper","Kass","Kassab","Kassandra","Kassaraba","Kassel","Kassey","Kassi","Kassia","Kassie","Kassity","Kast","Kat","Kata","Katalin","Kataway","Kate","Katee","Katerina","Katerine","Katey","Kath","Katha","Katharina","Katharine","Katharyn","Kathe","Katherin","Katherina","Katherine","Katheryn","Kathi","Kathie","Kathleen","Kathlene","Kathlin","Kathrine","Kathryn","Kathryne","Kathy","Kathye","Kati","Katie","Katina","Katine","Katinka","Katlaps","Katleen","Katlin","Kato","Katonah","Katrina","Katrine","Katrinka","Katsuyama","Katt","Katti","Kattie","Katuscha","Katusha","Katushka","Katy","Katya","Katz","Katzen","Katzir","Katzman","Kauffman","Kauffmann","Kaufman","Kaufmann","Kaule","Kauppi","Kauslick","Kavanagh","Kavanaugh","Kavita","Kawai","Kawasaki","Kay","Kaya","Kaycee","Kaye","Kayla","Kayle","Kaylee","Kayley","Kaylil","Kaylyn","Kayne","Kaz","Kazim","Kazimir","Kazmirci","Kazue","Kealey","Kean","Keane","Keare","Kearney","Keary","Keating","Keavy","Kee","Keefe","Keefer","Keegan","Keel","Keelby","Keele","Keeler","Keeley","Keelia","Keelin","Keely","Keen","Keenan","Keene","Keener","Keese","Keeton","Keever","Keffer","Keg","Kegan","Keheley","Kehoe","Kehr","Kei","Keifer","Keiko","Keil","Keily","Keir","Keisling","Keith","Keithley","Kela","Kelbee","Kelby","Kelcey","Kelci","Kelcie","Kelcy","Kelda","Keldah","Keldon","Kele","Keli","Keligot","Kelila","Kella","Kellby","Kellda","Kelleher","Kellen","Kellene","Keller","Kelley","Kelli","Kellia","Kellie","Kellina","Kellsie","Kelly","Kellyann","Kellyn","Kelsey","Kelsi","Kelson","Kelsy","Kelton","Kelula","Kelvin","Kelwen","Kelwin","Kelwunn","Kemble","Kemeny","Kemme","Kemp","Kempe","Kemppe","Ken","Kenay","Kenaz","Kendal","Kendall","Kendell","Kendra","Kendrah","Kendre","Kendrick","Kendricks","Kendry","Kendy","Kendyl","Kenelm","Kenison","Kenji","Kenlay","Kenlee","Kenleigh","Kenley","Kenn","Kenna","Kennan","Kennard","Kennedy","Kennet","Kenneth","Kennett","Kenney","Kennie","Kennith","Kenny","Kenon","Kenric","Kenrick","Kensell","Kent","Kenta","Kenti","Kentiga","Kentigera","Kentigerma","Kentiggerma","Kenton","Kenward","Kenway","Kenwee","Kenweigh","Kenwood","Kenwrick","Kenyon","Kenzi","Kenzie","Keon","Kepner","Keppel","Ker","Kerby","Kerek","Kerekes","Kerge","Keri","Keriann","Kerianne","Kerin","Kerk","Kerman","Kermie","Kermit","Kermy","Kern","Kernan","Kerns","Kerr","Kerri","Kerrie","Kerril","Kerrill","Kerrin","Kerrison","Kerry","Kersten","Kerstin","Kerwin","Kerwinn","Kerwon","Kery","Kesia","Kesley","Keslie","Kessel","Kessia","Kessiah","Kessler","Kester","Ketchan","Ketchum","Ketti","Kettie","Ketty","Keung","Kev","Kevan","Keven","Keverian","Keverne","Kevin","Kevina","Kevon","Kevyn","Key","Keyek","Keyes","Keynes","Keyser","Keyte","Kezer","Khai","Khajeh","Khalid","Khalil","Khalin","Khalsa","Khan","Khanna","Khano","Khichabia","Kho","Khorma","Khosrow","Khoury","Khudari","Ki","Kiah","Kial","Kidd","Kidder","Kiefer","Kieffer","Kieger","Kiehl","Kiel","Kiele","Kielty","Kienan","Kier","Kieran","Kiernan","Kiersten","Kikelia","Kiker","Kiki","Kila","Kilah","Kilan","Kilar","Kilbride","Kilby","Kile","Kiley","Kilgore","Kilian","Kilk","Killam","Killarney","Killen","Killian","Killie","Killigrew","Killion","Killoran","Killy","Kilma
rx","Kilroy","Kim","Kimball","Kimbell","Kimber","Kimberlee","Kimberley","Kimberli","Kimberly","Kimberlyn","Kimble","Kimbra","Kimitri","Kimmel","Kimmi","Kimmie","Kimmy","Kimon","Kimura","Kin","Kinata","Kincaid","Kinch","Kinchen","Kind","Kindig","Kinelski","King","Kingdon","Kinghorn","Kingsbury","Kingsley","Kingsly","Kingston","Kinna","Kinnard","Kinney","Kinnie","Kinnon","Kinny","Kinsler","Kinsley","Kinsman","Kinson","Kinzer","Kiona","Kip","Kipp","Kippar","Kipper","Kippie","Kippy","Kipton","Kira","Kiran","Kirbee","Kirbie","Kirby","Kirch","Kirchner","Kiri","Kirima","Kirimia","Kirit","Kirk","Kirkpatrick","Kirkwood","Kironde","Kirsch","Kirschner","Kirshbaum","Kirst","Kirsten","Kirsteni","Kirsti","Kirstin","Kirstyn","Kirt","Kirtley","Kirven","Kirwin","Kisor","Kissee","Kissel","Kissiah","Kissie","Kissner","Kistner","Kisung","Kit","Kitchen","Kitti","Kittie","Kitty","Kiyohara","Kiyoshi","Kizzee","Kizzie","Kjersti","Klapp","Klara","Klarika","Klarrisa","Klatt","Klaus","Klayman","Klecka","Kleeman","Klehm","Kleiman","Klein","Kleinstein","Klemens","Klement","Klemm","Klemperer","Klenk","Kleon","Klepac","Kleper","Kletter","Kliber","Kliman","Kliment","Klimesh","Klina","Kline","Kling","Klingel","Klinger","Klinges","Klockau","Kloman","Klos","Kloster","Klotz","Klug","Kluge","Klump","Klusek","Klute","Knapp","Kneeland","Knepper","Knick","Knight","Knighton","Knipe","Knitter","Knobloch","Knoll","Knorring","Knowland","Knowle","Knowles","Knowling","Knowlton","Knox","Knudson","Knut","Knute","Knuth","Knutson","Ko","Koa","Koah","Koal","Koball","Kobe","Kobi","Koblas","Koblick","Koby","Kobylak","Koch","Koehler","Koenig","Koeninger","Koenraad","Koeppel","Koerlin","Koerner","Koetke","Koffler","Koffman","Koh","Kohl","Kohler","Kohn","Kokaras","Kokoruda","Kolb","Kolivas","Kolk","Koller","Kolnick","Kolnos","Kolodgie","Kolosick","Koloski","Kolva","Komara","Komarek","Komsa","Kondon","Kone","Kong","Konikow","Kono","Konopka","Konrad","Konstance","Konstantin","Konstantine","Konstanze","Konyn","Koo","Kooima","Koosis","Kopans","Kopaz","Kopp","Koppel","Kopple","Kora","Koral","Koralie","Koralle","Koran","Kordula","Kore","Korella","Koren","Korenblat","Koressa","Korey","Korff","Korfonta","Kori","Korie","Korman","Korney","Kornher","Korns","Korrie","Korry","Kort","Korten","Korwin","Korwun","Kory","Kosak","Kosaka","Kosel","Koser","Kosey","Kosiur","Koslo","Koss","Kosse","Kostival","Kostman","Kotick","Kotta","Kotto","Kotz","Kovacev","Kovacs","Koval","Kovar","Kowal","Kowalski","Kowatch","Kowtko","Koy","Koziara","Koziarz","Koziel","Kozloski","Kraft","Kragh","Krahling","Krahmer","Krakow","Krall","Kramer","Kramlich","Krantz","Kraska","Krasner","Krasnoff","Kraul","Kraus","Krause","Krauss","Kravits","Krawczyk","Kreager","Krebs","Kreda","Kreegar","Krefetz","Kreg","Kreiker","Krein","Kreindler","Kreiner","Kreis","Kreit","Kreitman","Krell","Kremer","Krenek","Krenn","Kresic","Kress","Krever","Kries","Krigsman","Krilov","Kris","Krischer","Krisha","Krishna","Krishnah","Krispin","Kriss","Krissie","Krissy","Krista","Kristal","Kristan","Kriste","Kristel","Kristen","Kristi","Kristian","Kristianson","Kristie","Kristien","Kristin","Kristina","Kristine","Kristo","Kristof","Kristofer","Kristoffer","Kristofor","Kristoforo","Kristopher","Kristos","Kristy","Kristyn","Krock","Kroll","Kronfeld","Krongold","Kronick","Kroo","Krucik","Krueger","Krug","Kruger","Krum","Krusche","Kruse","Krute","Kruter","Krutz","Krys","Kryska","Krysta","Krystal","Krystalle","Krystin","Krystle","Krystyna","Ku","Kubetz","Kubiak","Kubis","Kucik","Kudva","Kuebbing","Kuehn","Kuehnel","Kuhlman","
Kuhn","Kulda","Kulseth","Kulsrud","Kumagai","Kumar","Kumler","Kung","Kunin","Kunkle","Kunz","Kuo","Kurland","Kurman","Kurr","Kursh","Kurt","Kurth","Kurtis","Kurtz","Kurtzig","Kurtzman","Kurys","Kurzawa","Kus","Kushner","Kusin","Kuska","Kussell","Kuster","Kutchins","Kuth","Kutzenco","Kutzer","Kwabena","Kwan","Kwang","Kwapong","Kwarteng","Kwasi","Kwei","Kwok","Kwon","Ky","Kyd","Kyl","Kyla","Kylah","Kylander","Kyle","Kylen","Kylie","Kylila","Kylstra","Kylynn","Kym","Kynan","Kyne","Kynthia","Kyriako","Kyrstin","Kyte","La","LaVerne","LaBaw","LaMee","LaMonica","LaMori","LaRue","LaSorella","Laaspere","Laban","Labana","Laband","Labanna","Labannah","Labors","Lacagnia","Lacee","Lacefield","Lacey","Lach","Lachance","Lachish","Lachlan","Lachman","Lachus","Lacie","Lacombe","Lacy","Lad","Ladd","Laddie","Laddy","Laden","Ladew","Ladonna","Lady","Lael","Laetitia","Laflam","Lafleur","Laforge","Lagas","Lagasse","Lahey","Lai","Laidlaw","Lail","Laina","Laine","Lainey","Laing","Laird","Lais","Laise","Lait","Laith","Laius","Lakin","Laks","Laktasic","Lal","Lala","Lalage","Lali","Lalise","Lalita","Lalitta","Lalittah","Lalla","Lallage","Lally","Lalo","Lam","Lamar","Lamarre","Lamb","Lambard","Lambart","Lambert","Lamberto","Lambertson","Lambrecht","Lamdin","Lammond","Lamond","Lamont","Lamoree","Lamoureux","Lamp","Lampert","Lamphere","Lamprey","Lamrert","Lamrouex","Lamson","Lan","Lana","Lanae","Lanam","Lananna","Lancaster","Lance","Lancelle","Lancelot","Lancey","Lanctot","Land","Landa","Landahl","Landan","Landau","Landbert","Landel","Lander","Landers","Landes","Landing","Landis","Landmeier","Landon","Landre","Landri","Landrum","Landry","Landsman","Landy","Lane","Lanette","Laney","Lanford","Lanfri","Lang","Langan","Langbehn","Langdon","Lange","Langelo","Langer","Langham","Langill","Langille","Langley","Langsdon","Langston","Lani","Lanie","Lanita","Lankton","Lanna","Lanni","Lannie","Lanny","Lansing","Lanta","Lantha","Lanti","Lantz","Lanza","Lapham","Lapides","Lapointe","Lapotin","Lara","Laraine","Larcher","Lardner","Lareena","Lareine","Larena","Larentia","Laresa","Largent","Lari","Larianna","Larimer","Larimor","Larimore","Larina","Larine","Laris","Larisa","Larissa","Lark","Larkin","Larkins","Larner","Larochelle","Laroy","Larrabee","Larrie","Larrisa","Larry","Lars","Larsen","Larson","Laryssa","Lasala","Lash","Lashar","Lashoh","Lashond","Lashonda","Lashonde","Lashondra","Lasko","Lasky","Lasley","Lasonde","Laspisa","Lasser","Lassiter","Laszlo","Lat","Latashia","Latea","Latham","Lathan","Lathe","Lathrop","Lathrope","Lati","Latia","Latif","Latimer","Latimore","Latin","Latini","Latisha","Latona","Latonia","Latoniah","Latouche","Latoya","Latoye","Latoyia","Latreece","Latreese","Latrell","Latrena","Latreshia","Latrice","Latricia","Latrina","Latt","Latta","Latterll","Lattie","Lattimer","Latton","Lattonia","Latty","Latvina","Lau","Lauber","Laubin","Laud","Lauder","Lauer","Laufer","Laughlin","Laughry","Laughton","Launce","Launcelot","Laundes","Laura","Lauraine","Laural","Lauralee","Laurance","Laure","Lauree","Laureen","Laurel","Laurella","Lauren","Laurena","Laurence","Laurene","Laurens","Laurent","Laurentia","Laurentium","Lauretta","Laurette","Lauri","Laurianne","Laurice","Laurie","Laurin","Laurinda","Laurita","Lauritz","Lauro","Lauryn","Lauter","Laux","Lauzon","Laval","Laveen","Lavella","Lavelle","Laven","Lavena","Lavern","Laverna","Laverne","Lavery","Lavina","Lavine","Lavinia","Lavinie","Lavoie","Lavona","Law","Lawford","Lawler","Lawley","Lawlor","Lawrence","Lawrenson","Lawry","Laws","Lawson","Lawton","Lawtun","Lay","Layla","Laym
an","Layne","Layney","Layton","Lazar","Lazare","Lazaro","Lazaruk","Lazarus","Lazes","Lazor","Lazos","Le","LeCroy","LeDoux","LeMay","LeRoy","LeVitus","Lea","Leach","Leacock","Leah","Leahey","Leake","Leal","Lean","Leanard","Leander","Leandra","Leandre","Leandro","Leann","Leanna","Leanne","Leanor","Leanora","Leaper","Lear","Leary","Leasia","Leatri","Leatrice","Leavelle","Leavitt","Leavy","Leban","Lebar","Lebaron","Lebbie","Leblanc","Lebna","Leboff","Lechner","Lecia","Leckie","Leclair","Lectra","Leda","Ledah","Ledda","Leddy","Ledeen","Lederer","Lee","LeeAnn","Leeann","Leeanne","Leede","Leeke","Leela","Leelah","Leeland","Leena","Leesa","Leese","Leesen","Leeth","Leff","Leffen","Leffert","Lefkowitz","Lefton","Leftwich","Lefty","Leggat","Legge","Leggett","Legra","Lehet","Lehman","Lehmann","Lehrer","Leia","Leibman","Leicester","Leid","Leif","Leifer","Leifeste","Leigh","Leigha","Leighland","Leighton","Leila","Leilah","Leilani","Leipzig","Leis","Leiser","Leisha","Leitao","Leith","Leitman","Lejeune","Lek","Lela","Lelah","Leland","Leler","Lelia","Lelith","Lello","Lem","Lema","Lemaceon","Lemal","Lemar","Lemcke","Lemieux","Lemire","Lemkul","Lemmie","Lemmuela","Lemmueu","Lemmy","Lemon","Lempres","Lemuel","Lemuela","Lemuelah","Len","Lena","Lenard","Lenci","Lenee","Lenes","Lenette","Lengel","Lenhard","Lenhart","Lenka","Lenna","Lennard","Lenni","Lennie","Lenno","Lennon","Lennox","Lenny","Leno","Lenora","Lenore","Lenox","Lenrow","Lenssen","Lentha","Lenwood","Lenz","Lenzi","Leo","Leod","Leodora","Leoine","Leola","Leoline","Leon","Leona","Leonanie","Leonard","Leonardi","Leonardo","Leone","Leonelle","Leonerd","Leong","Leonhard","Leoni","Leonid","Leonidas","Leonie","Leonor","Leonora","Leonore","Leonsis","Leonteen","Leontina","Leontine","Leontyne","Leopold","Leopoldeen","Leopoldine","Leor","Leora","Leotie","Lepine","Lepley","Lepp","Lepper","Lerner","Leroi","Leroy","Les","Lesak","Leschen","Lesh","Leshia","Lesko","Leslee","Lesley","Lesli","Leslie","Lesly","Lessard","Lesser","Lesslie","Lester","Lesya","Let","Leta","Letch","Letha","Lethia","Leticia","Letisha","Letitia","Letizia","Letreece","Letrice","Letsou","Letta","Lette","Letti","Lettie","Letty","Leund","Leupold","Lev","Levan","Levana","Levania","Levenson","Leventhal","Leventis","Leverett","Leverick","Leveridge","Leveroni","Levesque","Levey","Levi","Levin","Levina","Levine","Levins","Levinson","Levison","Levitan","Levitt","Levon","Levona","Levy","Lew","Lewak","Lewan","Lewanna","Lewellen","Lewendal","Lewert","Lewes","Lewie","Lewin","Lewis","Lewison","Lewiss","Lewls","Lewse","Lexi","Lexie","Lexine","Lexis","Lexy","Ley","Leyes","Leyla","Lezley","Lezlie","Lhary","Li","Lia","Liam","Lian","Liana","Liane","Lianna","Lianne","Lias","Liatrice","Liatris","Lib","Liba","Libb","Libbey","Libbi","Libbie","Libbna","Libby","Libenson","Liberati","Libna","Libnah","Liborio","Libove","Libre","Licastro","Licha","Licht","Lichtenfeld","Lichter","Licko","Lida","Lidah","Lidda","Liddie","Liddle","Liddy","Lidia","Lidstone","Lieberman","Liebermann","Liebman","Liebowitz","Liederman","Lief","Lienhard","Liesa","Lietman","Liew","Lifton","Ligetti","Liggett","Liggitt","Light","Lightfoot","Lightman","Lil","Lila","Lilac","Lilah","Lilas","Lili","Lilia","Lilian","Liliane","Lilias","Lilith","Lilithe","Lilla","Lilli","Lillian","Lillie","Lillis","Lillith","Lilllie","Lilly","Lillywhite","Lily","Lilyan","Lilybel","Lilybelle","Lim","Liman","Limann","Limber","Limbert","Limemann","Limoli","Lin","Lina","Linc","Lincoln","Lind","Linda","Lindahl","Lindberg","Lindblad","Lindbom","Lindeberg","Lindell","Lindemann","Lind
en","Linder","Linders","Lindgren","Lindholm","Lindi","Lindie","Lindley","Lindly","Lindner","Lindo","Lindon","Lindsay","Lindsey","Lindsley","Lindsy","Lindy","Line","Linea","Linehan","Linell","Linet","Linetta","Linette","Ling","Lingwood","Linis","Link","Linker","Linkoski","Linn","Linnea","Linnell","Linneman","Linnet","Linnette","Linnie","Linoel","Linsk","Linskey","Linson","Linus","Linzer","Linzy","Lion","Lionel","Lionello","Lipcombe","Lipfert","Lipinski","Lipkin","Lipman","Liponis","Lipp","Lippold","Lipps","Lipscomb","Lipsey","Lipski","Lipson","Lira","Liris","Lisa","Lisabet","Lisabeth","Lisan","Lisandra","Lisbeth","Liscomb","Lise","Lisetta","Lisette","Lisha","Lishe","Lisk","Lisle","Liss","Lissa","Lissak","Lissi","Lissie","Lissner","Lissy","Lister","Lita","Litch","Litha","Lithea","Litman","Litt","Litta","Littell","Little","Littlejohn","Littman","Litton","Liu","Liuka","Liv","Liva","Livesay","Livi","Livia","Livingston","Livingstone","Livvi","Livvie","Livvy","Livvyy","Livy","Liz","Liza","Lizabeth","Lizbeth","Lizette","Lizzie","Lizzy","Ljoka","Llewellyn","Llovera","Lloyd","Llywellyn","Loar","Loats","Lobel","Lobell","Lochner","Lock","Locke","Lockhart","Locklin","Lockwood","Lodge","Lodhia","Lodi","Lodie","Lodmilla","Lodovico","Lody","Loeb","Loella","Loesceke","Loferski","Loftis","Loftus","Logan","Loggia","Loggins","Loginov","Lohman","Lohner","Lohrman","Lohse","Lois","Loise","Lola","Lolande","Lolanthe","Lole","Loleta","Lolita","Lolly","Loma","Lomasi","Lomax","Lombard","Lombardi","Lombardo","Lombardy","Lon","Lona","London","Londoner","Lonee","Lonergan","Long","Longan","Longawa","Longerich","Longfellow","Longley","Longmire","Longo","Longtin","Longwood","Loni","Lonier","Lonna","Lonnard","Lonne","Lonni","Lonnie","Lonny","Lontson","Loomis","Loos","Lopes","Lopez","Lora","Lorain","Loraine","Loralee","Loralie","Loralyn","Loram","Lorant","Lord","Lordan","Loredana","Loredo","Loree","Loreen","Lorelei","Lorelie","Lorelle","Loren","Lorena","Lorene","Lorens","Lorenz","Lorenza","Lorenzana","Lorenzo","Loresz","Loretta","Lorette","Lori","Loria","Lorianna","Lorianne","Lorie","Lorien","Lorilee","Lorilyn","Lorimer","Lorin","Lorinda","Lorine","Loriner","Loring","Loris","Lorita","Lorn","Lorna","Lorne","Lorola","Lorolla","Lorollas","Lorou","Lorraine","Lorrayne","Lorri","Lorrie","Lorrimer","Lorrimor","Lorrin","Lorry","Lorsung","Lorusso","Lory","Lose","Loseff","Loss","Lossa","Losse","Lot","Lothair","Lothaire","Lothar","Lothario","Lotson","Lotta","Lotte","Lotti","Lottie","Lotty","Lotus","Lotz","Lou","Louanna","Louanne","Louella","Lough","Lougheed","Loughlin","Louie","Louis","Louisa","Louise","Louisette","Louls","Lounge","Lourdes","Lourie","Louth","Loutitia","Loux","Lovash","Lovato","Love","Lovel","Lovell","Loveridge","Lovering","Lovett","Lovich","Lovmilla","Low","Lowe","Lowell","Lowenstein","Lowenstern","Lower","Lowery","Lowis","Lowndes","Lowney","Lowrance","Lowrie","Lowry","Lowson","Loy","Loyce","Loydie","Lozano","Lozar","Lu","Luana","Luane","Luann","Luanne","Luanni","Luba","Lubba","Lubbi","Lubbock","Lubeck","Luben","Lubet","Lubin","Lubow","Luby","Luca","Lucais","Lucania","Lucas","Lucchesi","Luce","Lucey","Lucho","Luci","Lucia","Lucian","Luciana","Luciano","Lucias","Lucic","Lucie","Lucien","Lucienne","Lucier","Lucila","Lucilia","Lucilla","Lucille","Lucina","Lucinda","Lucine","Lucio","Lucita","Lucius","Luckett","Luckin","Lucky","Lucrece","Lucretia","Lucy","Lud","Ludeman","Ludewig","Ludie","Ludlew","Ludlow","Ludly","Ludmilla","Ludovick","Ludovico","Ludovika","Ludvig","Ludwig","Ludwigg","Ludwog","Luebke","Luedtke","Luehrmann","
Luella","Luelle","Lugar","Lugo","Luhe","Luhey","Luht","Luigi","Luigino","Luing","Luis","Luisa","Luise","Luiza","Lukas","Lukash","Lukasz","Luke","Lukey","Lukin","Lula","Lulita","Lull","Lulu","Lumbard","Lumbye","Lumpkin","Luna","Lund","Lundberg","Lundeen","Lundell","Lundgren","Lundin","Lundquist","Lundt","Lune","Lunetta","Lunette","Lunn","Lunna","Lunneta","Lunnete","Lunseth","Lunsford","Lunt","Luo","Lupe","Lupee","Lupien","Lupita","Lura","Lurette","Lurie","Lurleen","Lurlene","Lurline","Lusa","Lussi","Lussier","Lust","Lustick","Lustig","Lusty","Lutero","Luthanen","Luther","Luttrell","Luwana","Lux","Luz","Luzader","Ly","Lyall","Lyckman","Lyda","Lydell","Lydia","Lydie","Lydon","Lyell","Lyford","Lyle","Lyman","Lymann","Lymn","Lyn","Lynch","Lynd","Lynda","Lynde","Lyndel","Lyndell","Lynden","Lyndes","Lyndon","Lyndsay","Lyndsey","Lyndsie","Lyndy","Lynea","Lynelle","Lynett","Lynette","Lynn","Lynna","Lynne","Lynnea","Lynnell","Lynnelle","Lynnet","Lynnett","Lynnette","Lynnworth","Lyns","Lynsey","Lynus","Lyon","Lyons","Lyontine","Lyris","Lysander","Lyssa","Lytle","Lytton","Lyudmila","Ma","Maag","Mab","Mabel","Mabelle","Mable","Mac","MacCarthy","MacDermot","MacDonald","MacDonell","MacDougall","MacEgan","MacFadyn","MacFarlane","MacGregor","MacGuiness","MacIlroy","MacIntosh","MacIntyre","MacKay","MacKenzie","MacLaine","MacLay","MacLean","MacLeod","MacMahon","MacMillan","MacMullin","MacNair","MacNamara","MacPherson","MacRae","MacSwan","Macario","Maccarone","Mace","Macegan","Macey","Machos","Machute","Machutte","Mack","Mackenie","Mackenzie","Mackey","Mackie","Mackintosh","Mackler","Macknair","Mackoff","Macnair","Macomber","Macri","Macur","Macy","Mada","Madai","Madaih","Madalena","Madalyn","Madancy","Madaras","Maddalena","Madden","Maddeu","Maddi","Maddie","Maddis","Maddock","Maddocks","Maddox","Maddy","Madea","Madel","Madelaine","Madeleine","Madelena","Madelene","Madelin","Madelina","Madeline","Madella","Madelle","Madelon","Madelyn","Madge","Madi","Madian","Madid","Madigan","Madison","Madlen","Madlin","Madoc","Madonia","Madonna","Madora","Madox","Madra","Madriene","Madson","Mady","Mae","Maegan","Maeve","Mafala","Mafalda","Maffa","Maffei","Mag","Magan","Magas","Magavern","Magbie","Magda","Magdaia","Magdala","Magdalen","Magdalena","Magdalene","Magdau","Magee","Magel","Magen","Magena","Mages","Maggee","Maggi","Maggie","Maggio","Maggs","Maggy","Maghutte","Magill","Magna","Magner","Magnien","Magnolia","Magnum","Magnus","Magnuson","Magnusson","Magocsi","Magree","Maguire","Magulac","Mahala","Mahalia","Mahan","Mahau","Maher","Mahla","Mahmoud","Mahmud","Mahon","Mahoney","Maia","Maiah","Maibach","Maible","Maice","Maida","Maidel","Maidie","Maidy","Maier","Maiga","Maighdiln","Maighdlin","Mailand","Main","Mainis","Maiocco","Mair","Maire","Maise","Maisel","Maisey","Maisie","Maison","Maite","Maitilde","Maitland","Maitund","Maje","Majka","Major","Mak","Makell","Maker","Mal","Mala","Malachi","Malachy","Malamud","Malamut","Malan","Malanie","Malarkey","Malaspina","Malca","Malcah","Malchus","Malchy","Malcolm","Malcom","Malda","Maleeny","Malek","Maleki","Malena","Malet","Maletta","Mali","Malia","Malik","Malin","Malina","Malinda","Malinde","Malinin","Malinowski","Malissa","Malissia","Malita","Malka","Malkah","Malkin","Mall","Mallen","Maller","Malley","Mallin","Mallina","Mallis","Mallissa","Malloch","Mallon","Mallorie","Mallory","Malloy","Malo","Malone","Maloney","Malonis","Malony","Malorie","Malory","Maloy","Malti","Maltz","Maltzman","Malva","Malvia","Malvie","Malvin","Malvina","Malvino","Malynda","Mame","Mamie","Mamoun","Man","Ma
naker","Manara","Manard","Manchester","Mancino","Manda","Mandal","Mandel","Mandelbaum","Mandell","Mandeville","Mandi","Mandie","Mandle","Mandler","Mandy","Mandych","Manella","Manfred","Manheim","Mani","Manley","Manlove","Manly","Mann","Mannes","Mannie","Manning","Manno","Mannos","Mannuela","Manny","Mano","Manoff","Manolo","Manon","Manouch","Mansfield","Manson","Mansoor","Mansur","Manthei","Manton","Manuel","Manuela","Manus","Manvel","Manvell","Manvil","Manville","Manwell","Manya","Mapel","Mapes","Maples","Mar","Mara","Marabel","Marabelle","Marala","Marasco","Marashio","Marbut","Marc","Marceau","Marcel","Marcela","Marcelia","Marcell","Marcella","Marcelle","Marcellina","Marcelline","Marcello","Marcellus","Marcelo","March","Marchak","Marchal","Marchall","Marchelle","Marchese","Marci","Marcia","Marciano","Marcie","Marcile","Marcille","Marcin","Marco","Marcos","Marcoux","Marcus","Marcy","Marden","Marder","Marduk","Mareah","Marek","Marela","Mareld","Marelda","Marella","Marelya","Maren","Marena","Marentic","Maressa","Maretz","Marga","Margalit","Margalo","Margaret","Margareta","Margarete","Margaretha","Margarethe","Margaretta","Margarette","Margarida","Margarita","Margaux","Marge","Margeaux","Margery","Marget","Margette","Margetts","Margherita","Margi","Margie","Margit","Margo","Margot","Margret","Margreta","Marguerie","Marguerita","Marguerite","Margy","Mari","Maria","Mariam","Marian","Mariana","Mariand","Mariande","Mariandi","Mariann","Marianna","Marianne","Mariano","Maribel","Maribelle","Maribeth","Marice","Maridel","Marie","Marie-Ann","Marie-Jeanne","Marieann","Mariejeanne","Mariel","Mariele","Marielle","Mariellen","Marienthal","Marietta","Mariette","Marigold","Marigolda","Marigolde","Marijane","Marijn","Marijo","Marika","Mariken","Mariko","Maril","Marilee","Marilin","Marilla","Marillin","Marilou","Marilyn","Marin","Marina","Marinelli","Marinna","Marino","Mario","Marion","Mariquilla","Maris","Marisa","Mariska","Marissa","Marita","Maritsa","Marius","Mariya","Marj","Marja","Marjana","Marje","Marji","Marjie","Marjorie","Marjory","Marjy","Mark","Market","Marketa","Markland","Markman","Marko","Markos","Markowitz","Marks","Markson","Markus","Marl","Marla","Marlane","Marlea","Marleah","Marlee","Marleen","Marlen","Marlena","Marlene","Marler","Marlette","Marley","Marlie","Marlin","Marline","Marlo","Marlon","Marlow","Marlowe","Marlyn","Marmaduke","Marmawke","Marmion","Marna","Marne","Marney","Marni","Marnia","Marnie","Maro","Marola","Marolda","Maroney","Marou","Marozas","Marozik","Marpet","Marquardt","Marquet","Marquez","Marquis","Marquita","Marr","Marra","Marras","Marrilee","Marrin","Marriott","Marris","Marrissa","Marron","Mars","Marsden","Marsh","Marsha","Marshal","Marshall","Marsiella","Marsland","Marston","Mart","Marta","Martainn","Marte","Marteena","Martel","Martell","Martella","Martelle","Martelli","Marten","Martens","Martguerita","Martha","Marthe","Marthena","Marti","Martica","Martie","Martijn","Martin","Martina","Martine","Martineau","Martinelli","Martinez","Martinic","Martino","Martinsen","Martinson","Martita","Martres","Martsen","Marty","Martyn","Martynne","Martz","Marucci","Marutani","Marv","Marva","Marve","Marvel","Marvella","Marven","Marvin","Marwin","Marx","Mary","Marya","Maryann","Maryanna","Maryanne","Marybella","Marybelle","Marybeth","Maryellen","Maryjane","Maryjo","Maryl","Marylee","Marylin","Marylinda","Marylou","Maryly","Marylynne","Maryn","Maryrose","Marys","Marysa","Marzi","Mas","Masao","Mascia","Masera","Masha","Mashe","Mason","Masry","Massarelli","Massey","Massie","Massimiliano","M
assimo","Massingill","Masson","Mast","Mastat","Masterson","Mastic","Mastrianni","Mat","Mata","Matazzoni","Matejka","Matelda","Mateo","Materi","Materse","Mateusz","Mateya","Mathe","Matheny","Mather","Matheson","Mathew","Mathews","Mathi","Mathia","Mathian","Mathias","Mathilda","Mathilde","Mathis","Mathre","Mathur","Matias","Matilda","Matilde","Matland","Matless","Matlick","Matrona","Matronna","Matt","Matta","Mattah","Matteo","Matthaeus","Matthaus","Matthei","Mattheus","Matthew","Matthews","Matthia","Matthias","Matthieu","Matthiew","Matthus","Matti","Mattias","Mattie","Mattland","Mattox","Mattson","Matty","Matusow","Mauceri","Mauchi","Maud","Maude","Maudie","Mauer","Mauldon","Maunsell","Maupin","Maura","Mauralia","Maure","Maureen","Maureene","Maurene","Maurer","Mauretta","Maurey","Mauri","Maurice","Mauricio","Maurie","Maurili","Maurilia","Maurilla","Maurine","Maurise","Maurita","Maurits","Maurizia","Maurizio","Mauro","Maurreen","Maury","Mauve","Mavilia","Mavis","Mavra","Max","Maxa","Maxama","Maxantia","Maxentia","Maxey","Maxfield","Maxi","Maxia","Maxie","Maxim","Maxima","Maximilian","Maximilianus","Maximilien","Maximo","Maxine","Maxma","Maxwell","Maxy","May","Maya","Maybelle","Mayberry","Mayce","Mayda","Maye","Mayeda","Mayer","Mayes","Mayfield","Mayhew","Mayman","Maynard","Mayne","Maynord","Mayor","Mays","Mayworm","Maze","Mazel","Maziar","Mazlack","Mazman","Mazonson","Mazur","Mazurek","McAdams","McAfee","McAllister","McArthur","McBride","McCafferty","McCahill","McCall","McCallion","McCallum","McCandless","McCartan","McCarthy","McCarty","McClain","McClary","McClees","McClelland","McClenaghan","McClenon","McClimans","McClish","McClure","McCollum","McComb","McConaghy","McConnell","McCord","McCormac","McCormick","McCourt","McCowyn","McCoy","McCready","McCreary","McCreery","McCulloch","McCullough","McCully","McCurdy","McCutcheon","McDade","McDermott","McDonald","McDougall","McDowell","McEvoy","McFadden","McFarland","McFerren","McGannon","McGaw","McGean","McGee","McGill","McGinnis","McGrath","McGraw","McGray","McGregor","McGrody","McGruter","McGuire","McGurn","McHail","McHale","McHenry","McHugh","McIlroy","McIntosh","McIntyre","McKale","McKay","McKee","McKenna","McKenzie","McKeon","McKinney","McKnight","McLain","McLaughlin","McLaurin","McLeod","McLeroy","McLoughlin","McLyman","McMahon","McMaster","McMath","McMillan","McMullan","McMurry","McNair","McNalley","McNally","McNamara","McNamee","McNeely","McNeil","McNelly","McNully","McNutt","McQuade","McQuillin","McQuoid","McRipley","McRoberts","McSpadden","McTyre","McWherter","McWilliams","Mead","Meade","Meador","Meadow","Meadows","Meagan","Meaghan","Meagher","Meakem","Means","Meara","Meares","Mears","Meave","Mechelle","Mechling","Mecke","Meda","Medarda","Medardas","Medea","Medeah","Medin","Medina","Medlin","Medor","Medora","Medorra","Medovich","Medrek","Medwin","Meece","Meehan","Meek","Meeker","Meeks","Meenen","Meg","Megan","Megargee","Megdal","Megen","Meggi","Meggie","Meggs","Meggy","Meghan","Meghann","Mehala","Mehalek","Mehalick","Mehetabel","Mehitable","Mehta","Mei","Meibers","Meier","Meijer","Meilen","Meill","Meingolda","Meingoldas","Meir","Meisel","Meit","Mel","Mela","Melamed","Melamie","Melan","Melania","Melanie","Melantha","Melany","Melar","Melba","Melborn","Melbourne","Melburn","Melcher","Melda","Meldoh","Meldon","Melena","Melentha","Melesa","Melessa","Meletius","Melgar","Meli","Melia","Melicent","Melina","Melinda","Melinde","Melisa","Melisande","Melisandra","Melise","Melisenda","Melisent","Melissa","Melisse","Melita","Melitta","Mell","Mella","Me
llar","Mellen","Melleta","Mellette","Melli","Mellicent","Mellie","Mellins","Mellisa","Mellisent","Mellitz","Mellman","Mello","Melloney","Melly","Melmon","Melnick","Melodee","Melodie","Melody","Melone","Melonie","Melony","Melosa","Melquist","Melton","Melva","Melvena","Melville","Melvin","Melvina","Melvyn","Memberg","Memory","Mena","Menard","Menashem","Mencher","Mendel","Mendelsohn","Mendelson","Mendes","Mendez","Mendie","Mendive","Mendoza","Mendy","Meneau","Menedez","Menell","Menendez","Meng","Menides","Menis","Menken","Menon","Mensch","Menzies","Mera","Meraree","Merari","Meras","Merat","Merc","Mercado","Merce","Mercedes","Merceer","Mercer","Merchant","Merci","Mercie","Mercier","Mercola","Mercorr","Mercuri","Mercy","Merdith","Meredeth","Meredi","Meredith","Meredithe","Merell","Merete","Meri","Meridel","Merideth","Meridith","Meriel","Merilee","Merill","Merilyn","Meris","Merissa","Merkle","Merkley","Merl","Merla","Merle","Merlin","Merlina","Merline","Merna","Merola","Merow","Merralee","Merras","Merrel","Merrell","Merri","Merriam","Merrick","Merridie","Merrie","Merrielle","Merril","Merrile","Merrilee","Merrili","Merrill","Merrily","Merriman","Merriott","Merritt","Merrow","Merry","Mersey","Mert","Merta","Merth","Merton","Merv","Mervin","Merwin","Merwyn","Meryl","Mesics","Messere","Messing","Meta","Metabel","Metcalf","Meter","Methuselah","Metsky","Mettah","Metts","Metzgar","Metzger","Meunier","Meurer","Meuse","Meuser","Meyer","Meyeroff","Meyers","Mezoff","Mia","Mic","Micaela","Micah","Micco","Mich","Michael","Michaela","Michaele","Michaelina","Michaeline","Michaella","Michaeu","Michail","Michal","Michale","Michaud","Miche","Micheal","Micheil","Michel","Michele","Michelina","Micheline","Michell","Michella","Michelle","Michelsen","Michey","Michi","Michigan","Michiko","Michon","Mick","Mickelson","Mickey","Micki","Mickie","Micky","Micro","Miculek","Midas","Middendorf","Middle","Middlesworth","Middleton","Mide","Midge","Midian","Midis","Mientao","Miett","Migeon","Mighell","Mignon","Mignonne","Miguel","Miguela","Miguelita","Mihalco","Mihe","Mika","Mikael","Mikaela","Mikal","Mike","Mikel","Mikes","Mikey","Miki","Mikihisa","Mikiso","Mikkanen","Mikkel","Miko","Mikol","Miksen","Mil","Mila","Milan","Milano","Milburn","Milburr","Milburt","Milda","Milde","Mildred","Mildrid","Mile","Milena","Miles","Milewski","Milford","Milicent","Milinda","Milissa","Milissent","Milka","Milks","Mill","Milla","Millan","Millar","Millard","Millburn","Millda","Miller","Millford","Millham","Millhon","Milli","Millian","Millicent","Millie","Millisent","Millman","Mills","Millur","Millwater","Milly","Milman","Milo","Milon","Milone","Milore","Milson","Milstone","Milt","Miltie","Milton","Milty","Milurd","Milzie","Mima","Mimi","Min","Mina","Minabe","Minardi","Minda","Mindi","Mindy","Miner","Minerva","Mines","Minetta","Minette","Ming","Mingche","Mini","Minica","Minier","Minna","Minnaminnie","Minne","Minni","Minnie","Minnnie","Minny","Minor","Minoru","Minsk","Minta","Minton","Mintun","Mintz","MiofMela","Miquela","Mir","Mira","Mirabel","Mirabella","Mirabelle","Miran","Miranda","Mireielle","Mireille","Mirella","Mirelle","Miriam","Mirielle","Mirilla","Mirisola","Mirna","Mirth","Miru","Mischa","Misha","Mishaan","Missi","Missie","Missy","Misti","Mistrot","Misty","Mita","Mitch","Mitchael","Mitchel","Mitchell","Mitchiner","Mitinger","Mitman","Mitran","Mittel","Mitzi","Mitzie","Mitzl","Miun","Mixie","Miyasawa","Mizuki","Mlawsky","Mllly","Moazami","Moberg","Mobley","Mochun","Mode","Modern","Modesta","Modeste","Modestia","Modestine","Modesty","Modie"
,"Modla","Moe","Moersch","Moffat","Moffit","Moffitt","Mogerly","Moguel","Mohamed","Mohammad","Mohammed","Mohandas","Mohandis","Mohl","Mohn","Mohr","Mohsen","Mohun","Moia","Moina","Moir","Moira","Moise","Moises","Moishe","Moitoso","Mojgan","Mok","Mokas","Molini","Moll","Mollee","Molli","Mollie","Molloy","Molly","Molton","Mommy","Mona","Monaco","Monafo","Monagan","Monah","Monahan","Monahon","Monarski","Moncear","Mond","Monda","Moneta","Monetta","Mongeau","Monia","Monica","Monie","Monika","Monique","Monjan","Monjo","Monk","Monney","Monreal","Monro","Monroe","Monroy","Monson","Monsour","Mont","Montagna","Montagu","Montague","Montana","Montanez","Montano","Monte","Monteith","Monteria","Montford","Montfort","Montgomery","Monti","Monto","Monty","Moody","Mook","Moon","Mooney","Moonier","Moor","Moore","Moorefield","Moorish","Mor","Mora","Moran","Mord","Mordecai","Mordy","Moreen","Morehouse","Morel","Moreland","Morell","Morena","Moreno","Morentz","Moreta","Moretta","Morette","Moreville","Morey","Morez","Morgan","Morgana","Morganica","Morganne","Morganstein","Morgen","Morgenthaler","Morgun","Mori","Moria","Moriah","Moriarty","Morice","Morie","Morissa","Morita","Moritz","Moriyama","Morlee","Morley","Morly","Morna","Morocco","Morra","Morrell","Morrie","Morril","Morrill","Morris","Morrison","Morrissey","Morry","Morse","Mort","Morten","Mortensen","Mortie","Mortimer","Morton","Morty","Morven","Morville","Morvin","Mosa","Mosby","Moscow","Mose","Moseley","Moselle","Mosenthal","Moser","Mosera","Moses","Moshe","Moshell","Mosier","Mosira","Moskow","Mosley","Mosora","Mosra","Moss","Mossberg","Mossman","Most","Motch","Moth","Mott","Motteo","Mou","Moulden","Mouldon","Moule","Moulton","Mount","Mountford","Mountfort","Mourant","Moureaux","Mowbray","Moya","Moyer","Moyers","Moyna","Moynahan","Moyra","Mozart","Mozelle","Mozes","Mozza","Mraz","Mroz","Mueller","Muffin","Mufi","Mufinella","Muhammad","Muir","Muire","Muirhead","Mukerji","Mukul","Mukund","Mulcahy","Mulderig","Muldon","Mulford","Mullane","Mullen","Muller","Mulligan","Mullins","Mulloy","Mulry","Mulvihill","Mumford","Mun","Muna","Munafo","Muncey","Mundford","Mundt","Mundy","Munford","Mungo","Mungovan","Munmro","Munn","Munniks","Munro","Munroe","Muns","Munsey","Munshi","Munson","Munster","Munt","Mur","Murage","Muraida","Murat","Murdocca","Murdoch","Murdock","Mureil","Muriah","Murial","Muriel","Murielle","Murphy","Murrah","Murray","Murrell","Murry","Murtagh","Murtha","Murton","Murvyn","Musa","Muscolo","Musetta","Musette","Mushro","Muslim","Musser","Mussman","Mutz","My","Mya","Myca","Mycah","Mychael","Mychal","Myer","Myers","Myke","Mylan","Mylander","Myles","Mylo","Mylor","Myna","Myo","Myra","Myrah","Myranda","Myriam","Myrilla","Myrle","Myrlene","Myrna","Myron","Myrt","Myrta","Myrtia","Myrtice","Myrtie","Myrtle","Myrvyn","Myrwyn","Na","Naam","Naaman","Naamana","Naamann","Naara","Naarah","Naashom","Nabal","Nabala","Nabalas","Nabila","Nace","Nachison","Nada","Nadab","Nadaba","Nadabas","Nadabb","Nadabus","Nadaha","Nadbus","Nadda","Nadean","Nadeau","Nadeen","Nader","Nadia","Nadine","Nadiya","Nadler","Nador","Nady","Nadya","Nafis","Naga","Nagel","Nagey","Nagle","Nagy","Nahama","Nahamas","Nahshon","Nahshu","Nahshun","Nahshunn","Nahtanha","Nahum","Naiditch","Naima","Naji","Nakada","Nakashima","Nakasuji","Nalani","Nalda","Naldo","Nalepka","Nally","Nalor","Nam","Naman","Namara","Names","Nan","Nana","Nananne","Nance","Nancee","Nancey","Nanci","Nancie","Nancy","Nandor","Nanete","Nanette","Nani","Nanice","Nanine","Nanji","Nannette","Nanni","Nannie","Nanny","Nanon","Naoma","Na
omi","Naor","Nap","Napier","Naples","Napoleon","Nappie","Nappy","Naquin","Nara","Narah","Narayan","Narcho","Narcis","Narcissus","Narda","Naresh","Nari","Nariko","Narine","Narra","Narton","Nary","Nash","Nashbar","Nashner","Nasho","Nashom","Nashoma","Nasia","Nason","Nassi","Nassir","Nastassia","Nasya","Nat","Nata","Natal","Natala","Natale","Natalee","Natalia","Natalie","Natalina","Nataline","Natalya","Nataniel","Natascha","Natasha","Natassia","Nate","Natelson","Nath","Nathalia","Nathalie","Nathan","Nathanael","Nathanial","Nathaniel","Nathanil","Nathanson","Natica","Natie","Natiha","Natika","Nations","Natividad","Natka","Nattie","Natty","Nava","Navada","Naval","Navarro","Nawrocki","Nay","Naylor","Nazar","Nazario","Nazarius","Nazler","Nea","Neal","Neala","Nealah","Neale","Nealey","Neall","Nealon","Nealson","Nealy","Neau","Ned","Neda","Nedda","Neddie","Neddra","Neddy","Nedi","Nedra","Nedrah","Nedrud","Nedry","Nee","Neel","Neela","Neelon","Neely","Neeoma","Nefen","Neff","Negris","Nehemiah","Neibart","Neidhardt","Neil","Neila","Neile","Neill","Neilla","Neille","Neils","Neilson","Neiman","Neisa","Nel","Nela","Nelan","Nelda","Nelia","Nelie","Nell","Nella","Nellda","Nelle","Nelli","Nellie","Nellir","Nelly","Nelrsa","Nels","Nelsen","Nelson","Nema","Nemhauser","Nena","Nenney","Neo","Neom","Neoma","Neomah","Neona","Nepean","Nepil","Nereen","Nereids","Nereus","Neri","Nerin","Nerine","Nerissa","Nerita","Nerland","Nero","Neron","Nert","Nerta","Nerte","Nerti","Nertie","Nerty","Nesbitt","Nesline","Neslund","Ness","Nessa","Nessi","Nessie","Nessim","Nessy","Nesta","Nester","Nesto","Nestor","Nett","Netta","Nette","Netti","Nettie","Nettle","Netty","Neu","Neuberger","Neuburger","Neufer","Neukam","Neumann","Neumark","Neumeyer","Neurath","Nev","Neva","Nevada","Nevai","Neve","Neveda","Nevil","Nevile","Neville","Nevin","Nevins","Nevlin","Nevsa","New","Newberry","Newbill","Newbold","Newby","Newcomb","Newcomer","Newel","Newell","Newfeld","Newhall","Newkirk","Newlin","Newman","Newmann","Newmark","Newsom","Newton","Neysa","Ng","Ngo","Nguyen","Niabi","Nial","Niall","Nibbs","Nic","Nica","Niccolo","Nich","Nichani","Nichol","Nichola","Nicholas","Nichole","Nicholl","Nicholle","Nichols","Nicholson","Nichy","Nick","Nickelsen","Nickerson","Nickey","Nicki","Nickie","Nickles","Nicko","Nickola","Nickolai","Nickolas","Nickolaus","Nicks","Nicky","Nico","Nicodemus","Nicol","Nicola","Nicolai","Nicolais","Nicolas","Nicolau","Nicole","Nicolea","Nicolella","Nicolette","Nicoli","Nicolina","Nicoline","Nicolis","Nicolle","Nidia","Nidorf","Nieberg","Niehaus","Niel","Niela","Niels","Nielsen","Nielson","Nierman","Nies","Nievelt","Nigel","Nightingale","Nihhi","Nihi","Nika","Nikaniki","Nike","Niki","Nikita","Nikki","Nikkie","Niklaus","Niko","Nikola","Nikolai","Nikolaos","Nikolas","Nikolaus","Nikoletta","Nikolia","Nikolos","Nikos","Nil","Nila","Nile","Niles","Nilla","Nils","Nilson","Nimesh","Nimocks","Nims","Nina","Nine","Ninetta","Ninette","Ninnetta","Ninnette","Nino","Ninon","Ninos","Niobe","Nipha","Niple","Nisa","Nisbet","Nisen","Nishi","Nissa","Nisse","Nissensohn","Nissie","Nissy","Nita","Nitin","Nitz","Nitza","Niu","Niven","Nixie","Nixon","Noach","Noah","Noak","Noakes","Noam","Noami","Nobe","Nobel","Nobell","Nobie","Nobile","Noble","Noby","Nochur","Nodab","Nodababus","Nodarse","Noe","Noel","Noelani","Noell","Noella","Noelle","Noellyn","Noelyn","Noemi","Nogas","Noguchi","Nola","Nolan","Nolana","Noland","Nole","Noleta","Noletta","Noli","Nolie","Nolita","Nolitta","Noll","Nollie","Nolly","Nolte","Noma","Noman","Nomi","Nona","Nonah","Noni","Nonie"
,"Nonna","Nonnah","Noonan","Noonberg","Nor","Nora","Norah","Norbert","Norbie","Norby","Nord","Nordgren","Nordin","Nordine","Nore","Norean","Noreen","Norene","Norford","Norina","Norine","Norita","Nork","Norling","Norm","Norma","Normalie","Norman","Normand","Normandy","Normi","Normie","Normy","Norri","Norrie","Norris","Norrv","Norry","Norse","North","Northey","Northington","Northrop","Northrup","Northway","Norton","Norty","Norval","Norvall","Norvan","Norvell","Norven","Norvil","Norvin","Norvol","Norvun","Norward","Norwood","Norword","Nottage","Nova","Novah","Novak","Novelia","Novello","Novia","Novick","Novikoff","Nowell","Noyes","Nozicka","Nudd","Nugent","Nuli","Nunci","Nuncia","Nunciata","Nunes","Nunnery","Nur","Nuri","Nuriel","Nuris","Nurse","Nussbaum","Nutter","Nuzzi","Nyberg","Nydia","Nye","Nyhagen","Nysa","Nyssa","O'Hara","O'Neill","Oak","Oakes","Oakie","Oakleil","Oakley","Oakman","Oaks","Oates","Oatis","Oba","Obadiah","Obadias","Obala","Oballa","Obara","Obau","Obaza","Obbard","Obe","Obed","Obeded","Obediah","Obel","Obelia","Obellia","Obeng","Ober","Oberg","Oberheim","Oberon","Oberstone","Obidiah","Obie","Obla","Obola","Obrien","Oby","Oca","Ocana","Ochs","Ocker","Ocko","Oconnor","Octave","Octavia","Octavian","Octavie","Octavius","Octavla","Octavus","Odab","Odawa","Ode","Odeen","Odel","Odele","Odelet","Odelia","Odelinda","Odell","Odella","Odelle","Odericus","Odessa","Odetta","Odette","Odey","Odie","Odilia","Odille","Odilo","Odin","Odine","Odlo","Odo","Odom","Odoric","Odrick","Ody","Odysseus","Odyssey","Oech","Oeflein","Oehsen","Ofelia","Ofella","Offen","Ofilia","Ofori","Og","Ogata","Ogawa","Ogdan","Ogden","Ogdon","Ogg","Ogilvie","Ogilvy","Oglesby","Ogren","Ohara","Ohare","Ohaus","Ohl","Oilla","Oina","Oira","Okajima","Okechuku","Okubo","Okun","Okwu","Ola","Olaf","Olag","Olatha","Olathe","Olav","Olcott","Old","Older","Olds","Ole","Oleg","Olen","Olenka","Olenolin","Olenta","Oler","Oleta","Oletha","Olethea","Oletta","Olette","Olfe","Olga","Olia","Oliana","Olimpia","Olin","Olinde","Oliva","Olivann","Olive","Oliver","Olivero","Olivette","Olivia","Olivie","Olivier","Oliviero","Oliy","Ollayos","Olli","Ollie","Olly","Olmstead","Olmsted","Olnay","Olnee","Olnek","Olney","Olnton","Olodort","Olpe","Olsen","Olsewski","Olshausen","Olson","Olsson","Olva","Olvan","Olwen","Olwena","Oly","Olympe","Olympia","Olympias","Olympie","Olympium","Om","Oman","Omar","Omari","Omarr","Omer","Omero","Omidyar","Omland","Omor","Omora","Omura","On","Ona","Onder","Ondine","Ondrea","Ondrej","Oneal","Oneida","Oneil","Oneill","Onfre","Onfroi","Ong","Ongun","Oni","Onia","Onida","Oniskey","Onofredo","Onstad","Ontina","Ontine","Onyx","Oona","Opal","Opalina","Opaline","Ophelia","Ophelie","Oppen","Opportina","Opportuna","Ora","Orabel","Orabelle","Oralee","Oralia","Oralie","Oralla","Oralle","Oram","Oran","Orazio","Orbadiah","Orban","Ordway","Orel","Orelee","Orelia","Orelie","Orella","Orelle","Orelu","Oren","Orest","Oreste","Orestes","Orferd","Orfield","Orfinger","Orford","Orfurd","Orgel","Orgell","Ori","Oria","Orian","Oriana","Oriane","Orianna","Oribel","Oribella","Oribelle","Oriel","Orin","Oringa","Oringas","Oriole","Orion","Orit","Orji","Orlan","Orland","Orlando","Orlanta","Orlantha","Orlena","Orlene","Orlina","Orling","Orlosky","Orlov","Orly","Orman","Ormand","Orme","Ormiston","Ormond","Orms","Ormsby","Orna","Ornas","Ornie","Ornstead","Orola","Orose","Orozco","Orpah","Orpha","Orpheus","Orr","Orran","Orren","Orrin","Orsa","Orsay","Orsini","Orsino","Orsola","Orson","Orten","Ortensia","Orth","Orthman","Ortiz","Orton","Ortrud","Ortru
de","Oruntha","Orv","Orva","Orvah","Orvan","Orvas","Orvie","Orvil","Orville","Orwin","Os","Osana","Osanna","Osber","Osbert","Osborn","Osborne","Osbourn","Osbourne","Oscar","Osei","Osgood","Osher","Oshinski","Osi","Osithe","Oskar","Osman","Osmen","Osmo","Osmond","Osmund","Osric","Osrick","Osrock","Ossie","Osswald","Ossy","Ostap","Oster","Osterhus","Ostler","Ostraw","Osugi","Oswal","Oswald","Oswell","Oswin","Osy","Osyth","Ot","Otero","Otes","Otha","Othe","Othelia","Othella","Othello","Other","Othilia","Othilie","Otho","Otila","Otilia","Otina","Otis","Ott","Ottavia","Otte","Otter","Otti","Ottie","Ottilie","Ottillia","Ottinger","Otto","Oulman","Outhe","Outlaw","Ovid","Ovida","Owades","Owain","Owen","Owena","Owens","Oxford","Oxley","Oys","Oz","Oza","Ozan","Ozen","Ozkum","Ozmo","Ozzie","Ozzy","O'Brien","O'Callaghan","O'Carroll","O'Connell","O'Conner","O'Connor","O'Dell","O'Doneven","O'Donnell","O'Donoghue","O'Donovan","O'Driscoll","O'Gowan","O'Grady","O'Hara","O'Kelly","O'Mahony","O'Malley","O'Meara","O'Neil","O'Neill","O'Reilly","O'Rourke","O'Shee","O'Toole","Paapanen","Pablo","Pace","Pacheco","Pachston","Pachton","Pacian","Pacien","Pacifa","Pacifica","Pacificas","Pacificia","Pack","Packer","Packston","Packton","Paco","Pacorro","Paddie","Paddy","Padegs","Paderna","Padget","Padgett","Padraic","Padraig","Padriac","Paff","Pagas","Page","Pages","Paget","Pahl","Paige","Paik","Pail","Pain","Paine","Painter","Palecek","Palermo","Palestine","Paley","Palgrave","Palila","Pall","Palla","Palladin","Pallas","Pallaten","Pallaton","Pallua","Palm","Palma","Palmer","Palmira","Palmore","Palocz","Paloma","Pals","Palua","Paluas","Palumbo","Pam","Pamela","Pamelina","Pamella","Pammi","Pammie","Pammy","Pampuch","Pan","Panaggio","Panayiotis","Panchito","Pancho","Pandich","Pandolfi","Pandora","Pang","Pangaro","Pani","Pansie","Pansir","Pansy","Panta","Panter","Panthea","Pantheas","Panther","Panthia","Pantia","Pantin","Paola","Paolina","Paolo","Papagena","Papageno","Pape","Papert","Papke","Papotto","Papp","Pappano","Pappas","Papst","Paquito","Par","Paradies","Parcel","Pardew","Pardner","Pardo","Pardoes","Pare","Parent","Paresh","Parette","Parfitt","Parhe","Parik","Paris","Parish","Park","Parke","Parker","Parks","Parlin","Parnas","Parnell","Parrie","Parris","Parrisch","Parrish","Parrnell","Parrott","Parry","Parsaye","Parshall","Parsifal","Parsons","Partan","Parthen","Parthena","Parthenia","Parthinia","Particia","Partridge","Paryavi","Pas","Pasadis","Pasahow","Pascal","Pascale","Pascasia","Pascha","Paschasia","Pascia","Pasco","Pascoe","Pasho","Pasia","Paske","Pasol","Pasquale","Pass","Past","Pastelki","Pat","Pate","Paten","Paterson","Pathe","Patience","Patin","Patman","Patnode","Paton","Patric","Patrica","Patrice","Patrich","Patricia","Patricio","Patrick","Patrizia","Patrizio","Patrizius","Patsis","Patsy","Patt","Pattani","Patten","Patterman","Patterson","Patti","Pattie","Pattin","Pattison","Patton","Patty","Paucker","Paugh","Pauiie","Paul","Paula","Paule","Pauletta","Paulette","Pauli","Paulie","Paulina","Pauline","Paulita","Paulo","Paulsen","Paulson","Pauly","Pauwles","Pavel","Paver","Pavia","Pavier","Pavior","Paviour","Pavkovic","Pavla","Pavlish","Pavlov","Pavyer","Pawsner","Pax","Paxon","Paxton","Paymar","Payne","Paynter","Payson","Payton","Paz","Paza","Pazia","Pazice","Pazit","Peace","Peacock","Peadar","Peale","Pearce","Pearl","Pearla","Pearle","Pearline","Pearlman","Pearlstein","Pearman","Pears","Pearse","Pearson","Pease","Peatroy","Pebrook","Peck","Peckham","Pedaiah","Pedaias","Peddada","Peder","Pedersen","Pederson","
Pedrick","Pedro","Pedrotti","Pedroza","Peer","Peers","Peery","Peg","Pega","Pegasus","Pegeen","Pegg","Peggi","Peggie","Peggir","Peggy","Pegma","Peh","Peirce","Peirsen","Peisch","Pejsach","Pelag","Pelaga","Pelage","Pelagi","Pelagia","Pelagias","Pell","Pellegrini","Pellet","Pelletier","Pelligrini","Pellikka","Pelmas","Pelpel","Pelson","Peltier","Peltz","Pember","Pembroke","Pembrook","Pen","Pena","Pence","Pendergast","Pendleton","Penelopa","Penelope","Pengelly","Penhall","Penland","Penman","Penn","Pennebaker","Penney","Penni","Pennie","Pennington","Penny","Penoyer","Penrod","Penrose","Pentha","Penthea","Pentheam","Pentheas","Peonir","Peony","Peoples","Pepe","Peper","Pepi","Pepillo","Pepin","Pepita","Pepito","Peppard","Peppel","Pepper","Peppi","Peppie","Peppy","Per","Perce","Perceval","Percival","Percy","Perdita","Peregrine","Pergrim","Peri","Peria","Perice","Perkin","Perkins","Perkoff","Perl","Perla","Perle","Perlie","Perlis","Perlman","Perloff","Pernas","Pernell","Perni","Pernick","Pero","Perot","Perpetua","Perr","Perreault","Perren","Perretta","Perri","Perrie","Perrin","Perrine","Perrins","Perron","Perry","Persas","Perseus","Persian","Persis","Persons","Persse","Persson","Perusse","Perzan","Pesek","Peskoff","Pessa","Pestana","Pet","Peta","Pete","Peter","Peterec","Peterman","Peters","Petersen","Peterson","Peterus","Petes","Petey","Peti","Petie","Petigny","Petit","Petite","Petr","Petra","Petracca","Petras","Petrick","Petrie","Petrina","Petrine","Petromilli","Petronella","Petronia","Petronilla","Petronille","Petta","Pettifer","Pettiford","Pettit","Petty","Petua","Petula","Petulah","Petulia","Petunia","Petuu","Peugia","Peursem","Pevzner","Peyter","Peyton","Pfaff","Pfeffer","Pfeifer","Pfister","Pfosi","Phaedra","Phaidra","Phaih","Phail","Phalan","Pharaoh","Phare","Phares","Phebe","Phedra","Phelan","Phelgen","Phelgon","Phelia","Phelips","Phelps","Phemia","Phene","Pheni","Phenica","Phenice","Phi","Phia","Phil","Phila","Philan","Philana","Philander","Philbert","Philbin","Philbo","Philbrook","Philcox","Philemol","Philemon","Philender","Philina","Philine","Philip","Philipa","Philipines","Philipp","Philippa","Philippe","Philippine","Philipps","Philips","Philipson","Philis","Phillada","Phillane","Phillida","Phillie","Phillip","Phillipe","Phillipp","Phillips","Phillis","Philly","Philo","Philomena","Philoo","Philpot","Philps","Phina","Phineas","Phio","Phiona","Phionna","Phip","Phippen","Phipps","Phira","Phoebe","Phonsa","Photima","Photina","Phox","Phyl","Phylis","Phyllida","Phyllis","Phyllys","Phylys","Pia","Piane","Picardi","Picco","Pich","Pickar","Pickard","Pickens","Picker","Pickering","Pickett","Pickford","Piderit","Piefer","Piegari","Pier","Pierce","Pierette","Piero","Pierpont","Pierre","Pierrepont","Pierrette","Pierro","Piers","Pierson","Pieter","Pietje","Pietra","Pietrek","Pietro","Pigeon","Piggy","Pike","Pilar","Pilloff","Pillow","Pillsbury","Pimbley","Pincas","Pinchas","Pincince","Pinckney","Pincus","Pine","Pinebrook","Pineda","Pinelli","Pinette","Ping","Pinkerton","Pinkham","Pinsky","Pinter","Pinto","Pinzler","Piotr","Pip","Piper","Pippa","Pippas","Pippo","Pippy","Pirali","Pirbhai","Pirnot","Pironi","Pirozzo","Pirri","Pirzada","Pisano","Pisarik","Piscatelli","Piselli","Pish","Pitarys","Pitchford","Pitt","Pittel","Pittman","Pitts","Pitzer","Pius","Piwowar","Pizor","Placeeda","Placia","Placida","Placidia","Placido","Plafker","Plank","Plantagenet","Plante","Platas","Plate","Plath","Plato","Platon","Platt","Platto","Platus","Player","Pleasant","Pleione","Plerre","Pliam","Pliner","Pliske","Ploch","Plo
ss","Plossl","Plotkin","Plumbo","Plume","Plunkett","Plusch","Podvin","Pogue","Poirer","Pokorny","Pol","Polad","Polak","Poland","Polard","Polash","Poler","Poliard","Polik","Polinski","Polish","Politi","Polito","Polivy","Polk","Polky","Poll","Pollack","Pollak","Pollard","Pollerd","Pollie","Pollitt","Polloch","Pollock","Pollux","Polly","Pollyanna","Pomcroy","Pomeroy","Pomfret","Pomfrey","Pomona","Pompea","Pompei","Ponce","Pond","Pontias","Pontius","Ponton","Pontone","Pontus","Ponzo","Poock","Pooh","Pooi","Pool","Poole","Pooley","Poore","Pope","Popele","Popelka","Poppas","Popper","Poppo","Poppy","Porche","Porcia","Poree","Porett","Port","Porta","Porte","Porter","Portia","Portie","Portingale","Portland","Portugal","Portuna","Portwin","Portwine","Porty","Porush","Posehn","Posner","Possing","Post","Postman","Potash","Potter","Potts","Poucher","Poul","Poulter","Pouncey","Pournaras","Powder","Powe","Powel","Powell","Power","Powers","Pownall","Poyssick","Pozzy","Pradeep","Prader","Prady","Prager","Prakash","Prasad","Pratt","Pratte","Pravit","Prebo","Preciosa","Preiser","Prem","Premer","Pren","Prendergast","Prent","Prentice","Prentiss","Presber","Prescott","Presley","Press","Pressey","Pressman","Prestige","Preston","Pretrice","Preuss","Previdi","Prevot","Price","Prichard","Pricilla","Pride","Priebe","Priest","Priestley","Prima","Primalia","Primavera","Primaveras","Primaveria","Primo","Primrosa","Primrose","Prince","Princess","Prinz","Prior","Pris","Prisca","Priscella","Priscilla","Prisilla","Prissie","Prissy","Pritchard","Pritchett","Prober","Prochora","Prochoras","Procora","Procter","Procto","Proctor","Profant","Proffitt","Pronty","Pros","Prosper","Prospero","Prosperus","Prosser","Proud","Proudfoot","Proudlove","Proudman","Proulx","Prouty","Prowel","Pru","Pruchno","Prud","Prudence","Prudhoe","Prudi","Prudie","Prudy","Prue","Prunella","Prussian","Pruter","Pry","Pryce","Pryor","Psyche","Pubilis","Publea","Publia","Publias","Publius","Publus","Pucida","Pudendas","Pudens","Puduns","Puett","Pufahl","Puff","Pugh","Puglia","Puiia","Puklich","Pul","Pulcheria","Pulchi","Pulchia","Pulling","Pulsifer","Pump","Punak","Punke","Purcell","Purdum","Purdy","Puri","Purington","Puritan","Purity","Purpura","Purse","Purvis","Putnam","Putnem","Puto","Putscher","Puttergill","Py","Pyle","Pylle","Pyne","Pyotr","Pyszka","Pytlik","Quackenbush","Quar","Quarta","Quartana","Quartas","Quartet","Quartis","Quartus","Queen","Queena","Queenie","Quenby","Quenna","Quennie","Quent","Quentin","Queri","Querida","Queridas","Questa","Queston","Quick","Quickel","Quickman","Quigley","Quill","Quillan","Quillon","Quin","Quinby","Quince","Quincey","Quincy","Quinlan","Quinn","Quint","Quinta","Quintana","Quintessa","Quintie","Quintilla","Quintin","Quintina","Quinton","Quintus","Quirita","Quirk","Quita","Quiteri","Quiteria","Quiteris","Quitt","Qulllon","Raab","Raama","Raasch","Rab","Rabah","Rabassa","Rabbi","Rabelais","Rabi","Rabiah","Rabin","Rabjohn","Rabkin","Rabush","Race","Rachaba","Rachael","Rachel","Rachele","Rachelle","Racklin","Rad","Radack","Radborne","Radbourne","Radbun","Radburn","Radcliffe","Raddatz","Raddi","Raddie","Raddy","Radferd","Radford","Radie","Radke","Radley","Radloff","Radman","Radmen","Radmilla","Radu","Rae","Raeann","Raf","Rafa","Rafael","Rafaela","Rafaelia","Rafaelita","Rafaelle","Rafaellle","Rafaello","Rafaelof","Rafat","Rafe","Raff","Raffaello","Raffarty","Rafferty","Raffin","Raffo","Rafi","Rafiq","Rafter","Ragan","Ragen","Ragg","Ragland","Ragnar","Ragouzis","Ragucci","Rahal","Rahel","Rahm","Rahman","Rahmann","Rahr","Rai"
,"Raila","Raimes","Raimondo","Raimund","Raimundo","Raina","Rainah","Raine","Rainer","Raines","Rainger","Rainie","Rains","Rainwater","Rajewski","Raji","Rajiv","Rakel","Rakia","Ralaigh","Raleigh","Ralf","Ralfston","Ralina","Ralleigh","Ralli","Ralph","Ralston","Ram","Rama","Ramah","Raman","Ramberg","Rambert","Rambort","Rambow","Ramburt","Rame","Ramey","Ramiah","Ramin","Ramon","Ramona","Ramonda","Ramos","Ramsay","Ramsdell","Ramsden","Ramses","Ramsey","Ramunni","Ran","Rana","Rance","Rancell","Ranchod","Rand","Randa","Randal","Randall","Randee","Randell","Randene","Randi","Randie","Randolf","Randolph","Randy","Ranee","Raney","Range","Rangel","Ranger","Rani","Rania","Ranice","Ranie","Ranique","Ranit","Ranita","Ranite","Ranitta","Ranjiv","Rankin","Rann","Ranna","Ransell","Ransom","Ransome","Ranson","Ranzini","Rao","Raouf","Raoul","Rap","Rape","Raphael","Raphaela","Rapp","Raquel","Raquela","Ras","Raseda","Raseta","Rashida","Rashidi","Rasia","Rask","Raskin","Raskind","Rasla","Rasmussen","Rastus","Rasure","Ratcliff","Ratcliffe","Ratha","Rather","Ratib","Rattan","Rattray","Rauch","Raul","Rausch","Rauscher","Raveaux","Raven","Ravens","Ravi","Ravid","Raviv","Ravo","Rawdan","Rawden","Rawdin","Rawdon","Rawley","Rawlinson","Ray","Raybin","Raybourne","Rayburn","Raychel","Raycher","Raye","Rayford","Rayle","Raymond","Raymonds","Raymund","Rayna","Raynah","Raynard","Raynata","Raynell","Rayner","Raynold","Raynor","Rayshell","Razid","Rea","Reace","Read","Reade","Readus","Ready","Reagan","Reagen","Reahard","Reames","Reamonn","Reamy","Reave","Reba","Rebah","Rebak","Rebane","Rebba","Rebbecca","Rebe","Rebeca","Rebecca","Rebecka","Rebeka","Rebekah","Rebekkah","Rebel","Rebhun","Rech","Recha","Rechaba","Reckford","Recor","Rector","Red","Redd","Reddin","Reddy","Redfield","Redford","Redman","Redmer","Redmond","Redmund","Redvers","Redwine","Ree","Reeba","Reece","Reed","Reede","Reedy","Reeher","Reel","Reena","Rees","Reese","Reeta","Reeva","Reeve","Reeves","Reg","Regan","Regazzi","Regen","Reger","Reggi","Reggie","Reggis","Reggy","Regina","Reginald","Reginauld","Regine","Rego","Rehm","Rehnberg","Reich","Reiche","Reichel","Reichert","Reid","Reidar","Reider","Reifel","Reiko","Reilly","Reimer","Rein","Reina","Reinald","Reinaldo","Reinaldos","Reine","Reiner","Reiners","Reinert","Reinertson","Reinhard","Reinhardt","Reinhart","Reinhold","Reinke","Reinold","Reinwald","Reis","Reisch","Reiser","Reisfield","Reisinger","Reisman","Reiss","Reiter","Reitman","Reld","Rella","Rellia","Relly","Rem","Rema","Remde","Remington","Remmer","Rempe","Remsen","Remus","Remy","Rena","Renado","Renae","Renaldo","Renard","Renata","Renate","Renato","Renaud","Renault","Renckens","Rene","Renee","Renell","Renelle","Reneta","Renferd","Renfred","Reni","Renick","Renie","Renita","Reniti","Rennane","Renner","Rennie","Rennold","Renny","Rento","Rentsch","Rentschler","Renwick","Renzo","Reo","Resa","Rese","Reseda","Resee","Reseta","Resor","Ress","Ressler","Reste","Restivo","Reta","Retha","Rett","Rettig","Rettke","Reube","Reuben","Reuven","Revell","Reviel","Reviere","Revkah","Rew","Rex","Rexana","Rexanna","Rexanne","Rexer","Rexferd","Rexford","Rexfourd","Rey","Reyna","Reynard","Reynold","Reynolds","Rezzani","Rhea","Rheba","Rhee","Rheims","Rheingold","Rheinlander","Rheta","Rhett","Rhetta","Rhiamon","Rhiana","Rhianna","Rhianon","Rhine","Rhines","Rhoades","Rhoads","Rhoda","Rhodes","Rhodia","Rhodie","Rhody","Rhona","Rhonda","Rhu","Rhynd","Rhyne","Rhyner","Rhys","Ri","Ria","Riana","Riancho","Riane","Rianna","Riannon","Rianon","Riba","Ribal","Ribaudo","Ribble","Ric","Rica","Ri
card","Ricarda","Ricardama","Ricardo","Ricca","Riccardo","Riccio","Rice","Rich","Richara","Richard","Richarda","Richardo","Richards","Richardson","Richart","Richel","Richela","Richella","Richelle","Richer","Richers","Richey","Richia","Richie","Richlad","Richma","Richmal","Richman","Richmond","Richmound","Richter","Richy","Rici","Rick","Rickard","Rickart","Ricker","Rickert","Ricketts","Rickey","Ricki","Rickie","Ricky","Rico","Ricoriki","Rida","Riddle","Rider","Ridglea","Ridglee","Ridgley","Ridinger","Ridley","Rie","Riebling","Riedel","Riegel","Rieger","Riehl","Riella","Ries","Riesman","Riess","Rieth","Riffle","Rifkin","Rigby","Rigdon","Riggall","Riggins","Riggs","Riha","Rihana","Rik","Rika","Riker","Riki","Rikki","Rilda","Riley","Rillings","Rillis","Rima","Rimas","Rimma","Rimola","Rina","Rinaldo","Rind","Rinee","Ring","Ringe","Ringler","Ringo","Ringsmuth","Rinna","Rintoul","Riobard","Riocard","Rior","Riordan","Riorsson","Rip","Ripleigh","Riplex","Ripley","Ripp","Risa","Rise","Risley","Rissa","Risser","Rist","Risteau","Rita","Ritch","Ritchie","Riti","Ritter","Ritz","Riva","Rivalee","Rivard","River","Rivera","Rivers","Rives","Rivi","Rivkah","Rivy","Rizas","Rizika","Rizzi","Rizzo","Ro","Roach","Roana","Roane","Roanna","Roanne","Roarke","Roath","Rob","Robaina","Robb","Robbert","Robbi","Robbie","Robbin","Robbins","Robby","Robbyn","Robena","Robenia","Robers","Roberson","Robert","Roberta","Roberto","Roberts","Robertson","Robet","Robi","Robillard","Robin","Robina","Robinet","Robinett","Robinetta","Robinette","Robinia","Robins","Robinson","Robison","Robson","Roby","Robyn","Rocca","Rocco","Roch","Roche","Rochell","Rochella","Rochelle","Rochemont","Rocher","Rochester","Rochette","Rochkind","Rochus","Rock","Rockafellow","Rockefeller","Rockel","Rocker","Rockey","Rockie","Rockwell","Rockwood","Rocky","Rocray","Rod","Roda","Rodd","Roddie","Roddy","Rodenhouse","Roderic","Roderica","Roderich","Roderick","Roderigo","Rodge","Rodger","Rodgers","Rodi","Rodie","Rodina","Rodl","Rodman","Rodmann","Rodmun","Rodmur","Rodney","Rodolfo","Rodolph","Rodolphe","Rodrich","Rodrick","Rodrigo","Rodriguez","Rodrique","Roe","Roede","Roee","Roehm","Roer","Roeser","Rog","Roger","Rogerio","Rogers","Rogerson","Rogovy","Rogozen","Rohn","Roi","Roice","Roid","Rois","Rojas","Rokach","Rola","Rolan","Roland","Rolanda","Rolando","Rolandson","Roldan","Roley","Rolf","Rolfe","Rolfston","Rolland","Rollet","Rollie","Rollin","Rollins","Rollo","Rolo","Rolph","Roma","Romain","Romaine","Romalda","Roman","Romanas","Romano","Rombert","Rome","Romelda","Romelle","Romeo","Romeon","Romeu","Romeyn","Romie","Romilda","Romilly","Romina","Romine","Romito","Romney","Romo","Romola","Romona","Romonda","Romulus","Romy","Ron","Rona","Ronal","Ronald","Ronalda","Ronda","Rondi","Rondon","Ronel","Ronen","Ronica","Ronn","Ronna","Ronnholm","Ronni","Ronnica","Ronnie","Ronny","Roobbie","Rooke","Rooker","Rooney","Roos","Roose","Roosevelt","Root","Roots","Roper","Roque","Rora","Rori","Rorie","Rorke","Rorry","Rorrys","Rory","Ros","Rosa","Rosabel","Rosabella","Rosabelle","Rosalba","Rosalee","Rosaleen","Rosalia","Rosalie","Rosalind","Rosalinda","Rosalinde","Rosaline","Rosalyn","Rosalynd","Rosamond","Rosamund","Rosana","Rosane","Rosanna","Rosanne","Rosario","Rosati","Rosco","Roscoe","Rose","Roseann","Roseanna","Roseanne","Rosecan","Rosel","Roselane","Roselani","Roselba","Roselia","Roselin","Roseline","Rosella","Roselle","Roselyn","Rosemare","Rosemari","Rosemaria","Rosemarie","Rosemary","Rosemonde","Rosen","Rosena","Rosenbaum","Rosenberg","Rosenberger","Rosenblast","Rosenblatt
","Rosenblum","Rosene","Rosenfeld","Rosenkrantz","Rosenkranz","Rosenquist","Rosenstein","Rosenthal","Rosenwald","Rosenzweig","Rosetta","Rosette","Roshan","Roshelle","Rosie","Rosina","Rosinski","Rosio","Rosita","Roskes","Roslyn","Rosmarin","Rosmunda","Rosner","Rosol","Ross","Rosse","Rossen","Rossi","Rossie","Rossing","Rossner","Rossuck","Rossy","Rostand","Roswald","Roswell","Rosy","Rotberg","Roter","Roth","Rothberg","Rothenberg","Rother","Rothmuller","Rothschild","Rothstein","Rothwell","Roti","Rotman","Rotow","Roumell","Rourke","Routh","Rouvin","Roux","Rovelli","Rovit","Rovner","Row","Rowan","Rowe","Rowell","Rowen","Rowena","Rowland","Rowley","Rowney","Rox","Roxana","Roxane","Roxanna","Roxanne","Roxi","Roxie","Roxine","Roxy","Roy","Royal","Royall","Roybn","Royce","Royd","Roydd","Royden","Roye","Royo","Roz","Rozalie","Rozalin","Rozamond","Rozanna","Rozanne","Roze","Rozek","Rozele","Rozella","Rozelle","Rozina","Rriocard","Ru","Rubbico","Rube","Rubel","Ruben","Rubens","Rubenstein","Ruberta","Rubetta","Rubi","Rubia","Rubie","Rubin","Rubina","Rubinstein","Rubio","Ruby","Rucker","Ruckman","Rudd","Ruddie","Ruddy","Rudelson","Ruder","Rudich","Rudie","Rudiger","Rudin","Rudman","Rudolf","Rudolfo","Rudolph","Rudwik","Rudy","Rudyard","Rue","Ruel","Ruella","Ruelle","Ruelu","Rufe","Rufena","Ruff","Ruffi","Ruffin","Ruffina","Ruffo","Rufford","Rufina","Ruford","Rufus","Rugen","Rugg","Ruggiero","Ruhl","Ruhnke","Ruiz","Rumery","Rumilly","Rumney","Rumpf","Runck","Rundgren","Runkel","Runkle","Runstadler","Rupert","Ruperta","Ruperto","Ruphina","Ruprecht","Rurik","Rus","Ruscher","Ruscio","Rusel","Rusell","Rusert","Rush","Rushing","Ruskin","Russ","Russel","Russell","Russi","Russia","Russian","Russo","Russom","Russon","Rust","Rustice","Rusticus","Rustie","Rustin","Rusty","Rutan","Rutger","Ruth","Ruthann","Ruthanne","Ruthe","Rutherford","Rutherfurd","Ruthi","Ruthie","Ruthven","Ruthy","Rutledge","Rutter","Ruttger","Ruvolo","Ruy","Ruyle","Ruzich","Ryan","Ryann","Rycca","Rydder","Ryder","Rye","Ryle","Ryley","Ryon","Rysler","Ryter","Ryun","Saba","Sabah","Sabba","Sabec","Sabella","Sabelle","Saber","Saberhagen","Saberio","Sabian","Sabina","Sabine","Sabino","Sabir","Sabra","Sabrina","Sabsay","Sabu","Sacci","Sacha","Sachi","Sachiko","Sachs","Sachsse","Sacken","Sackey","Sackman","Sacks","Sacksen","Sackville","Sacttler","Sad","Sada","Saddler","Sadella","Sadick","Sadie","Sadira","Sadirah","Sadiras","Sadler","Sadoc","Sadoff","Sadonia","Sadowski","Sadye","Saeger","Saffian","Saffier","Saffren","Safier","Safir","Safire","Safko","Sage","Sager","Sagerman","Saidee","Saidel","Saideman","Saied","Saiff","Sailesh","Saimon","Saint","Sair","Saire","Saito","Sajovich","Sakhuja","Sakmar","Sakovich","Saks","Sal","Salahi","Salaidh","Salamanca","Salamone","Salangi","Salangia","Salas","Salazar","Salba","Salbu","Salchunas","Sale","Saleem","Salem","Salema","Saleme","Salena","Salene","Salesin","Salim","Salina","Salinas","Salisbarry","Salisbury","Salita","Sall","Sallee","Salli","Sallie","Sally","Sallyann","Sallyanne","Salman","Salmon","Saloma","Salome","Salomi","Salomie","Salomo","Salomon","Salomone","Salot","Salsbury","Salter","Saltsman","Saltzman","Salvador","Salvadore","Salvatore","Salvay","Salvidor","Salvucci","Salzhauer","Sam","Sama","Samal","Samala","Samale","Samalla","Samantha","Samanthia","Samara","Samaria","Samau","Samella","Samford","Sami","Samira","Sammer","Sammie","Sammons","Sammy","Samp","Sampson","Sams","Samson","Samuel","Samuela","Samuele","Samuella","Samuelson","Samul","Samy","Sanalda","Sanbo","Sanborn","Sanborne","Sanburn","Sancha","
Sanchez","Sancho","Sand","Sandberg","Sande","Sandeep","Sandell","Sander","Sanders","Sanderson","Sandi","Sandie","Sandler","Sandon","Sandor","Sandra","Sandro","Sandry","Sands","Sandstrom","Sandy","Sandye","Sanferd","Sanfo","Sanford","Sanfourd","Sanfred","Sang","Sanger","Sanjay","Sanjiv","Sankaran","Sankey","Sansbury","Sansen","Sanson","Sansone","Santa","Santana","Santiago","Santini","Santoro","Santos","Sanyu","Sapers","Saphra","Sapienza","Sapowith","Sapphera","Sapphira","Sapphire","Sara","Sara-Ann","Saraann","Sarad","Sarah","Saraiya","Sarajane","Sarazen","Sarchet","Sardella","Saree","Sarena","Sarene","Saretta","Sarette","Sarge","Sargent","Sari","Sarid","Sarilda","Sarina","Sarine","Sarita","Sarkaria","Sarnoff","Sarson","Sartin","Sascha","Sasha","Sashenka","Sasnett","Sass","Sassan","Sateia","Sathrum","Sato","Satterfield","Satterlee","Saturday","Saucy","Sauder","Saudra","Sauer","Sauers","Saul","Sauls","Saum","Sauncho","Saunder","Saunders","Saunderson","Saundra","Sausa","Sauveur","Savadove","Savage","Saval","Savanna","Savannah","Savdeep","Savell","Savick","Savil","Savill","Saville","Savina","Savior","Savitt","Savory","Saw","Sawtelle","Sawyer","Sawyere","Sawyor","Sax","Saxe","Saxen","Saxena","Saxon","Say","Sayce","Sayed","Sayer","Sayers","Sayette","Sayles","Saylor","Sayre","Sayres","Scales","Scammon","Scandura","Scarface","Scarito","Scarlet","Scarlett","Scarrow","Scever","Scevo","Scevor","Scevour","Schaab","Schaaff","Schach","Schacker","Schaefer","Schaeffer","Schafer","Schaffel","Schaffer","Schalles","Schaper","Schapira","Scharaga","Scharf","Scharff","Schargel","Schatz","Schaumberger","Schear","Schechinger","Schechter","Scheck","Schecter","Scheer","Scheers","Scheider","Scheld","Schell","Schellens","Schenck","Scherle","Scherman","Schertz","Schick","Schiff","Schiffman","Schifra","Schild","Schilit","Schilling","Schilt","Schindler","Schinica","Schiro","Schlenger","Schlesinger","Schlessel","Schlessinger","Schlicher","Schlosser","Schluter","Schmeltzer","Schmidt","Schmitt","Schmitz","Schnabel","Schnapp","Schnell","Schnorr","Schnur","Schnurr","Schober","Schoenberg","Schoenburg","Schoenfelder","Schoening","Schofield","Scholem","Scholz","Schonfeld","Schonfield","Schonthal","Schoof","Schott","Schou","Schouten","Schrader","Schram","Schramke","Schreck","Schreib","Schreibe","Schreiber","Schreibman","Schrick","Schriever","Schroder","Schroeder","Schroer","Schroth","Schubert","Schug","Schuh","Schulein","Schuler","Schulman","Schultz","Schulz","Schulze","Schuman","Schumer","Schurman","Schuster","Schuyler","Schwab","Schwartz","Schwarz","Schweiker","Schweitzer","Schwejda","Schwenk","Schwerin","Schwing","Schwinn","Schwitzer","Scibert","Sclar","Sclater","Scoles","Scopp","Scornik","Scot","Scoter","Scotney","Scott","Scotti","Scottie","Scotty","Scoville","Screens","Scribner","Scriven","Scrivenor","Scrivens","Scrivings","Scrogan","Scrope","Sculley","Scully","Scurlock","Scutt","Seabrook","Seabrooke","Seabury","Seaddon","Seaden","Seadon","Seafowl","Seagrave","Seagraves","Seale","Seaman","Seamus","Sean","Seana","Searby","Searcy","Searle","Sears","Season","Seaton","Seaver","Seavey","Seavir","Sebastian","Sebastiano","Sebastien","Sebbie","Secor","Secrest","Secunda","Secundas","Seda","Sedberry","Sedda","Sedgewake","Sedgewick","Sedgewinn","Sedlik","See","Seebeck","Seed","Seedman","Seel","Seely","Seem","Seema","Seen","Seena","Seessel","Seeto","Seften","Sefton","Seftton","Segal","Segalman","Seiber","Seibold","Seidel","Seiden","Seidler","Seidule","Seif","Seigel","Seigler","Seiter","Seitz","Seka","Seko","Sekofski","Sekyere","Sela","Se
lassie","Selby","Selda","Seldan","Selden","Seldon","Seldun","Selemas","Selena","Selene","Selestina","Seleta","Selfridge","Selhorst","Selia","Selie","Selig","Seligman","Seligmann","Selima","Selimah","Selina","Selinda","Seline","Selinski","Sell","Sella","Selle","Sellers","Sellma","Sello","Sells","Selma","Selmner","Selmore","Selry","Seltzer","Selway","Selwin","Selwyn","Semela","Semele","Semmes","Sena","Senalda","Sender","Senecal","Senhauser","Senior","Senn","Sension","Senskell","Senzer","Seow","Sephira","Seppala","September","Septima","Sera","Serafina","Serafine","Seraphim","Seraphina","Seraphine","Serena","Serene","Serg","Serge","Sergeant","Sergei","Sergent","Sergias","Sergio","Sergius","Sergo","Sergu","Serica","Serilda","Serle","Serles","Seroka","Serra","Serrano","Serrell","Servais","Server","Servetnick","Service","Sessler","Seta","Seth","Sethi","Sethrida","Seto","Seton","Settera","Settle","Seumas","Sev","Seve","Severen","Severin","Severn","Severson","Sevik","Seward","Sewel","Sewell","Sewellyn","Sewole","Sewoll","Sexton","Seyler","Seymour","Seys","Sezen","Shabbir","Shaddock","Shadow","Shae","Shaefer","Shaeffer","Shaer","Shafer","Shaff","Shaffer","Shaffert","Shah","Shaia","Shaikh","Shaina","Shaine","Shakespeare","Shakti","Shalna","Shalne","Shalom","Shama","Shamma","Shamrao","Shamus","Shana","Shanahan","Shanan","Shanda","Shandee","Shandeigh","Shandie","Shandra","Shandy","Shane","Shaner","Shani","Shanie","Shank","Shanks","Shanleigh","Shanley","Shanly","Shanna","Shannah","Shannan","Shannen","Shanney","Shannon","Shanon","Shanta","Shantee","Shantha","Shaper","Shapiro","Shara","Sharai","Shargel","Shari","Sharia","Sharity","Sharl","Sharla","Sharleen","Sharlene","Sharline","Sharma","Sharman","Sharon","Sharona","Sharos","Sharp","Sharpe","Sharron","Sharyl","Shatzer","Shaughn","Shaughnessy","Shaum","Shaun","Shauna","Shaver","Shaw","Shawn","Shawna","Shawnee","Shay","Shaya","Shayla","Shaylah","Shaylyn","Shaylynn","Shayn","Shayna","Shayne","Shea","Sheaff","Shear","Sheba","Shedd","Sheeb","Sheedy","Sheehan","Sheela","Sheelagh","Sheelah","Sheena","Sheepshanks","Sheeran","Sheeree","Sheets","Sheff","Sheffie","Sheffield","Sheffy","Sheila","Sheilah","Shel","Shela","Shelagh","Shelah","Shelba","Shelbi","Shelburne","Shelby","Shelden","Sheldon","Sheley","Shelia","Sheline","Shell","Shellans","Shelley","Shelli","Shellie","Shelly","Shelman","Shelton","Shem","Shena","Shenan","Sheng","Shep","Shepard","Shepherd","Shepley","Sheply","Shepp","Sheppard","Shepperd","Sher","Sherar","Sherard","Sherborn","Sherborne","Sherburn","Sherburne","Shere","Sheree","Sherer","Shererd","Sherfield","Sheri","Sheridan","Sherie","Sherill","Sherilyn","Sherj","Sherl","Sherline","Sherlock","Sherlocke","Sherm","Sherman","Shermie","Shermy","Sherourd","Sherr","Sherrard","Sherrer","Sherri","Sherrie","Sherrill","Sherris","Sherrod","Sherry","Sherurd","Sherwin","Sherwood","Sherwynd","Sherye","Sheryl","Sheryle","Shetrit","Shevlo","Shewchuk","Shewmaker","Sheya","Shiau","Shieh","Shiekh","Shields","Shien","Shiff","Shifra","Shifrah","Shig","Shih","Shiller","Shimberg","Shimkus","Shina","Shinberg","Shing","Shipley","Shipman","Shipp","Shippee","Shir","Shira","Shirah","Shirberg","Shiri","Shirk","Shirl","Shirlee","Shirleen","Shirlene","Shirley","Shirlie","Shirline","Shiroma","Shishko","Shiverick","Shivers","Shlomo","Shoemaker","Shoifet","Sholeen","Sholem","Sholes","Sholley","Sholom","Shore","Shornick","Short","Shorter","Shoshana","Shoshanna","Shotton","Showker","Shreeves","Shreve","Shrier","Shriner","Shriver","Shu","Shue","Shugart","Shulamith","Shulem","Shuler","Shu
lins","Shull","Shulman","Shulock","Shult","Shultz","Shum","Shuma","Shuman","Shumway","Shuping","Shurlock","Shurlocke","Shurwood","Shushan","Shute","Shutz","Shwalb","Shyamal","Si","Siana","Sianna","Sib","Sibbie","Sibby","Sibeal","Sibel","Sibell","Sibella","Sibelle","Siberson","Sibie","Sibilla","Sible","Siblee","Sibley","Sibyl","Sibylla","Sibylle","Sibyls","Sicard","Sices","Siclari","Sicular","Sid","Sida","Siddon","Siddra","Sidell","Sidhu","Sidky","Sidman","Sidnee","Sidney","Sidoma","Sidon","Sidoney","Sidonia","Sidonie","Sidonius","Sidonnie","Sidoon","Sidra","Sidran","Sidras","Sidwel","Sidwell","Sidwohl","Sieber","Siegel","Siegfried","Siegler","Sielen","Sieracki","Sierra","Siesser","Sievert","Siffre","Sig","Sigfrid","Sigfried","Sigismond","Sigismondo","Sigismund","Sigismundo","Sigler","Sigmund","Signe","Sigrid","Sigsmond","Sigvard","Sihon","Sihonn","Sihun","Sihunn","Sik","Sikata","Sikes","Sikko","Sikorski","Sil","Silas","Silber","Silberman","Silda","Silden","Sile","Sileas","Silin","Sill","Sillsby","Silma","Siloa","Siloam","Siloum","Silsby","Silsbye","Silva","Silvain","Silvan","Silvana","Silvano","Silvanus","Silver","Silverman","Silvers","Silverstein","Silverts","Silvester","Silvestro","Silvia","Silvie","Silvio","Sim","Sima","Simah","Simdars","Simeon","Simmie","Simmonds","Simmons","Simon","Simona","Simone","Simonetta","Simonette","Simonne","Simons","Simonsen","Simpkins","Simpson","Sims","Simsar","Simson","Sinai","Sinclair","Sinclare","Sindee","Sine","Sinegold","Singband","Singer","Singh","Singhal","Singleton","Sink","Sinnard","Siobhan","Sion","Sioux","Siouxie","Sipple","Sirkin","Sirmons","Sirois","Sirotek","Sisak","Sisco","Sisely","Sisile","Siskind","Sissel","Sissie","Sisson","Sissy","Sisto","Sitarski","Sitnik","Sitra","Siubhan","Siusan","Sivia","Sivie","Siward","Sjoberg","Skantze","Skardol","Skees","Skeie","Skell","Skelly","Skelton","Skerl","Skiba","Skier","Skiest","Skilken","Skill","Skillern","Skinner","Skip","Skipp","Skipper","Skippie","Skippy","Skipton","Sklar","Skolnik","Skricki","Skurnik","Skutchan","Skvorak","Sky","Skye","Skyla","Skylar","Skyler","Slaby","Slack","Slade","Sladen","Slater","Slaughter","Slavic","Slavin","Slayton","Sldney","Slemmer","Sletten","Slifka","Slinkman","Sliwa","Sloan","Sloane","Sloatman","Slocum","Slosberg","Slotnick","Sluiter","Sly","Slyke","Smail","Small","Smalley","Smallman","Smart","Smiga","Smiley","Smith","Smitt","Smitty","Smoot","Smukler","Snapp","Snashall","Sneed","Snell","Snider","Snoddy","Snodgrass","Snook","Snow","Snowber","Snowman","Snyder","So","Soane","Sobel","Soble","Socha","Socher","Sochor","Socrates","Soelch","Sofer","Sofia","Sofie","Sofko","Soinski","Sokil","Sokul","Sol","Sola","Solana","Solange","Solberg","Solenne","Solis","Solita","Solitta","Soll","Sollars","Solley","Sollie","Sollows","Solly","Solnit","Soloma","Soloman","Solomon","Solon","Soluk","Som","Somerset","Somerville","Sommer","Sommers","Son","Sondra","Soneson","Song","Soni","Sonia","Sonja","Sonni","Sonnie","Sonnnie","Sonny","Sonstrom","Sontag","Sontich","Sonya","Soo","Soph","Sopher","Sophey","Sophi","Sophia","Sophie","Sophronia","Sophy","Soracco","Soraya","Sorce","Sorcha","Sorci","Sorcim","Sorel","Soren","Sorensen","Sorenson","Sorilda","Sorkin","Sorrows","Sosanna","Sosna","Sosthena","Sosthenna","Sosthina","Sothena","Sotos","Sou","Soule","Soulier","Sousa","Southard","Southworth","Soutor","Souvaine","Souza","Sowell","Sower","Spada","Spain","Spalding","Spalla","Spancake","Spanjian","Spanos","Sparhawk","Spark","Sparke","Sparkie","Sparks","Sparky","Sparrow","Spatola","Spatz","Spaulding","Spea
r","Spearing","Spearman","Spears","Specht","Spector","Spence","Spencer","Spense","Spenser","Sperling","Speroni","Sperry","Spevek","Spiegel","Spiegelman","Spiegleman","Spieler","Spielman","Spiers","Spike","Spillar","Spindell","Spiro","Spiros","Spitzer","Spohr","Spooner","Spoor","Spracklen","Sprage","Spragens","Sprague","Spratt","Spring","Springer","Sproul","Sprung","Spurgeon","Squier","Squire","Squires","Srini","Staal","Stace","Stacee","Stacey","Staci","Stacia","Stacie","Stacy","Stafani","Staffan","Staffard","Stafford","Staford","Stag","Stagg","Stahl","Stalder","Staley","Stalk","Stalker","Stallworth","Stamata","Stambaugh","Stan","Stander","Standford","Standice","Standing","Standish","Standley","Standush","Stanfield","Stanfill","Stanford","Stanhope","Stanislas","Stanislaus","Stanislaw","Stanleigh","Stanley","Stanly","Stannfield","Stannwood","Stanton","Stanway","Stanwin","Stanwinn","Stanwood","Stanzel","Star","Starbuck","Stargell","Starinsky","Stark","Starkey","Starks","Starla","Starlene","Starlin","Starling","Starobin","Starr","Stasny","Staten","Statis","Stauder","Stauffer","Stav","Stavro","Stavros","Staw","Stclair","Stead","Steady","Stearn","Stearne","Stearns","Steck","Steddman","Stedman","Stedmann","Stedt","Steel","Steele","Steen","Steep","Steere","Stefa","Stefan","Stefanac","Stefania","Stefanie","Stefano","Steffane","Steffen","Steffi","Steffie","Steffin","Steffy","Stegman","Stein","Steinberg","Steiner","Steinke","Steinman","Steinway","Stella","Stelle","Stelmach","Stelu","Stempien","Stempson","Stenger","Stent","Stepha","Stephan","Stephana","Stephani","Stephania","Stephanie","Stephannie","Stephanus","Stephen","Stephenie","Stephens","Stephenson","Stephi","Stephie","Stephine","Sterling","Stern","Sternberg","Sterne","Sterner","Sternick","Sternlight","Sterrett","Stesha","Stets","Stetson","Stevana","Steve","Steven","Stevena","Stevens","Stevenson","Stevie","Stevy","Stew","Steward","Stewardson","Stewart","Stich","Stichter","Stickney","Stiegler","Stieglitz","Stier","Stig","Stila","Stiles","Still","Stilla","Stillas","Stillman","Stillmann","Stilu","Stilwell","Stimson","Stine","Stinky","Stinson","Stirling","Stoat","Stochmal","Stock","Stockmon","Stockton","Stockwell","Stoddard","Stoddart","Stodder","Stoeber","Stoecker","Stoffel","Stokes","Stoll","Stoller","Stolzer","Stone","Stoneham","Stoneman","Stonwin","Stoops","Storer","Storfer","Storm","Stormi","Stormie","Stormy","Stortz","Story","Storz","Stouffer","Stoughton","Stout","Stovall","Stover","Strade","Strader","Strage","Strain","Strait","Stralka","Strander","Strang","Stranger","Stratton","Straub","Straus","Strauss","Strawn","Streeter","Streetman","Streeto","Strenta","Strep","Strephon","Strephonn","Strepphon","Stretch","Stricklan","Strickland","Strickler","Strickman","Stringer","Strohbehn","Strohben","Strohl","Stromberg","Strong","Stronski","Stroud","Stroup","Struve","Stryker","Stu","Stuart","Stubbs","Stubstad","Stucker","Stuckey","Studdard","Studley","Studner","Studnia","Stulin","Stultz","Stuppy","Sturdivant","Sturges","Sturrock","Stutman","Stutsman","Stutzman","Styles","Su","Suanne","Subak","Subir","Sublett","Suchta","Suckow","Sucy","Sudbury","Sudderth","Sudhir","Sudnor","Sue","Suellen","Suelo","Sugar","Sugden","Sugihara","Suh","Suhail","Suilmann","Suk","Sukey","Sukhum","Suki","Sukin","Sula","Sulamith","Sullivan","Sully","Sum","Sumer","Sumerlin","Summer","Summers","Summons","Sumner","Sunda","Sunday","Sundberg","Sunderland","Sundin","Sundstrom","Suneya","Sung","Sunil","Sunny","Sunshine","Sup","Supat","Supen","Supple","Sura","Surbeck","Surovy","Survance",
"Susan","Susana","Susanetta","Susann","Susanna","Susannah","Susanne","Susette","Susi","Susie","Sussi","Sussman","Sussna","Susumu","Susy","Suter","Sutherlan","Sutherland","Sutphin","Sutton","Suu","Suzan","Suzann","Suzanna","Suzanne","Suzetta","Suzette","Suzi","Suzie","Suzy","Suzzy","Sven","Svend","Svensen","Sverre","Svetlana","Svoboda","Swagerty","Swain","Swaine","Swainson","Swamy","Swan","Swane","Swanhilda","Swanhildas","Swann","Swanson","Swart","Swarts","Swartz","Swayder","Swayne","Sweatt","Swec","Swee","Sweeney","Sweet","Swen","Swenson","Swetiana","Swetlana","Sweyn","Swiercz","Swift","Swigart","Swihart","Swinton","Swirsky","Swisher","Swithbart","Swithbert","Swithin","Switzer","Swope","Swor","Swords","Sy","Sybil","Sybila","Sybilla","Sybille","Sybley","Sybyl","Syck","Syd","Sydel","Sydelle","Sydney","Sykes","Syl","Sylas","Sylvan","Sylvanus","Sylvester","Sylvia","Sylvie","Syman","Symer","Symon","Symons","Synn","Syst","Syverson","TEirtza","Taam","Tab","Tabatha","Tabb","Tabbatha","Tabber","Tabbi","Tabbie","Tabbitha","Tabby","Taber","Tabib","Tabina","Tabitha","Tabor","Tabshey","Tace","Tacita","Tacklind","Tacy","Tacye","Tad","Tada","Tadashi","Tadd","Taddeo","Taddeusz","Tade","Tadeas","Tadeo","Tades","Tadich","Tadio","Taffy","Taft","Tager","Taggart","Tahmosh","Tai","Tailor","Taima","Taimi","Tait","Taite","Tak","Taka","Takakura","Takara","Takashi","Takeo","Takeshi","Takken","Tal","Tala","Talanian","Talanta","Talbert","Talbot","Talbott","Tali","Talia","Talich","Talie","Tallbot","Tallbott","Talley","Tallia","Tallie","Tallou","Tallu","Tallula","Tallulah","Tally","Talmud","Talya","Talyah","Tam","Tama","Tamah","Tamanaha","Tamar","Tamara","Tamarah","Tamarra","Tamaru","Tamas","Tamberg","Tamer","Tamera","Tami","Tamiko","Tamis","Tamma","Tammany","Tammara","Tammi","Tammie","Tammy","Tamqrah","Tamra","Tamsky","Tan","Tana","Tanah","Tanaka","Tanberg","Tandi","Tandie","Tandy","Tanhya","Tani","Tania","Tanitansy","Tankoos","Tann","Tannen","Tannenbaum","Tannenwald","Tanner","Tanney","Tannie","Tanny","Tansey","Tansy","Tanya","Tapes","Tara","Tarabar","Tarah","Taran","Tarazi","Tare","Tareyn","Targett","Tarkany","Taro","Tarr","Tarra","Tarrah","Tarrance","Tarrant","Tarrel","Tarrsus","Tarryn","Tarsus","Tarsuss","Tartaglia","Tartan","Tarton","Tarttan","Taryn","Taryne","Tasha","Tasia","Tasiana","Tat","Tate","Tati","Tatia","Tatiana","Tatianas","Tatiania","Tatianna","Tatman","Tattan","Tatum","Taub","Tav","Taveda","Tavey","Tavi","Tavia","Tavie","Tavis","Tavish","Tavy","Tawney","Tawnya","Tawsha","Tay","Tayib","Tayler","Taylor","Tayyebeb","Tchao","Teador","Teagan","Teage","Teague","Teahan","Teak","Tearle","Tecla","Tecu","Ted","Tedd","Tedda","Tedder","Teddi","Teddie","Teddman","Teddy","Tedi","Tedie","Tedman","Tedmann","Tedmund","Tedra","Tedric","Teece","Teena","Teerell","Teeter","Teevens","Teferi","Tega","Tegan","Teillo","Teilo","Tekla","Telfer","Telford","Telfore","Tella","Tellford","Tem","Tema","Temp","Tempa","Tempest","Templa","Templas","Temple","Templer","Templeton","Templia","Ten","Tena","Tench","Tenenbaum","Tengdin","Tengler","Tenn","Tenner","Tennes","Tenney","Tennies","Teodoor","Teodor","Teodora","Teodorico","Teodoro","Teplica","Teplitz","Tepper","Tera","Terbecki","Terchie","Terena","Terence","Terencio","Teresa","Terese","Teresina","Teresita","Teressa","Terhune","Teri","Teria","Teriann","Terina","Terle","Ternan","Terpstra","Terr","Terra","Terrance","Terrel","Terrell","Terrena","Terrence","Terrene","Terri","Terrie","Terrijo","Terrill","Terrilyn","Terris","Terriss","Territus","Terry","Terrye","Terryl","Terryn","Tersina","Ter
ti","Tertia","Tertias","Tertius","Teryl","Teryn","Terza","Terzas","Tesler","Tess","Tessa","Tessi","Tessie","Tessler","Tessy","Teteak","Teufert","Teuton","Tevis","Tewell","Tewfik","Tews","Thacher","Thacker","Thackeray","Thad","Thaddaus","Thaddeus","Thaddus","Thadeus","Thagard","Thain","Thaine","Thais","Thalassa","Thalia","Tham","Thamora","Thamos","Thanasi","Thane","Thanh","Thanos","Thant","Thapa","Thar","Tharp","Thatch","Thatcher","Thaxter","Thay","Thayer","Thayne","The","Thea","Theadora","Theall","Thebault","Thecla","Theda","Thedric","Thedrick","Theis","Thekla","Thelma","Thema","Themis","Thenna","Theo","Theobald","Theodor","Theodora","Theodore","Theodoric","Theodosia","Theola","Theona","Theone","Thera","Theran","Theresa","Therese","Theresina","Theresita","Theressa","Therine","Theron","Therron","Thesda","Thessa","Theta","Thetes","Thetis","Thetisa","Thetos","Theurer","Theurich","Thevenot","Thia","Thibaud","Thibault","Thibaut","Thielen","Thier","Thierry","Thilda","Thilde","Thill","Thin","Thinia","Thirion","Thirza","Thirzi","Thirzia","Thisbe","Thisbee","Thissa","Thistle","Thoer","Thom","Thoma","Thomajan","Thomas","Thomasa","Thomasin","Thomasina","Thomasine","Thomey","Thompson","Thomsen","Thomson","Thor","Thora","Thorbert","Thordia","Thordis","Thorfinn","Thorin","Thorlay","Thorley","Thorlie","Thorma","Thorman","Thormora","Thorn","Thornburg","Thorncombe","Thorndike","Thorne","Thorner","Thornie","Thornton","Thorny","Thorpe","Thorr","Thorrlow","Thorstein","Thorsten","Thorvald","Thorwald","Thrasher","Three","Threlkeld","Thrift","Thun","Thunell","Thurber","Thurlough","Thurlow","Thurman","Thurmann","Thurmond","Thurnau","Thursby","Thurstan","Thurston","Thury","Thynne","Tia","Tiana","Tibbetts","Tibbitts","Tibbs","Tibold","Tica","Tice","Tichon","Tichonn","Ticknor","Ticon","Tidwell","Tiebold","Tiebout","Tiedeman","Tiemroth","Tien","Tiena","Tierell","Tiernan","Tierney","Tiersten","Tiertza","Tierza","Tifanie","Tiff","Tiffa","Tiffani","Tiffanie","Tiffanle","Tiffany","Tiffi","Tiffie","Tiffy","Tiga","Tigges","Tila","Tilda","Tilden","Tildi","Tildie","Tildy","Tiler","Tilford","Till","Tilla","Tillford","Tillfourd","Tillie","Tillinger","Tillio","Tillion","Tillman","Tillo","Tilly","Tilney","Tiloine","Tim","Tima","Timi","Timmi","Timmie","Timmons","Timms","Timmy","Timofei","Timon","Timoteo","Timothea","Timothee","Timotheus","Timothy","Tina","Tinaret","Tindall","Tine","Tingey","Tingley","Tini","Tiny","Tinya","Tiossem","Tiphane","Tiphani","Tiphanie","Tiphany","Tippets","Tips","Tipton","Tirrell","Tirza","Tirzah","Tisbe","Tisbee","Tisdale","Tish","Tisha","Tisman","Tita","Titania","Tito","Titos","Titus","Tizes","Tjaden","Tjader","Tjon","Tletski","Toback","Tobe","Tobey","Tobi","Tobiah","Tobias","Tobie","Tobin","Tobit","Toby","Tobye","Tocci","Tod","Todd","Toddie","Toddy","Todhunter","Toffey","Toffic","Toft","Toh","Toiboid","Toinette","Tol","Toland","Tolkan","Toll","Tolland","Tolley","Tolliver","Tollman","Tollmann","Tolmach","Tolman","Tolmann","Tom","Toma","Tomas","Tomasina","Tomasine","Tomaso","Tomasz","Tombaugh","Tomchay","Tome","Tomi","Tomkiel","Tomkin","Tomkins","Tomlin","Tomlinson","Tommi","Tommie","Tommy","Tompkins","Toms","Toney","Tongue","Toni","Tonia","Tonie","Tonina","Tonjes","Tonkin","Tonl","Tonneson","Tonnie","Tonry","Tony","Tonya","Tonye","Toogood","Toole","Tooley","Toolis","Toomay","Toombs","Toomin","Toor","Tootsie","Topliffe","Topper","Topping","Tor","Torbart","Torbert","Tore","Torey","Torhert","Tori","Torie","Torin","Tormoria","Torosian","Torp","Torr","Torrance","Torras","Torray","Torre","Torrell","Torrence",
"Torres","Torrey","Torrie","Torrin","Torrlow","Torruella","Torry","Torto","Tortosa","Tory","Toscano","Tosch","Toshiko","Toth","Touber","Toulon","Tound","Tova","Tove","Towbin","Tower","Towers","Towill","Towland","Town","Towne","Towney","Townie","Townsend","Townshend","Towny","Towrey","Towroy","Toy","Trabue","Tracay","Trace","Tracee","Tracey","Traci","Tracie","Tracy","Trager","Trahern","Trahurn","Trainer","Trainor","Trakas","Trammel","Tran","Tranquada","Trant","Trask","Tratner","Trauner","Trautman","Travax","Traver","Travers","Travis","Travus","Traweek","Tray","Treacy","Treat","Trefler","Trefor","Treharne","Treiber","Trela","Trella","Trellas","Trelu","Tremain","Tremaine","Tremann","Tremayne","Trembly","Tremml","Trenna","Trent","Trenton","Tresa","Trescha","Trescott","Tressa","Tressia","Treulich","Trev","Treva","Trevah","Trevar","Trever","Trevethick","Trevor","Trevorr","Trey","Tri","Trici","Tricia","Trilbee","Trilbi","Trilbie","Trilby","Triley","Trill","Trillbee","Trillby","Trilley","Trilly","Trimble","Trimmer","Trin","Trina","Trinatte","Trinee","Trinetta","Trinette","Trini","Trinia","Trinidad","Trinity","Trinl","Triny","Trip","Triplett","Tripp","Tris","Trisa","Trish","Trisha","Trista","Tristam","Tristan","Tristas","Tristis","Tristram","Trix","Trixi","Trixie","Trixy","Trocki","Trojan","Trometer","Tronna","Troth","Trotta","Trotter","Trout","Trovillion","Trow","Troxell","Troy","Troyes","Trstram","Trubow","Truc","Truda","Trude","Trudey","Trudi","Trudie","Trudnak","Trudy","True","Trueblood","Truelove","Trueman","Truitt","Trula","Trumaine","Truman","Trumann","Truscott","Trust","Trutko","Tryck","Trygve","Tsai","Tsan","Tse","Tseng","Tshombe","Tsuda","Tsui","Tu","Tubb","Tuchman","Tuck","Tucker","Tuckie","Tucky","Tuddor","Tudela","Tudor","Tuesday","Tufts","Tugman","Tuinenga","Tull","Tulley","Tullius","Tullus","Tullusus","Tully","Tumer","Tuneberg","Tung","Tunnell","Tupler","Tuppeny","Turino","Turk","Turley","Turmel","Turnbull","Turne","Turner","Turnheim","Turoff","Turpin","Turrell","Turro","Turtle","Tut","Tutankhamen","Tutt","Tuttle","Tutto","Twedy","Twelve","Twila","Twitt","Twum","Twyla","Ty","Tybald","Tybalt","Tybi","Tybie","Tychon","Tychonn","Tye","Tyika","Tyler","Tymes","Tymon","Tymothy","Tynan","Tyne","Tyra","Tyre","Tyree","Tyrone","Tyrrell","Tyrus","Tyson","Tzong","Ubald","Uball","Ubana","Ube","Uchida","Uchish","Uda","Udale","Udall","Udela","Udele","Udell","Udella","Udelle","Uel","Uela","Uella","Ugo","Uird","Uis","Uke","Ul","Ula","Ulah","Ulane","Ulani","Ulberto","Ulda","Ule","Ulick","Ulises","Ulita","Ulla","Ulland","Ullman","Ullund","Ullyot","Ulphi","Ulphia","Ulphiah","Ulric","Ulrica","Ulrich","Ulrick","Ulrika","Ulrikaumeko","Ulrike","Ultan","Ultann","Ultima","Ultun","Ulu","Ulund","Ulysses","Umberto","Ume","Umeh","Umeko","Ummersen","Umont","Un","Una","Unders","Underwood","Undine","Undis","Undry","Une","Ungley","Uni","Unity","Unni","Uno","Upali","Uphemia","Upshaw","Upton","Urana","Urania","Uranie","Urata","Urba","Urbai","Urbain","Urban","Urbana","Urbani","Urbanna","Urbannai","Urbannal","Urbano","Urbanus","Urbas","Uri","Uria","Uriah","Urial","Urian","Urias","Uriel","Urien","Uriia","Uriiah","Uriisa","Urina","Urion","Urissa","Urita","Urquhart","Ursa","Ursal","Ursala","Ursas","Ursel","Ursi","Ursola","Urson","Ursula","Ursulette","Ursulina","Ursuline","Ury","Usanis","Ushijima","Uta","Utas","Ute","Utham","Uthrop","Utica","Uticas","Utimer","Utley","Utta","Uttasta","Utter","Uttica","Uuge","Uund","Uwton","Uyekawa","Uzia","Uzial","Uziel","Uzzi","Uzzia","Uzzial","Uzziel","Va","Vaas","Vaasta","Vachel","Vachell",
"Vachil","Vachill","Vacla","Vaclav","Vaclava","Vacuva","Vada","Vaden","Vadim","Vadnee","Vaenfila","Vahe","Vaientina","Vail","Vaios","Vaish","Val","Vala","Valaree","Valaria","Valda","Valdas","Valdemar","Valdes","Valdis","Vale","Valeda","Valenba","Valencia","Valene","Valenka","Valenta","Valente","Valentia","Valentijn","Valentin","Valentina","Valentine","Valentino","Valenza","Valer","Valera","Valeria","Valerian","Valerie","Valerio","Valerlan","Valerle","Valery","Valerye","Valeta","Valiant","Valida","Valina","Valle","Valleau","Vallery","Valley","Valli","Vallie","Vallo","Vallonia","Vally","Valma","Valonia","Valoniah","Valora","Valorie","Valry","Valtin","Van","VanHook","Vance","Vanda","Vanden","Vander","Vanderhoek","Vandervelde","Vandyke","Vanessa","Vange","Vanhomrigh","Vani","Vania","Vanna","Vanni","Vannie","Vanny","Vano","Vanthe","Vanya","Vanzant","Varden","Vardon","Vareck","Vargas","Varhol","Varian","Varick","Varien","Varini","Varion","Varipapa","Varney","Varrian","Vary","Vas","Vashtee","Vashti","Vashtia","Vasileior","Vasilek","Vasili","Vasiliki","Vasilis","Vasiliu","Vasily","Vasos","Vasquez","Vassar","Vassaux","Vassell","Vassili","Vassily","Vasta","Vastah","Vastha","Vasti","Vasya","Vasyuta","Vaughan","Vaughn","Vaules","Veal","Veator","Veats","Veda","Vedetta","Vedette","Vedi","Vedis","Veedis","Velasco","Velda","Veleda","Velick","Veljkov","Velleman","Velma","Velvet","Vena","Venable","Venator","Venditti","Veneaux","Venetia","Venetis","Venezia","Venice","Venita","Venn","Veno","Venola","Venterea","Vento","Ventre","Ventura","Venu","Venus","Venuti","Ver","Vera","Verada","Veradi","Veradia","Veradis","Verbenia","Verda","Verdha","Verdi","Verdie","Vere","Verena","Verene","Verge","Verger","Vergil","Vergne","Vergos","Veriee","Verile","Verina","Verine","Verity","Verla","Verlee","Verlie","Vern","Verna","Verne","Vernen","Verner","Verneuil","Verney","Vernice","Vernier","Vernita","Vernon","Vernor","Veron","Veronica","Veronika","Veronike","Veronique","Verras","Vershen","Vescuso","Vesta","Veta","Vetter","Vevay","Vevina","Vevine","Vey","Vezza","Vharat","Vi","Viafore","Vial","Vic","Viccora","Vick","Vickey","Vicki","Vickie","Vicky","Victoir","Victor","Victoria","Victorie","Victorine","Victory","Vida","Vidal","Vidda","Viddah","Vidovic","Vidovik","Viehmann","Viens","Vierno","Vieva","Vig","Vigen","Viglione","Vigor","Viguerie","Viki","Viking","Vikki","Vikky","Vilberg","Vilhelmina","Villada","Villiers","Vilma","Vin","Vina","Vinaya","Vince","Vincelette","Vincent","Vincenta","Vincentia","Vincents","Vincenty","Vincenz","Vine","Vinia","Vinita","Vinn","Vinna","Vinni","Vinnie","Vinny","Vins","Vinson","Viola","Violante","Viole","Violet","Violeta","Violetta","Violette","Vipul","Viquelia","Viradis","Virendra","Virg","Virge","Virgel","Virgie","Virgil","Virgilia","Virgilio","Virgin","Virgina","Virginia","Virginie","Virgy","Viridi","Viridis","Viridissa","Virnelli","Viscardi","Vish","Vita","Vitale","Vitalis","Vite","Vitek","Vitia","Vitkun","Vito","Vitoria","Vittoria","Vittorio","Vitus","Viv","Viva","Viveca","Vivi","Vivia","Vivian","Viviana","Viviane","Vivianna","Vivianne","Vivica","Vivie","Vivien","Viviene","Vivienne","Viviyan","Vivl","Vivle","Vivyan","Vivyanne","Vizza","Vizzone","Vlad","Vlada","Vladamar","Vladamir","Vladi","Vladimar","Vladimir","Voccola","Voe","Vogel","Vogele","Vogeley","Vola","Volding","Voleta","Voletta","Volin","Volkan","Volnak","Volnay","Volney","Volny","Volotta","Volpe","Voltmer","Voltz","Von","Vona","Vonni","Vonnie","Vonny","Vookles","Voorhis","Vorfeld","Vories","Vorster","Voss","Votaw","Vowel","Vrablik","Vtar
j","Vtehsta","Vudimir","Vullo","Vyky","Vyner","Vyse","Waal","Wachtel","Wachter","Wack","Waddell","Waddington","Waddle","Wade","Wadell","Wadesworth","Wadleigh","Wadlinger","Wadsworth","Waechter","Waers","Wager","Wagner","Wagoner","Wagshul","Wagstaff","Wahkuna","Wahl","Wahlstrom","Wailoo","Wain","Waine","Wainwright","Wait","Waite","Waiter","Wake","Wakeen","Wakefield","Wakerly","Waki","Walburga","Walcoff","Walcott","Walczak","Wald","Waldack","Waldemar","Walden","Waldman","Waldner","Waldo","Waldon","Waldos","Waldron","Wales","Walford","Waligore","Walke","Walker","Walkling","Wall","Wallace","Wallach","Wallache","Wallack","Wallas","Waller","Walley","Wallford","Walli","Wallie","Walling","Wallinga","Wallis","Walliw","Wallraff","Walls","Wally","Walrath","Walsh","Walston","Walt","Walter","Walters","Walther","Waltner","Walton","Walworth","Waly","Wampler","Wamsley","Wan","Wanda","Wandie","Wandis","Wandy","Wane","Waneta","Wanfried","Wang","Wanids","Wanonah","Wanyen","Wappes","Warchaw","Ward","Warde","Warden","Warder","Wardieu","Wardlaw","Wardle","Ware","Wareing","Warenne","Warfeld","Warfield","Warfold","Warford","Warfore","Warfourd","Warga","Warila","Waring","Warms","Warner","Warp","Warram","Warren","Warrenne","Warrick","Warrin","Warring","Warthman","Warton","Wartow","Warwick","Wash","Washburn","Washington","Washko","Wasserman","Wasson","Wassyngton","Wat","Watanabe","Waterer","Waterman","Waters","Watkin","Watkins","Watson","Watt","Wattenberg","Watters","Watts","Waugh","Wauters","Wavell","Waverley","Waverly","Wawro","Waxler","Waxman","Way","Waylan","Wayland","Waylen","Waylin","Waylon","Waynant","Wayne","Wayolle","Weaks","Wearing","Weasner","Weatherby","Weatherley","Weathers","Weaver","Web","Webb","Webber","Weber","Webster","Wedurn","Weed","Weeks","Wehner","Wehrle","Wei","Weibel","Weidar","Weide","Weider","Weidman","Weidner","Weig","Weight","Weigle","Weihs","Weikert","Weil","Weiler","Weiman","Wein","Weinberg","Weiner","Weinert","Weingarten","Weingartner","Weinhardt","Weinman","Weinreb","Weinrich","Weinshienk","Weinstein","Weinstock","Weintrob","Weir","Weirick","Weisbart","Weisberg","Weisbrodt","Weisburgh","Weiser","Weisler","Weisman","Weismann","Weiss","Weissberg","Weissman","Weissmann","Weitman","Weitzman","Weixel","Weksler","Welbie","Welby","Welch","Welcher","Welcome","Welcy","Weld","Weldon","Welford","Welker","Welles","Wellesley","Wellington","Wells","Welsh","Welton","Wenda","Wendall","Wendalyn","Wende","Wendel","Wendelin","Wendelina","Wendeline","Wendell","Wendi","Wendie","Wendin","Wendolyn","Wendt","Wendy","Wendye","Wenger","Wengert","Wenn","Wennerholn","Wenoa","Wenona","Wenonah","Wentworth","Wenz","Wera","Werbel","Werby","Werner","Wernher","Wernick","Wernsman","Werra","Wershba","Wertheimer","Wertz","Wes","Wesa","Wescott","Wesla","Wesle","Weslee","Wesley","Wessling","West","Westberg","Westbrook","Westbrooke","Wester","Westerfield","Westfahl","Westfall","Westhead","Westland","Westleigh","Westley","Westlund","Westmoreland","Westney","Weston","Westphal","Wetzel","Wetzell","Wexler","Wey","Weyermann","Weylin","Weywadt","Whale","Whalen","Whall","Whallon","Whang","Wharton","Whatley","Wheaton","Wheeler","Wheelwright","Whelan","Whetstone","Whiffen","Whiney","Whipple","Whit","Whitaker","Whitby","Whitcher","Whitcomb","White","Whitebook","Whitehouse","Whitehurst","Whitelaw","Whiteley","Whitford","Whiting","Whitman","Whitnell","Whitney","Whitson","Whittaker","Whittemore","Whitten","Whitver","Whorton","Whyte","Wiatt","Wiburg","Wichern","Wichman","Wickham","Wickman","Wickner","Wicks","Widera","Wie","Wiebmer","Wiech
e","Wiedmann","Wiencke","Wiener","Wier","Wieren","Wiersma","Wiese","Wiggins","Wight","Wightman","Wil","Wilber","Wilbert","Wilbur","Wilburn","Wilburt","Wilcox","Wilda","Wilde","Wildee","Wilden","Wilder","Wildermuth","Wildon","Wileen","Wilek","Wilen","Wiles","Wiley","Wilfred","Wilfreda","Wilfrid","Wilhelm","Wilhelmina","Wilhelmine","Wilhide","Wilie","Wilinski","Wilkens","Wilkey","Wilkie","Wilkins","Wilkinson","Wilkison","Will","Willa","Willabella","Willamina","Willard","Willcox","Willdon","Willem","Willet","Willett","Willetta","Willette","Willey","Willi","William","Williams","Williamsen","Williamson","Willie","Willin","Willing","Willis","Willman","Willmert","Willms","Willner","Willock","Willow","Wills","Willtrude","Willumsen","Willy","Willyt","Wilma","Wilmar","Wilmer","Wilmette","Wilmott","Wilona","Wilonah","Wilone","Wilow","Wilscam","Wilser","Wilsey","Wilson","Wilt","Wilterdink","Wilton","Wiltsey","Wiltshire","Wiltz","Wimsatt","Win","Wina","Wincer","Winchell","Winchester","Wind","Windham","Windsor","Windy","Windzer","Winebaum","Winer","Winfield","Winfred","Winfrid","Wing","Wini","Winifield","Winifred","Winikka","Winn","Winna","Winnah","Winne","Winni","Winnick","Winnie","Winnifred","Winny","Winograd","Winola","Winona","Winonah","Winou","Winser","Winshell","Winslow","Winson","Winsor","Winston","Winstonn","Winter","Winterbottom","Winters","Winther","Winthorpe","Winthrop","Winton","Winwaloe","Winzler","Wira","Wirth","Wise","Wiseman","Wiskind","Wisnicki","Wistrup","Wit","Witcher","Witha","Witherspoon","Witkin","Witt","Witte","Wittenburg","Wittie","Witty","Wivestad","Wivina","Wivinah","Wivinia","Wixted","Woehick","Woermer","Wohlen","Wohlert","Wojak","Wojcik","Wolbrom","Wolcott","Wolenik","Wolf","Wolfe","Wolff","Wolfgang","Wolfgram","Wolfie","Wolford","Wolfort","Wolfram","Wolfson","Wolfy","Wolgast","Wolk","Woll","Wollis","Wolpert","Wolsky","Womack","Won","Wonacott","Wong","Woo","Wood","Woodall","Woodberry","Woodcock","Woodford","Woodhead","Woodhouse","Woodie","Woodley","Woodman","Woodring","Woodrow","Woodruff","Woods","Woodson","Woodsum","Woodward","Woody","Woolcott","Wooldridge","Woolley","Woolson","Wooster","Wootan","Woothen","Wootten","Worden","Worl","Worlock","Worrell","Worsham","Worth","Worthington","Worthy","Wrand","Wren","Wrench","Wrennie","Wright","Wrightson","Wrigley","Wsan","Wu","Wulf","Wulfe","Wun","Wunder","Wurst","Wurster","Wurtz","Wyatan","Wyatt","Wyck","Wycoff","Wye","Wylde","Wylen","Wyler","Wylie","Wylma","Wyly","Wymore","Wyn","Wyndham","Wyne","Wynn","Wynne","Wynnie","Wynny","Wyon","Wystand","Xantha","Xanthe","Xanthus","Xavier","Xaviera","Xavler","Xena","Xenia","Xeno","Xenophon","Xenos","Xerxes","Xever","Ximena","Ximenes","Ximenez","Xylia","Xylina","Xylon","Xymenes","Yaakov","Yablon","Yacano","Yacov","Yaeger","Yael","Yager","Yahiya","Yaker","Yale","Yalonda","Yam","Yamauchi","Yanaton","Yance","Yancey","Yancy","Yand","Yank","Yankee","Yann","Yarak","Yard","Yardley","Yaron","Yarvis","Yasmeen","Yasmin","Yasmine","Yasu","Yasui","Yate","Yates","Yatzeck","Yaya","Yazbak","Yeargain","Yearwood","Yeaton","Yecies","Yee","Yeh","Yehudi","Yehudit","Yelena","Yelich","Yelmene","Yemane","Yeo","Yeorgi","Yerga","Yerkovich","Yerxa","Yesima","Yeta","Yetac","Yetah","Yetta","Yetti","Yettie","Yetty","Yeung","Yevette","Yi","Yila","Yim","Yirinec","Ylla","Ynes","Ynez","Yoho","Yoko","Yokoyama","Yokum","Yolanda","Yolande","Yolane","Yolanthe","Yona","Yonah","Yonatan","Yong","Yonina","Yonit","Yonita","Yoo","Yoong","Yordan","Yorgen","Yorgo","Yorgos","Yorick","York","Yorke","Yorker","Yoshi","Yoshiko","Yoshio","Youlto
n","Young","Younger","Younglove","Youngman","Youngran","Yousuf","Yovonnda","Ysabel","Yseult","Yseulta","Yseulte","Yuhas","Yuille","Yuji","Yuk","Yukio","Yul","Yule","Yulma","Yuma","Yumuk","Yun","Yunfei","Yung","Yunick","Yup","Yuri","Yuria","Yurik","Yursa","Yurt","Yusem","Yusuk","Yuu","Yuzik","Yves","Yvette","Yvon","Yvonne","Yvonner","Yvor","Zabrina","Zabrine","Zacarias","Zaccaria","Zacek","Zach","Zachar","Zacharia","Zachariah","Zacharias","Zacharie","Zachary","Zacherie","Zachery","Zack","Zackariah","Zacks","Zadack","Zadoc","Zahara","Zahavi","Zaid","Zailer","Zak","Zakaria","Zakarias","Zalea","Zales","Zaller","Zalucki","Zamir","Zamora","Zampardi","Zampino","Zandra","Zandt","Zane","Zaneski","Zaneta","Zannini","Zantos","Zanze","Zara","Zaragoza","Zarah","Zared","Zaremski","Zarger","Zaria","Zarla","Zashin","Zaslow","Zasuwa","Zavala","Zavras","Zawde","Zea","Zealand","Zeb","Zeba","Zebada","Zebadiah","Zebapda","Zebe","Zebedee","Zebulen","Zebulon","Zechariah","Zeculon","Zed","Zedekiah","Zeeba","Zeena","Zehe","Zeidman","Zeiger","Zeiler","Zeitler","Zeke","Zel","Zela","Zelazny","Zelda","Zelde","Zelig","Zelikow","Zelle","Zellner","Zelma","Zelten","Zena","Zenas","Zenda","Zendah","Zenger","Zenia","Zennas","Zennie","Zenobia","Zeph","Zephan","Zephaniah","Zeralda","Zerelda","Zerk","Zerla","Zerlina","Zerline","Zeta","Zetana","Zetes","Zetta","Zeus","Zhang","Zia","Ziagos","Zicarelli","Ziegler","Zielsdorf","Zigmund","Zigrang","Ziguard","Zilber","Zildjian","Zilla","Zillah","Zilvia","Zima","Zimmer","Zimmerman","Zimmermann","Zina","Zinah","Zinck","Zindman","Zingale","Zingg","Zink","Zinn","Zinnes","Zins","Zipah","Zipnick","Zippel","Zippora","Zipporah","Zirkle","Zischke","Zita","Zitah","Zitella","Zitvaa","Ziwot","Zoa","Zoara","Zoarah","Zoba","Zobe","Zobias","Zobkiw","Zoe","Zoeller","Zoellick","Zoes","Zoha","Zohar","Zohara","Zoi","Zoie","Zoila","Zoilla","Zola","Zoldi","Zoller","Zollie","Zolly","Zolnay","Zolner","Zoltai","Zonda","Zondra","Zonnya","Zora","Zorah","Zorana","Zorina","Zorine","Zosema","Zosi","Zosima","Zoubek","Zrike","Zsa","ZsaZsa","Zsazsa","Zsolway","Zubkoff","Zucker","Zuckerman","Zug","Zulch","Zuleika","Zulema","Zullo","Zumstein","Zumwalt","Zurek","Zurheide","Zurkow","Zurn","Zusman","Zuzana","Zwart","Zweig","Zwick","Zwiebel","Zysk"]
domains = ["aol.com", "att.net", "comcast.net", "facebook.com", "gmail.com", "gmx.com", "googlemail.com","google.com", "hotmail.com", "hotmail.co.uk", "mac.com", "me.com", "mail.com", "msn.com","live.com", "sbcglobal.net", "verizon.net", "yahoo.com", "yahoo.co.uk","email.com", "games.com", "gmx.net", "hush.com", "hushmail.com", "icloud.com", "inbox.com","lavabit.com", "love.com" , "outlook.com", "pobox.com", "rocketmail.com","safe-mail.net", "wow.com", "ygm.com", "ymail.com", "zoho.com", "fastmail.fm","yandex.com","bellsouth.net", "charter.net", "comcast.net", "cox.net", "earthlink.net", "juno.com","btinternet.com", "virginmedia.com", "blueyonder.co.uk", "freeserve.co.uk", "live.co.uk","ntlworld.com", "o2.co.uk", "orange.net", "sky.com", "talktalk.co.uk", "tiscali.co.uk","virgin.net", "wanadoo.co.uk", "bt.com","sina.com", "qq.com", "naver.com", "hanmail.net", "daum.net", "nate.com", "yahoo.co.jp", "yahoo.co.kr", "yahoo.co.id", "yahoo.co.in", "yahoo.com.sg", "yahoo.com.ph","hotmail.fr", "live.fr", "laposte.net", "yahoo.fr", "wanadoo.fr", "orange.fr", "gmx.fr", "sfr.fr", "neuf.fr", "free.fr","gmx.de", "hotmail.de", "live.de", "online.de", "t-online.de", "web.de", "yahoo.de","mail.ru", "rambler.ru", "yandex.ru", "ya.ru", "list.ru","hotmail.be", "live.be", "skynet.be", "voo.be", "tvcablenet.be", "telenet.be","hotmail.com.ar", "live.com.ar", "yahoo.com.ar", "fibertel.com.ar", "speedy.com.ar", "arnet.com.ar","hotmail.com", "gmail.com", "yahoo.com.mx", "live.com.mx", "yahoo.com", "hotmail.es", "live.com", "hotmail.com.mx", "prodigy.net.mx", "msn.com"]
def generate(max_num,gen_type):
if max_num == 0:
sys.exit(1)
for x in itertools.count(start=0,step=1):
password = ''
if random.randrange(6,26) > 12:
password = random.choice(common_passwords)
else:
password = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(random.randrange(2,8)))
        if x == max_num:
            break
else:
name = ''
if random.randrange(1,10) == 3:
name = str(random.choice(english_first_names))+"."+str(random.choice(surnames))+"@"+str(random.choice(domains))
elif random.randrange(1,10) == 1:
name = str.lower(random.choice(english_first_names))+"_"+''.join(random.choice(string.digits) for _ in range(random.randrange(1,5)))+"@"+random.choice(domains)
elif random.randrange(1,10) == 2:
name = str.lower(random.choice(surnames))+"-"+''.join(random.choice(string.digits) for _ in range(random.randrange(1,5)))+"@"+random.choice(domains)
elif random.randrange(1,10) == 8:
name = ''.join(random.choice(string.digits) for _ in range(random.randrange(1,5)))+str.upper(random.choice(surnames))+"@"+random.choice(domains)
elif random.randrange(1,10) == 9:
name = ''.join(random.choice(string.digits) for _ in range(random.randrange(1,2)))+"-"+str.upper(random.choice(surnames))+"@"+random.choice(domains)
elif random.randrange(1,10) == 5:
name = str.lower(random.choice(english_first_names))+"-"+''.join(random.choice(string.digits) for _ in range(random.randrange(1,5)))+"@"+random.choice(domains)
elif random.randrange(1,10) == 4:
name = str.upper(random.choice(english_first_names))+""+str.upper(random.choice(surnames))+"@"+random.choice(domains)
elif random.randrange(1,10) == 6:
name= ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(random.randrange(6,25)))+"@"+random.choice(domains)
elif random.randrange(1,10) == 7:
name= ''.join(random.choice(string.ascii_letters) for _ in range(random.randrange(1,5)))+"."+str(random.choice(surnames))+"@"+random.choice(domains)
            elif random.randrange(1,11) == 10:
                name = ''.join(random.choice(string.ascii_letters + string.digits + '.+-_') for _ in range(random.randrange(6,15)))+"."+str(random.choice(surnames))+"@"+random.choice(domains)
else:
name = str(random.choice(english_first_names))+"_"+str(random.choice(surnames))+"@"+str(random.choice(domains))
if (gen_type == "clear"):
if (re.match(emailregex, name)):
print name,":",password
elif (gen_type == "sha256"):
if (re.match(emailregex, name)):
hash_object = hashlib.sha256(password).hexdigest()
print name,":",password,":",hash_object
elif (gen_type == "sha512"):
if (re.match(emailregex, name)):
hash_object = hashlib.sha512(password).hexdigest()
print name,":",password,":",hash_object
elif (gen_type == "md5"):
if (re.match(emailregex, name)):
hash_object = hashlib.md5(password).hexdigest()
print name,":",password,":",hash_object
elif (gen_type == "sha1"):
if (re.match(emailregex, name)):
hash_object = hashlib.sha1(password).hexdigest()
print name,":",password,":",hash_object
elif (gen_type == "ntlm"):
if (re.match(emailregex, name)):
hash_object = binascii.hexlify(hashlib.new('md4', password.encode('utf-16le')).digest())
print name,":",password,":",hash_object
elif (gen_type == "sha1_r_salt"):
if (re.match(emailregex, name)):
pass_obj = password+''.join(random.choice(string.ascii_letters + string.digits) for _ in range(random.randrange(6,16)))
hash_object = hashlib.sha1(pass_obj).hexdigest()
print name,":",pass_obj,":",hash_object
elif (gen_type == "sha256_r_salt"):
if (re.match(emailregex, name)):
pass_obj = password+''.join(random.choice(string.ascii_letters + string.digits) for _ in range(random.randrange(6,16)))
hash_object = hashlib.sha256(pass_obj).hexdigest()
print name,":",pass_obj,":",hash_object
elif (gen_type == "sha512_r_salt"):
if (re.match(emailregex, name)):
pass_obj = password+''.join(random.choice(string.ascii_letters + string.digits) for _ in range(random.randrange(6,16)))
hash_object = hashlib.sha512(pass_obj).hexdigest()
print name,":",pass_obj,":",hash_object
elif (gen_type == "md5_r_salt"):
if (re.match(emailregex, name)):
pass_obj = password+''.join(random.choice(string.ascii_letters + string.digits) for _ in range(random.randrange(6,16)))
hash_object = hashlib.md5(pass_obj).hexdigest()
print name,":",pass_obj,":",hash_object
elif (gen_type == "ntlm_r_salt"):
if (re.match(emailregex, name)):
pass_obj = password+''.join(random.choice(string.ascii_letters + string.digits) for _ in range(random.randrange(6,16)))
hash_object = binascii.hexlify(hashlib.new('md4', pass_obj.encode('utf-16le')).digest())
print name,":",pass_obj,":",hash_object
elif(gen_type == "md5_hashonly"):
hash_object = hashlib.md5(password).hexdigest()
print hash_object
elif(gen_type == "sha1_hashonly"):
hash_object = hashlib.sha1(password).hexdigest()
print hash_object
elif(gen_type == "sha256_hashonly"):
hash_object = hashlib.sha256(password).hexdigest()
print hash_object
elif(gen_type == "sha512_hashonly"):
hash_object = hashlib.sha512(password).hexdigest()
print hash_object
elif(gen_type == "ntlm_hashonly"):
hash_object = binascii.hexlify(hashlib.new('md4', password.encode('utf-16le')).digest())
print hash_object
elif(gen_type == "md5_r_salt_hashonly"):
pass_obj = password+''.join(random.choice(string.ascii_letters + string.digits) for _ in range(random.randrange(6,16)))
hash_object = hashlib.md5(pass_obj).hexdigest()
print hash_object
elif(gen_type == "sha1_r_salt_hashonly"):
pass_obj = password+''.join(random.choice(string.ascii_letters + string.digits) for _ in range(random.randrange(6,16)))
hash_object = hashlib.sha1(pass_obj).hexdigest()
print hash_object
elif(gen_type == "sha256_r_salt_hashonly"):
pass_obj = password+''.join(random.choice(string.ascii_letters + string.digits) for _ in range(random.randrange(6,16)))
hash_object = hashlib.sha256(pass_obj).hexdigest()
print hash_object
elif(gen_type == "sha512_r_salt_hashonly"):
pass_obj = password+''.join(random.choice(string.ascii_letters + string.digits) for _ in range(random.randrange(6,16)))
hash_object = hashlib.sha512(pass_obj).hexdigest()
print hash_object
elif(gen_type == "ntlm_r_salt_hashonly"):
            pass_obj = password+''.join(random.choice(string.ascii_letters + string.digits) for _ in range(random.randrange(6,16)))
hash_object = binascii.hexlify(hashlib.new('md4', pass_obj.encode('utf-16le')).digest())
print hash_object
elif(gen_type == "ad_compromise"):
#https://technet.microsoft.com/en-us/library/active-directory-maximum-limits-scalability(v=ws.10).aspx
# based on above making 6m limit on dump
                if x >= 6000000:
                    break
else:
hash_object = "%s:aad3b435b51404eeaad3b435b51404ee:%s:::" % (str(x),binascii.hexlify(hashlib.new('md4', password.encode('utf-16le')).digest()))
print hash_object
else:
print "[!] Unknown hash type, exit"
sys.exit(1)
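# Rough sketch of the expected output format of generate() (illustrative values
# only, not taken from a real run): with '-t md5' each line looks like
#   [email protected] : s3cret12 : <32-char md5 hex digest of the password>
# whereas the '*_hashonly' types print only the digest column.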
def help():
types="""
-= LeakGenerator v1.1A by op7ic =-
Discover your own leak with spoofed emails, random passwords and equally random hashes
Supported hash types for '-t' argument:
    [+] Main hash algorithms. Will print "email : password : hash" combo.
md5
sha1
sha256
sha512
ntlm
    [+] Hash algorithms with random salt added. Will print "email : password : hash" combo.
md5_r_salt
sha1_r_salt
sha256_r_salt
sha512_r_salt
ntlm_r_salt
    [+] Other types. Will print "email or username : password" combo.
clear
ad_compromise
[+] Hash only types that print only 'hash' values
md5_hashonly
sha1_hashonly
sha256_hashonly
sha512_hashonly
    ntlm_hashonly
md5_r_salt_hashonly
sha1_r_salt_hashonly
sha256_r_salt_hashonly
sha512_r_salt_hashonly
ntlm_r_salt_hashonly
    You also need to specify the maximum number of passwords you want to generate, in hex format e.g. 0x00FFFFFF
The final command would look like this:
python leakme.py -t md5_r_salt -m 0x00FFFFFF
"""
return types
parser = optparse.OptionParser(usage=help())
parser.add_option('-m', '--max', help = "Number of hashes to generate in hex format e.g. --max=0x00FFFFFF",action="store", dest="max_dump")
parser.add_option('-t', '--type', help="Hash type to print, use -h or --help to see all applicable hash types",action="store", dest="hash_type")
(opts, args) = parser.parse_args()
generate(int(opts.max_dump, 16), opts.hash_type) | mit |
jm-begon/scikit-learn | sklearn/utils/fixes.py | 133 | 12882 | """Compatibility fixes for older versions of Python, NumPy and SciPy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
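# Illustrative behaviour of the parser above: _parse_version('1.9.2') gives
# (1, 9, 2), while a development string such as '0.14.0.dev-1ea1592' keeps the
# non-numeric part as-is, giving (0, 14, 0, 'dev-1ea1592').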
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
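# Quick sanity check of the fallback above (rounded, for illustration):
# expit(np.array([0.0, 2.0])) is approximately [0.5, 0.8808], matching
# 1 / (1 + exp(-x)) computed directly.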
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
    # in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
        # Numpy < 1.8.0 doesn't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
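# Illustrative example of either code path above: for
# X = sp.csr_matrix([[1, 0], [2, 3]]), sparse_min_max(X, axis=0) returns
# (array([1, 0]), array([2, 3])) -- implicit zeros count towards the minimum.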
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
try:
from numpy import isclose
except ImportError:
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within
a tolerance.
This function was added to numpy v1.7.0, and the version you are
running has been backported from numpy v1.8.1. See its documentation
for more details.
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Since we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[np.isnan(x) & np.isnan(y)] = True
return cond
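# Illustrative check of the backport above (same semantics as numpy's builtin):
# isclose([1e10, 1e-7], [1.00001e10, 1e-8]) gives array([ True, False]).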
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
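# Small illustration of in1d (backport or numpy builtin, same result):
# in1d([1, 2, 3, 4], [2, 4]) gives array([False, True, False, True]).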
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
if 'exist_ok' in inspect.getargspec(os.makedirs).args:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
| bsd-3-clause |
adamgreenhall/scikit-learn | sklearn/utils/multiclass.py | 83 | 12343 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
          because there are no explicit labels
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
        Return ``True``, if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.ptp(y.data) == 0 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.all(clf.classes_ == unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
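# Rough usage sketch (hypothetical estimator) of the helper above:
#
#     def partial_fit(self, X, y, classes=None):
#         if _check_partial_fit_first_call(self, classes):
#             # first call: self.classes_ was just set, allocate per-class state
#             ...
#         # later calls may omit `classes`; consistency is checked automatically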
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
    n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
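# Illustrative example of the helper above: for a single dense output
# y = np.array([[1], [2], [2]]), class_distribution(y) returns
# ([array([1, 2])], [2], [array([ 0.333...,  0.667...])]).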
| bsd-3-clause |
lehnertu/TEUFEL | scripts/plot_Screen_TD.py | 1 | 4311 | #!/usr/bin/env python
# coding=UTF-8
import sys, time
import os.path
import argparse
import numpy as np
import h5py
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
from matplotlib.patches import Circle
# magnetic field constant in N/A²
mu0 = 4*np.pi*1e-7
parser = argparse.ArgumentParser()
parser.add_argument('file', help='the file name of the screen output HDF5 file')
parser.add_argument('-xy', help="indeces of plot point", dest="xy", type=int, nargs=2)
print
args = parser.parse_args()
radfile = args.file
radOK = os.path.isfile(radfile)
if not radOK:
print "file not found"
sys.exit()
# Open the file for reading
print "reading ",radfile
hdf = h5py.File(radfile, "r")
print hdf
print
# Get the groups
pos = hdf['ObservationPosition']
Nx = pos.attrs.get('Nx')
Ny = pos.attrs.get('Ny')
print "Nx=%d Ny=%d" % (Nx,Ny)
print pos
field = hdf['ElMagField']
print field
t0 = field.attrs.get('t0')
dt = field.attrs.get('dt')
nots = field.attrs.get('NOTS')
print "t0=%g dt=%g NOTS=%d" % (t0, dt, nots)
pos = np.array(pos)
a = np.array(field)
hdf.close()
print
xcenter = (Nx-1)/2
ycenter = (Ny-1)/2
print "center = (",xcenter,",",ycenter,")"
centerposition = pos[xcenter][ycenter]
print "center position = ",centerposition
onaxis = a[xcenter][ycenter]
data = onaxis.transpose()
Ex = data[0]
Ey = data[1]
Ez = data[2]
Bx = data[3]
By = data[4]
Bz = data[5]
EVec = np.array([Ex, Ey, Ez]).transpose()
BVec = np.array([Bx, By, Bz]).transpose()
# Poynting vector in V/m * (N/(A m)) / (N/A²) = W/m²
SVec = np.cross(EVec, BVec) / mu0
# t = 1e9*np.arange(t0,t0+(nots-1)*dt,dt)
t = 1e9*np.linspace(t0,t0+(nots-1)*dt,nots)
print 'on axis energy flow density = ', 1e6*SVec.sum(axis=0)*dt, " µJ/m²"
# first figure with the time-trace of the fields on axis
left, width = 0.15, 0.80
rect1 = [left, 0.55, width, 0.40] #left, bottom, width, height
rect2 = [left, 0.08, width, 0.40]
fig = plt.figure(1,figsize=(12,9))
ax1 = fig.add_axes(rect1)
ax4 = fig.add_axes(rect2, sharex=ax1)
l1 = ax1.plot(t, Ex, "r-", label=r'$E_x$')
l2 = ax1.plot(t, Ey, "b-", label=r'$E_y$')
l3 = ax1.plot(t, Ez, "g-", label=r'$E_z$')
ax1.set_ylabel(r'$E$ [V/m]')
lines = l1 + l2 + l3
labels = [l.get_label() for l in lines]
ax1.legend(lines,labels,loc='upper right')
for label in ax1.get_xticklabels():
label.set_visible(False)
ax1.grid(True)
l4 = ax4.plot(t, Bx, "r-", label=r'$B_x$')
l5 = ax4.plot(t, By, "b-", label=r'$B_y$')
l6 = ax4.plot(t, Bz, "g-", label=r'$B_z$')
ax4.set_ylabel(r'$B$ [T]')
ax4.set_xlabel(r't [ns]')
lines = l4 + l5 +l6
labels = [l.get_label() for l in lines]
ax4.legend(lines,labels,loc='upper right')
ax4.grid(True)
if args.xy is not None:
xi = args.xy[0]
yi = args.xy[1]
print "index = (",xi,",",yi,")"
position = pos[xi][yi]
print "off-axis position = ",position
offaxis = a[xi][yi]
data = offaxis.transpose()
Ex = data[0]
Ey = data[1]
Ez = data[2]
Bx = data[3]
By = data[4]
Bz = data[5]
EVec = np.array([Ex, Ey, Ez]).transpose()
BVec = np.array([Bx, By, Bz]).transpose()
# Poynting vector in V/m * (N/(A m)) / (N/A²) = W/m²
SVec = np.cross(EVec, BVec) / mu0
# t = 1e9*np.arange(t0,t0+(nots-1)*dt,dt)
t = 1e9*np.linspace(t0,t0+(nots-1)*dt,nots)
print 'off axis energy flow density = ', 1e6*SVec.sum(axis=0)*dt, " µJ/m²"
# second figure with the time-trace of the fields off axis
fig2 = plt.figure(2,figsize=(12,9))
ax21 = fig2.add_axes(rect1)
ax24 = fig2.add_axes(rect2, sharex=ax1)
l21 = ax21.plot(t, Ex, "r-", label=r'$E_x$')
l22 = ax21.plot(t, Ey, "b-", label=r'$E_y$')
l23 = ax21.plot(t, Ez, "g-", label=r'$E_z$')
ax21.set_ylabel(r'$E$ [V/m]')
lines = l21 + l22 + l23
labels = [l.get_label() for l in lines]
ax21.legend(lines,labels,loc='upper right')
for label in ax21.get_xticklabels():
label.set_visible(False)
ax21.grid(True)
l24 = ax24.plot(t, Bx, "r-", label=r'$B_x$')
l25 = ax24.plot(t, By, "b-", label=r'$B_y$')
l26 = ax24.plot(t, Bz, "g-", label=r'$B_z$')
ax24.set_ylabel(r'$B$ [T]')
ax24.set_xlabel(r't [ns]')
lines = l24 + l25 +l26
labels = [l.get_label() for l in lines]
ax24.legend(lines,labels,loc='upper right')
ax24.grid(True)
plt.show()
| gpl-3.0 |
henryre/shalo | shalo/model_search.py | 1 | 5519 | import numpy as np
import pandas as pd
import cPickle
import datetime
import math
from itertools import product
class Hyperparameter(object):
"""Base class for a grid search parameter"""
def __init__(self, name):
self.name = name
def get_all_values(self):
raise NotImplementedError()
def draw_values(self, n):
# Multidim parameters can't use choice directly
v = self.get_all_values()
return [v[int(i)] for i in np.random.choice(len(v), n)]
class ListParameter(Hyperparameter):
"""List of parameter values for searching"""
def __init__(self, name, parameter_list):
self.parameter_list = np.array(parameter_list)
super(ListParameter, self).__init__(name)
def get_all_values(self):
return self.parameter_list
class RangeParameter(Hyperparameter):
"""
Range of parameter values for searching.
min_value and max_value are the ends of the search range
If log_base is specified, scale the search range in the log base
step is range step size or exponent step size
"""
def __init__(self, name, min_value, max_value, step=1, log_base=None):
self.min_value = min_value
self.max_value = max_value
self.step = step
self.log_base = log_base
super(RangeParameter, self).__init__(name)
def get_all_values(self):
if self.log_base:
min_exp = math.log(self.min_value, self.log_base)
max_exp = math.log(self.max_value, self.log_base)
exps = np.arange(min_exp, max_exp + self.step, step=self.step)
return np.power(self.log_base, exps)
return np.arange(
self.min_value, self.max_value + self.step, step=self.step
)
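# Illustrative sketch of the two parameter classes above (values assumed):
#   ListParameter('dim', [50, 100, 200]).get_all_values() -> array([50, 100, 200])
#   RangeParameter('lr', 1e-4, 1e-1, step=1, log_base=10).get_all_values()
#     -> roughly [1e-4, 1e-3, 1e-2, 1e-1]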
class GridSearch(object):
"""
Runs hyperparameter grid search over a model object with train and score methods,
training data (X), and training_marginals
Selects based on maximizing F1 score on a supplied validation set
Specify search space with Hyperparameter arguments
"""
def __init__(self, model, train_data, train_labels, parameters):
self.model = model
self.train_data = train_data
self.train_labels = train_labels
self.params = parameters
self.param_names = [param.name for param in parameters]
def search_space(self):
        return product(*[param.get_all_values() for param in self.params])
def fit(self, dev_data, dev_labels, b=0.5, **model_hyperparams):
"""
Basic method to start grid search, returns DataFrame table of results
b specifies the positive class threshold for calculating accuracy
        Non-search parameters are set using model_hyperparams
"""
run_stats, score_opt, model_k = [], -1.0, 0
opt_params = None
base_model_name = self.model.name
# Iterate over the param values
for k, param_vals in enumerate(self.search_space()):
model_name = '{0}_{1}'.format(base_model_name, model_k)
model_k += 1
# Set the new hyperparam configuration to test
for pn, pv in zip(self.param_names, param_vals):
model_hyperparams[pn] = pv
print "=" * 80
print "[%d] Testing %s" % (k+1, ', '.join([
"{0} = {1}".format(pn, pv)
for pn, pv in zip(self.param_names, param_vals)
]))
print "=" * 80
# Train the model
self.model.train(
self.train_data, self.train_labels,
dev_sentence_data=dev_data, dev_labels=dev_labels,
**model_hyperparams
)
# Test the model
score = self.model.score(dev_data, dev_labels, b=b, verbose=True)
run_stats.append(list(param_vals) + [score])
if score > score_opt:
#self.model.save(model_name)
opt_model = model_name
score_opt = score
opt_params = param_vals
# Store optimal params
optimal = {"name":opt_model, "params":param_vals}
with open("optimal_params"+str(datetime.datetime.now()), 'wb') as f:
cPickle.dump(optimal, f)
# Set optimal parameter in the learner model
#self.model.load(opt_model)
for pn, pv in zip(self.param_names, opt_params):
model_hyperparams[pn] = pv
self.model.train(
self.train_data, self.train_labels,
dev_sentence_data=dev_data, dev_labels=dev_labels,
**model_hyperparams
)
# Return DataFrame of scores
self.results = pd.DataFrame.from_records(
run_stats, columns=self.param_names + ['Accuracy']
).sort_values(by='Accuracy', ascending=False)
return self.results
class RandomSearch(GridSearch):
def __init__(self, model, train_data, train_labels, parameters, n=10):
"""Search a random sample of size n from a parameter grid"""
self.n = n
super(RandomSearch, self).__init__(
model, train_data, train_labels, parameters
)
print "Initialized RandomSearch of size {0} / {1}".format(
            self.n, np.product([len(p.get_all_values()) for p in self.params])
)
def search_space(self):
return zip(*[param.draw_values(self.n) for param in self.params])
| apache-2.0 |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/io/tests/test_clipboard.py | 7 | 4897 | # -*- coding: utf-8 -*-
import numpy as np
from numpy.random import randint
import nose
import pandas as pd
from pandas import DataFrame
from pandas import read_clipboard
from pandas import get_option
from pandas.util import testing as tm
from pandas.util.testing import makeCustomDataframe as mkdf
from pandas.util.clipboard.exceptions import PyperclipException
try:
DataFrame({'A': [1, 2]}).to_clipboard()
except PyperclipException:
raise nose.SkipTest("clipboard primitives not installed")
class TestClipboard(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestClipboard, cls).setUpClass()
cls.data = {}
cls.data['string'] = mkdf(5, 3, c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
cls.data['int'] = mkdf(5, 3, data_gen_f=lambda *args: randint(2),
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
cls.data['float'] = mkdf(5, 3,
data_gen_f=lambda r, c: float(r) + 0.01,
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
cls.data['mixed'] = DataFrame({'a': np.arange(1.0, 6.0) + 0.01,
'b': np.arange(1, 6),
'c': list('abcde')})
# Test columns exceeding "max_colwidth" (GH8305)
_cw = get_option('display.max_colwidth') + 1
cls.data['colwidth'] = mkdf(5, 3, data_gen_f=lambda *args: 'x' * _cw,
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
# Test GH-5346
max_rows = get_option('display.max_rows')
cls.data['longdf'] = mkdf(max_rows + 1, 3,
data_gen_f=lambda *args: randint(2),
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
# Test for non-ascii text: GH9263
cls.data['nonascii'] = pd.DataFrame({'en': 'in English'.split(),
'es': 'en español'.split()})
# unicode round trip test for GH 13747, GH 12529
cls.data['utf8'] = pd.DataFrame({'a': ['µasd', 'Ωœ∑´'],
'b': ['øπ∆˚¬', 'œ∑´®']})
cls.data_types = list(cls.data.keys())
@classmethod
def tearDownClass(cls):
super(TestClipboard, cls).tearDownClass()
del cls.data_types, cls.data
def check_round_trip_frame(self, data_type, excel=None, sep=None,
encoding=None):
data = self.data[data_type]
data.to_clipboard(excel=excel, sep=sep, encoding=encoding)
if sep is not None:
result = read_clipboard(sep=sep, index_col=0, encoding=encoding)
else:
result = read_clipboard(encoding=encoding)
tm.assert_frame_equal(data, result, check_dtype=False)
def test_round_trip_frame_sep(self):
for dt in self.data_types:
self.check_round_trip_frame(dt, sep=',')
def test_round_trip_frame_string(self):
for dt in self.data_types:
self.check_round_trip_frame(dt, excel=False)
def test_round_trip_frame(self):
for dt in self.data_types:
self.check_round_trip_frame(dt)
def test_read_clipboard_infer_excel(self):
from textwrap import dedent
from pandas.util.clipboard import clipboard_set
text = dedent("""
John James Charlie Mingus
1 2
4 Harry Carney
""".strip())
clipboard_set(text)
df = pd.read_clipboard()
# excel data is parsed correctly
self.assertEqual(df.iloc[1][1], 'Harry Carney')
# having diff tab counts doesn't trigger it
text = dedent("""
a\t b
1 2
3 4
""".strip())
clipboard_set(text)
res = pd.read_clipboard()
text = dedent("""
a b
1 2
3 4
""".strip())
clipboard_set(text)
exp = pd.read_clipboard()
tm.assert_frame_equal(res, exp)
def test_invalid_encoding(self):
# test case for testing invalid encoding
data = self.data['string']
with tm.assertRaises(ValueError):
data.to_clipboard(encoding='ascii')
with tm.assertRaises(NotImplementedError):
pd.read_clipboard(encoding='ascii')
def test_round_trip_valid_encodings(self):
for enc in ['UTF-8', 'utf-8', 'utf8']:
for dt in self.data_types:
self.check_round_trip_frame(dt, encoding=enc)
| apache-2.0 |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Stress_Based_Contact_Verification/SoftContact_NonLinHardShear/Area/A_1e-4/Normal_Stress_Plot.py | 72 | 2800 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
import matplotlib;
import math;
from matplotlib.ticker import MaxNLocator
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
###############################################################
## Analytical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Analytical_Solution_Normal_Stress.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
plt.plot(normal_strain*100,normal_stress/1000,'-r',label='Analytical Solution', Linewidth=4, markersize=20)
plt.xlabel(r"Interface Type #")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
plt.hold(True)
###############################################################
## Numerical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Normal_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(normal_strain*100,normal_stress/1000,'-k',label='Numerical Solution', Linewidth=4, markersize=20)
plt.xlabel(r"Normal Strain [%]")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
#############################################################
# # # axes = plt.gca()
# # # axes.set_xlim([-7,7])
# # # axes.set_ylim([-1,1])
# outfigname = "Interface_Test_Normal_Stress.pdf";
# plt.axis([0, 5.5, 90, 101])
# legend = plt.legend()
# legend.get_frame().set_linewidth(0.0)
# legend.get_frame().set_facecolor('none')
plt.legend()
plt.savefig('Normal_Stress.pdf', bbox_inches='tight')
# plt.show()
| cc0-1.0 |
trmznt/genaf | genaf/__init__.py | 1 | 5963 | import logging
log = logging.getLogger(__name__)
import matplotlib
matplotlib.use('Agg')
log.info('Setting up matplotlib to use Agg')
from pyramid.config import Configurator
from rhombus import includeme as rho_includeme, init_app as rhombus_init_app, add_route_view
from rhombus.lib.utils import cout, cerr, cexit, generic_userid_func
from rhombus.lib.fsoverlay import fsomount
from rhombus.models.core import set_func_userid
from genaf.lib.procmgmt import init_queue
from genaf.lib.configs import set_temp_path, get_temp_path, TEMP_TOOLS
import os
def includeme( config ):
# GenAF configuration
#config.add_static_view('genaf_assets', 'genaf:static/assets/')
config.add_static_view(name='genaf_static', path="genaf:static/")
add_route_view( config, 'genaf.views.marker', 'genaf.marker',
'/marker',
'/marker/@@action',
'/marker/{id}@@edit',
'/marker/{id}@@save',
('/marker/{id}', 'view')
)
add_route_view( config, 'genaf.views.panel', 'genaf.panel',
'/panel',
'/panel/@@action',
'/panel/{id}@@edit',
'/panel/{id}@@save',
('/panel/{id}', 'view')
)
add_route_view( config, 'genaf.views.batch', 'genaf.batch',
'/batch',
'/batch/@@action',
'/batch/{id}@@edit',
'/batch/{id}@@save',
('/batch/{id}', 'view')
)
add_route_view( config, 'genaf.views.sample', 'genaf.sample',
'/sample',
'/sample/@@action',
'/sample/{id}@@edit',
'/sample/{id}@@save',
('/sample/{id}', 'view')
)
add_route_view( config, 'genaf.views.location', 'genaf.location',
'/location',
'/location/@@action',
'/location/{id}@@edit',
'/location/{id}@@save',
('/location/{id}', 'view')
)
add_route_view( config, 'genaf.views.assay', 'genaf.assay',
'/assay',
'/assay/@@action',
'/assay/{id}@@drawchannels',
'/assay/{id}@@edit',
'/assay/{id}@@save',
('/assay/{id}', 'view')
)
add_route_view( config, 'genaf.views.channel', 'genaf.channel',
'/channel/@@action',
('/channel/{id}', 'view'),
)
add_route_view( config, 'genaf.views.uploadmgr', 'genaf.uploadmgr',
'/uploadmgr',
'/uploadmgr/@@action',
'/uploadmgr/{id}@@edit',
'/uploadmgr/{id}@@save',
('/uploadmgr/{id}@@mainpanel', 'mainpanel', 'json'),
('/uploadmgr/{id}@@rpc', 'rpc', 'json'),
('/uploadmgr/{id}@@uploaddata', 'uploaddata', 'json'),
('/uploadmgr/{id}@@uploadinfo', 'uploadinfo', 'json'),
'/uploadmgr/{id}@@template',
('/uploadmgr/{id}', 'view')
)
add_route_view( config, 'genaf.views.famgr', 'genaf.famgr',
'/famgr',
'/famgr/{id}@@process',
('/famgr/{id}', 'view')
)
add_route_view( config, 'genaf.views.task', 'genaf.task',
'/task',
('/task/{id}', 'view'),
)
add_route_view( config, 'rhombus.views.fso', 'rhombus.fso',
'/fso{path:.*}@@view',
'/fso{path:.*}@@edit',
'/fso{path:.*}@@save',
'/fso{path:.*}@@action',
('/fso{path:.*}', 'index'),
)
# tools and analysis
config.add_route('tools-help', '/tools/help')
config.add_view('genaf.views.tools.help.index', route_name='tools-help')
config.add_route('tools-allele', '/tools/allele')
config.add_view('genaf.views.tools.allele.index', route_name='tools-allele')
config.add_route('tools-he', '/tools/he')
config.add_view('genaf.views.tools.he.index', route_name='tools-he')
config.add_route('tools-genotype', '/tools/genotype')
config.add_view('genaf.views.tools.genotype.index', route_name='tools-genotype')
config.add_route('tools-moi', '/tools/moi')
config.add_view('genaf.views.tools.moi.index', route_name='tools-moi')
config.add_route('tools-pcoa', '/tools/pcoa')
config.add_view('genaf.views.tools.pcoa.index', route_name='tools-pcoa')
config.add_route('tools-mca', '/tools/mca')
config.add_view('genaf.views.tools.mca.index', route_name='tools-mca')
config.add_route('tools-export', '/tools/export')
config.add_view('genaf.views.tools.export.index', route_name='tools-export')
config.add_route('tools-fst', '/tools/fst')
config.add_view('genaf.views.tools.fst.index', route_name='tools-fst')
config.add_route('tools-ld', '/tools/ld')
config.add_view('genaf.views.tools.ld.index', route_name='tools-ld')
config.add_route('tools-nj', '/tools/nj')
config.add_view('genaf.views.tools.nj.index', route_name='tools-nj')
config.add_route('tools-sample', '/tools/sample')
config.add_view('genaf.views.tools.sample.index', route_name='tools-sample')
config.add_route('tools-djost', '/tools/djost')
config.add_view('genaf.views.tools.djost.index', route_name='tools-djost')
# utilities
config.add_route('utils-export', '/utils/export')
config.add_view('genaf.views.utils.export.index', route_name='utils-export')
config.add_route('utils-plot', '/utils/plot')
config.add_view('genaf.views.utils.plot.index', route_name='utils-plot')
def init_app( global_config, settings, prefix = '/mgr' ):
# global, shared settings
temp_path = settings['genaf.temp_directory']
set_temp_path( temp_path )
fsomount(TEMP_TOOLS, get_temp_path('', TEMP_TOOLS))
set_func_userid( generic_userid_func )
# preparing for multiprocessing
init_queue(settings)
config = rhombus_init_app( global_config, settings, prefix=prefix )
return config
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
config = Configurator(settings=settings)
config.include('pyramid_chameleon')
config.add_static_view('static', 'static', cache_max_age=3600)
config.add_route('home', '/')
config.scan()
return config.make_wsgi_app()
| lgpl-3.0 |
ChinaQuants/zipline | zipline/protocol.py | 3 | 17052 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
from six import iteritems, iterkeys
import pandas as pd
import numpy as np
from . utils.protocol_utils import Enum
from . utils.math_utils import nanstd, nanmean, nansum
from zipline.utils.algo_instance import get_algo_instance
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
# Datasource type should completely determine the other fields of a
# message with its type.
DATASOURCE_TYPE = Enum(
'AS_TRADED_EQUITY',
'MERGER',
'SPLIT',
'DIVIDEND',
'TRADE',
'TRANSACTION',
'ORDER',
'EMPTY',
'DONE',
'CUSTOM',
'BENCHMARK',
'COMMISSION',
'CLOSE_POSITION'
)
# Expected fields/index values for a dividend Series.
DIVIDEND_FIELDS = [
'declared_date',
'ex_date',
'gross_amount',
'net_amount',
'pay_date',
'payment_sid',
'ratio',
'sid',
]
# Expected fields/index values for a dividend payment Series.
DIVIDEND_PAYMENT_FIELDS = [
'id',
'payment_sid',
'cash_amount',
'share_count',
]
def dividend_payment(data=None):
"""
Take a dictionary whose values are in DIVIDEND_PAYMENT_FIELDS and return a
series representing the payment of a dividend.
Ids are assigned to each historical dividend in
PerformanceTracker.update_dividends. They are guaranteed to be unique
    integers within the context of a single simulation. If @data is non-empty,
    an id is required to identify the historical dividend associated with this
payment.
Additionally, if @data is non-empty, either data['cash_amount'] should be
nonzero or data['payment_sid'] should be an asset identifier and
data['share_count'] should be nonzero.
The returned Series is given its id value as a name so that concatenating
payments results in a DataFrame indexed by id. (Note, however, that the
name value is not used to construct an index when this series is returned
by function passed to `DataFrame.apply`. In such a case, pandas preserves
the index of the DataFrame on which `apply` is being called.)
"""
return pd.Series(
data=data,
name=data['id'] if data is not None else None,
index=DIVIDEND_PAYMENT_FIELDS,
dtype=object,
)
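# A minimal sketch (hypothetical values): a $100 cash payment for historical
# dividend id 1 could be represented as
#   dividend_payment({'id': 1, 'payment_sid': 24, 'cash_amount': 100.0,
#                     'share_count': 0})
# which returns a Series named 1, indexed by DIVIDEND_PAYMENT_FIELDS.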
class Event(object):
def __init__(self, initial_values=None):
if initial_values:
self.__dict__ = initial_values
def __getitem__(self, name):
return getattr(self, name)
def __setitem__(self, name, value):
setattr(self, name, value)
def __delitem__(self, name):
delattr(self, name)
def keys(self):
return self.__dict__.keys()
def __eq__(self, other):
return hasattr(other, '__dict__') and self.__dict__ == other.__dict__
def __contains__(self, name):
return name in self.__dict__
def __repr__(self):
return "Event({0})".format(self.__dict__)
def to_series(self, index=None):
return pd.Series(self.__dict__, index=index)
class Order(Event):
pass
class Portfolio(object):
def __init__(self):
self.capital_used = 0.0
self.starting_cash = 0.0
self.portfolio_value = 0.0
self.pnl = 0.0
self.returns = 0.0
self.cash = 0.0
self.positions = Positions()
self.start_date = None
self.positions_value = 0.0
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return "Portfolio({0})".format(self.__dict__)
def __getstate__(self):
state_dict = copy(self.__dict__)
# Have to convert to primitive dict
state_dict['positions'] = dict(self.positions)
STATE_VERSION = 1
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 1
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("Portfolio saved state is too old.")
self.positions = Positions()
self.positions.update(state.pop('positions'))
self.__dict__.update(state)
class Account(object):
'''
The account object tracks information about the trading account. The
values are updated as the algorithm runs and its keys remain unchanged.
If connected to a broker, one can update these values with the trading
account values as reported by the broker.
'''
def __init__(self):
self.settled_cash = 0.0
self.accrued_interest = 0.0
self.buying_power = float('inf')
self.equity_with_loan = 0.0
self.total_positions_value = 0.0
self.regt_equity = 0.0
self.regt_margin = float('inf')
self.initial_margin_requirement = 0.0
self.maintenance_margin_requirement = 0.0
self.available_funds = 0.0
self.excess_liquidity = 0.0
self.cushion = 0.0
self.day_trades_remaining = float('inf')
self.leverage = 0.0
self.net_leverage = 0.0
self.net_liquidation = 0.0
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return "Account({0})".format(self.__dict__)
def __getstate__(self):
state_dict = copy(self.__dict__)
STATE_VERSION = 1
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 1
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("Account saved state is too old.")
self.__dict__.update(state)
class Position(object):
def __init__(self, sid):
self.sid = sid
self.amount = 0
self.cost_basis = 0.0 # per share
self.last_sale_price = 0.0
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return "Position({0})".format(self.__dict__)
def __getstate__(self):
state_dict = copy(self.__dict__)
STATE_VERSION = 1
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 1
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("Protocol Position saved state is too old.")
self.__dict__.update(state)
class Positions(dict):
def __missing__(self, key):
pos = Position(key)
self[key] = pos
return pos
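# Brief illustration of the __missing__ hook above: looking up an unknown sid
# lazily creates an empty Position for it, e.g. Positions()[24].amount == 0.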
class SIDData(object):
# Cache some data on the class so that this is shared for all instances of
# siddata.
# The dt where we cached the history.
_history_cache_dt = None
    # _history_cache is a dict mapping fields to pd.DataFrames. This is the
# most data we have for a given field for the _history_cache_dt.
_history_cache = {}
# This is the cache that is used for returns. This will have a different
# structure than the other history cache as this is always daily.
_returns_cache_dt = None
_returns_cache = None
# The last dt that we needed to cache the number of minutes.
_minute_bar_cache_dt = None
# If we are in minute mode, there is some cost associated with computing
# the number of minutes that we need to pass to the bar count of history.
# This will remain constant for a given bar and day count.
# This maps days to number of minutes.
_minute_bar_cache = {}
def __init__(self, sid, initial_values=None):
self._sid = sid
self._freqstr = None
# To check if we have data, we use the __len__ which depends on the
# __dict__. Because we are foward defining the attributes needed, we
# need to account for their entrys in the __dict__.
# We will add 1 because we need to account for the _initial_len entry
# itself.
self._initial_len = len(self.__dict__) + 1
if initial_values:
self.__dict__.update(initial_values)
@property
def datetime(self):
"""
Provides an alias from data['foo'].datetime -> data['foo'].dt
        `datetime` was previously provided by adding a separate `datetime`
member of the SIDData object via a generator that wrapped the incoming
data feed and added the field to each equity event.
This alias is intended to be temporary, to provide backwards
compatibility with existing algorithms, but should be considered
deprecated, and may be removed in the future.
"""
return self.dt
def get(self, name, default=None):
return self.__dict__.get(name, default)
def __getitem__(self, name):
return self.__dict__[name]
def __setitem__(self, name, value):
self.__dict__[name] = value
def __len__(self):
return len(self.__dict__) - self._initial_len
def __contains__(self, name):
return name in self.__dict__
def __repr__(self):
return "SIDData({0})".format(self.__dict__)
def _get_buffer(self, bars, field='price', raw=False):
"""
Gets the result of history for the given number of bars and field.
This will cache the results internally.
"""
cls = self.__class__
algo = get_algo_instance()
now = algo.datetime
if now != cls._history_cache_dt:
# For a given dt, the history call for this field will not change.
# We have a new dt, so we should reset the cache.
cls._history_cache_dt = now
cls._history_cache = {}
if field not in self._history_cache \
or bars > len(cls._history_cache[field][0].index):
# If we have never cached this field OR the amount of bars that we
# need for this field is greater than the amount we have cached,
# then we need to get more history.
hst = algo.history(
bars, self._freqstr, field, ffill=True,
)
# Assert that the column holds ints, not security objects.
if not isinstance(self._sid, str):
hst.columns = hst.columns.astype(int)
self._history_cache[field] = (hst, hst.values, hst.columns)
# Slice off only the bars needed. This is because we store the LARGEST
# amount of history for the field, and we might request less than the
# largest from the cache.
buffer_, values, columns = cls._history_cache[field]
if raw:
sid_index = columns.get_loc(self._sid)
return values[-bars:, sid_index]
else:
return buffer_[self._sid][-bars:]
def _get_bars(self, days):
"""
Gets the number of bars needed for the current number of days.
Figures this out based on the algo's data frequency and caches the result.
This caches the result by replacing this function on the object.
This means that after the first call to _get_bars, this method will
point to a new function object.
"""
def daily_get_max_bars(days):
return days
def minute_get_max_bars(days):
# Max number of minutes, regardless of current days or short
# sessions.
return days * 390
def daily_get_bars(days):
return days
def minute_get_bars(days):
cls = self.__class__
now = get_algo_instance().datetime
if now != cls._minute_bar_cache_dt:
cls._minute_bar_cache_dt = now
cls._minute_bar_cache = {}
if days not in cls._minute_bar_cache:
# Cache this calculation to happen once per bar, even if we
# use another transform with the same number of days.
env = get_algo_instance().trading_environment
prev = env.previous_trading_day(now)
ds = env.days_in_range(
env.add_trading_days(-days + 2, prev),
prev,
)
# compute the number of minutes in the (days - 1) days before
# today.
# 210 minutes in an early close and 390 in a full day.
ms = sum(210 if d in env.early_closes else 390 for d in ds)
# Add the number of minutes for today.
ms += int(
(now - env.get_open_and_close(now)[0]).total_seconds() / 60
)
cls._minute_bar_cache[days] = ms + 1 # Account for this minute
return cls._minute_bar_cache[days]
if get_algo_instance().sim_params.data_frequency == 'daily':
self._freqstr = '1d'
# update this method to point to the daily variant.
self._get_bars = daily_get_bars
self._get_max_bars = daily_get_max_bars
else:
self._freqstr = '1m'
# update this method to point to the minute variant.
self._get_bars = minute_get_bars
self._get_max_bars = minute_get_max_bars
# Not actually recursive because we have already cached the new method.
return self._get_bars(days)
def mavg(self, days):
bars = self._get_bars(days)
max_bars = self._get_max_bars(days)
prices = self._get_buffer(max_bars, raw=True)[-bars:]
return nanmean(prices)
def stddev(self, days):
bars = self._get_bars(days)
max_bars = self._get_max_bars(days)
prices = self._get_buffer(max_bars, raw=True)[-bars:]
return nanstd(prices, ddof=1)
def vwap(self, days):
bars = self._get_bars(days)
max_bars = self._get_max_bars(days)
prices = self._get_buffer(max_bars, raw=True)[-bars:]
vols = self._get_buffer(max_bars, field='volume', raw=True)[-bars:]
vol_sum = nansum(vols)
try:
ret = nansum(prices * vols) / vol_sum
except ZeroDivisionError:
ret = np.nan
return ret
def returns(self):
algo = get_algo_instance()
now = algo.datetime
if now != self._returns_cache_dt:
self._returns_cache_dt = now
self._returns_cache = algo.history(2, '1d', 'price', ffill=True)
hst = self._returns_cache[self._sid]
return (hst.iloc[-1] - hst.iloc[0]) / hst.iloc[0]
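# Minimal sketch (illustration only, not part of the original module) of the
# self-replacing-method pattern used by SIDData._get_bars above: the first call
# decides which implementation applies, rebinds the attribute on the instance,
# and every later call goes straight to the chosen function. The class below is
# hypothetical:
#
#     class Example(object):
#         def compute(self, days):
#             def minute_compute(days):
#                 return days * 390
#             # Rebind so subsequent calls skip the decision logic entirely.
#             self.compute = minute_compute
#             # Not actually recursive: the attribute now points at minute_compute.
#             return self.compute(days)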
class BarData(object):
"""
Holds the event data for all sids for a given dt.
This is what is passed as `data` to the `handle_data` function.
Note: Many methods mimic dict methods because what this class replaced
was historically a dictionary subclass.
"""
def __init__(self, data=None):
self._data = data or {}
self._contains_override = None
def __contains__(self, name):
if self._contains_override:
if self._contains_override(name):
return name in self._data
else:
return False
else:
return name in self._data
def has_key(self, name):
"""
DEPRECATED: __contains__ is preferred, but this method is for
compatibility with existing algorithms.
"""
return name in self
def __setitem__(self, name, value):
self._data[name] = value
def __getitem__(self, name):
return self._data[name]
def __delitem__(self, name):
del self._data[name]
def __iter__(self):
for sid, data in iteritems(self._data):
# Allow contains override to filter out sids.
if sid in self:
if len(data):
yield sid
def iterkeys(self):
# Allow contains override to filter out sids.
return (sid for sid in iterkeys(self._data) if sid in self)
def keys(self):
# Allow contains override to filter out sids.
return list(self.iterkeys())
def itervalues(self):
return (value for _sid, value in self.iteritems())
def values(self):
return list(self.itervalues())
def iteritems(self):
return ((sid, value) for sid, value
in iteritems(self._data)
if sid in self)
def items(self):
return list(self.iteritems())
def __len__(self):
return len(self.keys())
def __repr__(self):
return '{0}({1})'.format(self.__class__.__name__, self._data)
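# Illustrative sketch (not part of the original module): BarData is dict-like,
# and an optional _contains_override can hide sids from membership tests and
# iteration. Hypothetical usage with made-up sid data:
#
#     bar_data = BarData({1: sid_data_1, 2: sid_data_2})
#     bar_data[1] is sid_data_1              # True
#     bar_data._contains_override = lambda sid: sid != 2
#     2 in bar_data                          # False, filtered by the override
#     bar_data.keys()                        # [1]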
| apache-2.0 |
eickenberg/scikit-learn | sklearn/decomposition/nmf.py | 1 | 18931 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
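# Illustrative note (added, not original): for a 1-D array this matches
# np.linalg.norm, e.g. norm(np.array([3.0, 4.0])) == 5.0 since
# sqrt(3**2 + 4**2) == 5.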
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
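# Illustrative note (added, not original): the measure is 0 for a constant
# vector and 1 for a one-hot vector, e.g.
#     _sparseness(np.array([1.0, 1.0, 1.0, 1.0]))  # -> 0.0
#     _sparseness(np.array([1.0, 0.0, 0.0, 0.0]))  # -> 1.0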
def check_non_negative(X, whom):
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
n_components : int
The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
eps: float
Truncate all values less than this in output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
Remarks
-------
This implements the algorithm described in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
U, S, V = randomized_svd(X, n_components)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
random_state = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
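# Hypothetical usage sketch (added, not original), assuming a small
# non-negative matrix; the returned factors have the expected shapes:
#     X = np.abs(np.random.RandomState(0).randn(6, 4))
#     W, H = _initialize_nmf(X, n_components=2)
#     W.shape, H.shape   # ((6, 2), (2, 4))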
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
Reference
---------
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
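# Hypothetical usage sketch (added, not original): solve for H >= 0 in
# min ||WH - V||_2 with fixed non-negative V and W:
#     rng = np.random.RandomState(0)
#     V = np.abs(rng.randn(10, 5))
#     W = np.abs(rng.randn(10, 3))
#     H0 = np.abs(rng.randn(3, 5))
#     H, grad, n_iter = _nls_subproblem(V, W, H0, tol=1e-4, max_iter=200)
#     H.shape            # (3, 5)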
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all components
are kept
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
`components_` : array, [n_components, n_features]
Non-negative components of the data.
`reconstruction_err_` : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
random_state = self.random_state
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a')
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar')
elif init == "random":
rng = check_random_state(random_state)
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
| bsd-3-clause |
ndingwall/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 19 | 3140 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the mean and std for each candidate along with the parameter
# settings for all the candidates explored by grid search.
n_candidates = len(grid_search.cv_results_['params'])
for i in range(n_candidates):
print(i, 'params - %s; mean - %0.2f; std - %0.2f'
% (grid_search.cv_results_['params'][i],
grid_search.cv_results_['mean_test_score'][i],
grid_search.cv_results_['std_test_score'][i]))
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
abhishekkrthakur/scikit-learn | sklearn/utils/estimator_checks.py | 1 | 38107 | from __future__ import print_function
import warnings
import sys
import traceback
import inspect
import pickle
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import check_skip_travis
from sklearn.utils.testing import ignore_warnings
from sklearn.base import clone, ClassifierMixin
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning, NotFittedError
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
# NMF
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if isinstance(estimator, BaseRandomProjection):
# Due to the Johnson-Lindenstrauss lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more features than we have in most cases.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
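# Descriptive note (added, not original): struct.calcsize('P') is the size of a
# C pointer in bytes, so this is True only on a 32-bit interpreter (4 * 8 == 32)
# and False on a 64-bit one (8 * 8 == 64).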
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, 'predict_proba'):
estimator.predict_proba(X)
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_transformer(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non-negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises(NotFittedError, transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
if name == "KernelPCA":
transformer.remove_zero_eig = False
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = inspect.getargspec(func).args
assert_true(args[2] in ["y", "Y"])
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 4 * rnd.uniform(size=(10, 3)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
try:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
except NotImplementedError:
# FIXME
# non-standard handling of ducktyping in BaggingEstimator
pass
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_transformer_pickle(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
n_samples, n_features = X.shape
X = StandardScaler().fit_transform(X)
X -= X.min()
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
if not hasattr(transformer, 'transform'):
return
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
random_state = np.random.RandomState(seed=12345)
y_ = np.vstack([y, 2 * y + random_state.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit(X, y_).transform(X)
pickled_transformer = pickle.dumps(transformer)
unpickled_transformer = pickle.loads(pickled_transformer)
pickled_X_pred = unpickled_transformer.transform(X)
assert_array_almost_equal(pickled_X_pred, X_pred)
def check_estimators_partial_fit_n_features(name, Alg):
# check if number of features changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.85)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_unfitted(name, Estimator):
"""Check if NotFittedError is raised when calling predict and related
functions"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
assert_raises(NotFittedError, est.predict, X)
if hasattr(est, 'predict'):
assert_raises(NotFittedError, est.predict, X)
if hasattr(est, 'decision_function'):
assert_raises(NotFittedError, est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raises(NotFittedError, est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raises(NotFittedError, est.predict_log_proba, X)
def check_classifiers_input_shapes(name, Classifier):
iris = load_iris()
X, y = iris.data, iris.target
X, y = shuffle(X, y, random_state=1)
X = StandardScaler().fit_transform(X)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y)
y_pred = classifier.predict(X)
set_random_state(classifier)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
classifier.fit(X, y[:, np.newaxis])
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
assert_equal(len(w), 1, msg)
assert_array_equal(y_pred, classifier.predict(X))
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non-negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_classifiers_pickle(name, Classifier):
X, y = make_blobs(random_state=0)
X, y = shuffle(X, y, random_state=7)
X -= X.min()
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
y_pred = classifier.predict(X)
pickled_classifier = pickle.dumps(classifier)
unpickled_classifier = pickle.loads(pickled_classifier)
pickled_y_pred = unpickled_classifier.predict(X)
assert_array_almost_equal(pickled_y_pred, y_pred)
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
if name == 'OrthogonalMatchingPursuitCV':
# FIXME: This test is unstable on Travis, see issue #3190.
check_skip_travis()
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
y = multioutput_estimator_convert_y_2d(name, y)
if name == 'OrthogonalMatchingPursuitCV':
# FIXME: This test is unstable on Travis, see issue #3190.
check_skip_travis()
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
regressor.predict(X)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
assert_greater(regressor.score(X, y_), 0.5)
def check_regressors_pickle(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
y = multioutput_estimator_convert_y_2d(name, y)
if name == 'OrthogonalMatchingPursuitCV':
# FIXME: This test is unstable on Travis, see issue #3190.
check_skip_travis()
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
regressor.fit(X, y_)
y_pred = regressor.predict(X)
# store old predictions
pickled_regressor = pickle.dumps(regressor)
unpickled_regressor = pickle.loads(pickled_regressor)
pickled_y_pred = unpickled_regressor.predict(X)
assert_array_almost_equal(pickled_y_pred, y_pred)
def check_class_weight_classifiers(name, Classifier):
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.9)
def check_class_weight_auto_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='auto')
classifier.fit(X_train, y_train)
y_pred_auto = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_auto, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_auto_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
# This is a very small dataset, the default n_iter is likely to prevent
# convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='auto')
coef_auto = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
mean_weight = (1. / 3 + 1. / 2) / 2
class_weight = {
1: 1. / 3 / mean_weight,
-1: 1. / 2 / mean_weight,
}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_auto, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
if name == 'MiniBatchDictLearning' or name == 'MiniBatchSparsePCA':
# FIXME
# for MiniBatchDictLearning and MiniBatchSparsePCA
estimator.batch_size = 1
set_fast_parameters(estimator)
set_random_state(estimator)
params = estimator.get_params()
estimator.fit(X, y)
new_params = estimator.get_params()
for k, v in params.items():
assert_false(np.any(new_params[k] != v),
"Estimator %s changes its parameter %s"
" from %s to %s during fit."
% (name, k, v, new_params[k]))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LDA()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(isinstance(estimator.set_params(), Estimator))
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
args, varargs, kws, defaults = inspect.getargspec(init)
except TypeError:
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they need a non-default argument
args = args[2:]
else:
args = args[1:]
if args:
# non-empty list
assert_equal(len(args), len(defaults))
else:
return
for arg, default in zip(args, defaults):
if arg not in params.keys():
# deprecated parameter, not in get_params
assert_true(default is None)
continue
if isinstance(params[arg], np.ndarray):
assert_array_equal(params[arg], default)
else:
assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
# Estimators in mono_output_task_error raise ValueError if y is 1-D.
# Convert into a 2-D y for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
# Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
| bsd-3-clause |
mailhexu/pyDFTutils | build/lib/pyDFTutils/wannier90/pythtb.py | 2 | 153556 | from __future__ import print_function
# PythTB python tight binding module.
# December 22, 2016
__version__='1.7.1'
# Copyright 2010, 2012, 2016 by Sinisa Coh and David Vanderbilt
#
# This file is part of PythTB. PythTB is free software: you can
# redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# PythTB is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# A copy of the GNU General Public License should be available
# alongside this source in a file named gpl-3.0.txt. If not,
# see <http://www.gnu.org/licenses/>.
#
# PythTB is availabe at http://www.physics.rutgers.edu/pythtb/
import numpy as np # numerics for matrices
import sys # for exiting
import copy # for deepcopying
class tb_model(object):
r"""
This is the main class of the PythTB package which contains all
information for the tight-binding model.
:param dim_k: Dimensionality of reciprocal space, i.e., specifies how
many directions are considered to be periodic.
:param dim_r: Dimensionality of real space, i.e., specifies how many
real space lattice vectors there are and how many coordinates are
needed to specify the orbital coordinates.
.. note:: Parameter *dim_r* can be larger than *dim_k*! For example,
a polymer is a three-dimensional molecule (one needs three
coordinates to specify orbital positions), but it is periodic
along only one direction. For a polymer, therefore, we should
have *dim_k* equal to 1 and *dim_r* equal to 3. See similar example
here: :ref:`trestle-example`.
:param lat: Array containing lattice vectors in Cartesian
coordinates (in arbitrary units). In the example below, the first
lattice vector has coordinates [1.0,0.5] while the second
one has coordinates [0.0,2.0]. By default, lattice vectors
are an identity matrix.
:param orb: Array containing reduced coordinates of all
tight-binding orbitals. In the example below, the first
orbital is defined with reduced coordinates [0.2,0.3]. Its
Cartesian coordinates are therefore 0.2 times the first
lattice vector plus 0.3 times the second lattice vector.
If *orb* is an integer, the code will assume that there are that many
orbitals, all at the origin of the unit cell. By default
the code will assume a single orbital at the origin.
:param per: This is an optional parameter giving a list of lattice
vectors which are considered to be periodic. In the example below,
only the vector [0.0,2.0] is considered to be periodic (since
per=[1]). By default, all lattice vectors are assumed to be
periodic. If dim_k is smaller than dim_r, then by default the first
dim_k vectors are considered to be periodic.
:param nspin: Number of explicit spin components assumed for each
orbital in *orb*. Allowed values of *nspin* are *1* and *2*. If
*nspin* is 1 then the model is spinless, if *nspin* is 2 then it
is explicitly a spinfull model and each orbital is assumed to
have two spin components. Default value of this parameter is
*1*. Of course one can make spinfull calculation even with
*nspin* set to 1, but then the user must keep track of which
orbital corresponds to which spin component.
Example usage::
# Creates model that is two-dimensional in real space but only
# one-dimensional in reciprocal space. Second lattice vector is
# chosen to be periodic (since per=[1]). Three orbital
# coordinates are specified.
tb = tb_model(1, 2,
lat=[[1.0, 0.5], [0.0, 2.0]],
orb=[[0.2, 0.3], [0.1, 0.1], [0.2, 0.2]],
per=[1])
"""
def __init__(self,dim_k,dim_r,lat=None,orb=None,per=None,nspin=1):
# initialize _dim_k = dimensionality of k-space (integer)
if type(dim_k).__name__!='int':
raise Exception("\n\nArgument dim_k not an integer")
if dim_k < 0 or dim_k > 4:
raise Exception("\n\nArgument dim_k out of range. Must be between 0 and 4.")
self._dim_k=dim_k
# initialize _dim_r = dimensionality of r-space (integer)
if type(dim_r).__name__!='int':
raise Exception("\n\nArgument dim_r not an integer")
if dim_r < dim_k or dim_r > 4:
raise Exception("\n\nArgument dim_r out of range. Must be dim_r>=dim_k and dim_r<=4.")
self._dim_r=dim_r
# initialize _lat = lattice vectors, array of dim_r*dim_r
# format is _lat(lat_vec_index,cartesian_index)
# special option: 'unit' implies unit matrix, also default value
if lat is None or (isinstance(lat, str) and lat == 'unit'):
self._lat=np.identity(dim_r,float)
print(" Lattice vectors not specified! I will use identity matrix.")
elif type(lat).__name__ not in ['list','ndarray']:
raise Exception("\n\nArgument lat is not a list.")
else:
self._lat=np.array(lat,dtype=float)
if self._lat.shape!=(dim_r,dim_r):
raise Exception("\n\nWrong lat array dimensions")
# check that volume is not zero and that have right handed system
if dim_r>0:
if np.abs(np.linalg.det(self._lat))<1.0E-6:
raise Exception("\n\nLattice vectors length/area/volume too close to zero, or zero.")
if np.linalg.det(self._lat)<0.0:
raise Exception("\n\nLattice vectors need to form right handed system.")
# initialize _norb = number of basis orbitals per cell
# and _orb = orbital locations, in reduced coordinates
# format is _orb(orb_index,lat_vec_index)
# special option: 'bravais' implies one atom at origin
if orb is None or (isinstance(orb, str) and orb == 'bravais'):
self._norb=1
self._orb=np.zeros((1,dim_r))
print(" Orbital positions not specified. I will assume a single orbital at the origin.")
elif type(orb).__name__=='int':
self._norb=orb
self._orb=np.zeros((orb,dim_r))
print(" Orbital positions not specified. I will assume ",orb," orbitals at the origin")
elif type(orb).__name__ not in ['list','ndarray']:
raise Exception("\n\nArgument orb is not a list or an integer")
else:
self._orb=np.array(orb,dtype=float)
if len(self._orb.shape)!=2:
raise Exception("\n\nWrong orb array rank")
self._norb=self._orb.shape[0] # number of orbitals
if self._orb.shape[1]!=dim_r:
raise Exception("\n\nWrong orb array dimensions")
# choose which self._dim_k out of self._dim_r dimensions are
# to be considered periodic.
if per==None:
# by default first _dim_k dimensions are periodic
self._per=list(range(self._dim_k))
else:
if len(per)!=self._dim_k:
raise Exception("\n\nWrong choice of periodic/infinite direction!")
# store which directions are the periodic ones
self._per=per
# remember number of spin components
if nspin not in [1,2]:
raise Exception("\n\nWrong value of nspin, must be 1 or 2!")
self._nspin=nspin
# by default, assume model did not come from w90 object and that
# position operator is diagonal
self._assume_position_operator_diagonal=True
# compute number of electronic states at each k-point
self._nsta=self._norb*self._nspin
# Initialize onsite energies to zero
if self._nspin==1:
self._site_energies=np.zeros((self._norb),dtype=float)
elif self._nspin==2:
self._site_energies=np.zeros((self._norb,2,2),dtype=complex)
# remember which onsite energies user has specified
self._site_energies_specified=np.zeros(self._norb,dtype=bool)
self._site_energies_specified[:]=False
# Initialize hoppings to empty list
self._hoppings=[]
# The onsite energies and hoppings are not specified
# when creating a 'tb_model' object. They are specified
# subsequently by separate function calls defined below.
def set_onsite(self,onsite_en,ind_i=None,mode="set"):
r"""
Defines on-site energies for tight-binding orbitals. One can
either set energy for one tight-binding orbital, or all at
once.
.. warning:: In previous version of PythTB this function was
called *set_sites*. For backwards compatibility one can still
use that name but that feature will be removed in future
releases.
:param onsite_en: Either a list of on-site energies (in
arbitrary units) for each orbital, or a single on-site
energy (in this case *ind_i* parameter must be given). In
the case when *nspin* is *1* (spinless) then each on-site
energy is a single number. If *nspin* is *2* then on-site
energy can be given either as a single number, or as an
array of four numbers, or 2x2 matrix. If a single number is
given, it is interpreted as on-site energy for both up and
down spin component. If an array of four numbers is given,
these are the coefficients of I, sigma_x, sigma_y, and
sigma_z (that is, the 2x2 identity and the three Pauli spin
matrices) respectively. Finally, full 2x2 matrix can be
given as well. If this function is never called, on-site
energy is assumed to be zero.
:param ind_i: Index of tight-binding orbital whose on-site
energy you wish to change. This parameter should be
specified only when *onsite_en* is a single number (not a
list).
:param mode: Similar to parameter *mode* in function *set_hop*.
Specifies the way in which parameter *onsite_en* is
used. It can either set value of on-site energy from scratch,
reset it, or add to it.
* "set" -- Default value. On-site energy is set to value of
*onsite_en* parameter. One can use "set" on each
tight-binding orbital only once.
* "reset" -- Specifies on-site energy to given value. This
function can be called multiple times for the same
orbital(s).
* "add" -- Adds to the previous value of on-site
energy. This function can be called multiple times for the
same orbital(s).
Example usage::
# Defines on-site energy of first orbital to be 0.0,
# second 1.0, and third 2.0
tb.set_onsite([0.0, 1.0, 2.0])
# Increases value of on-site energy for second orbital
tb.set_onsite(100.0, 1, mode="add")
# Changes on-site energy of second orbital to zero
tb.set_onsite(0.0, 1, mode="reset")
# Sets all three on-site energies at once
tb.set_onsite([2.0, 3.0, 4.0], mode="reset")
"""
if ind_i==None:
if (len(onsite_en)!=self._norb):
raise Exception("\n\nWrong number of site energies")
# make sure ind_i is not out of scope
if ind_i!=None:
if ind_i<0 or ind_i>=self._norb:
raise Exception("\n\nIndex ind_i out of scope.")
# make sure that onsite terms are real/hermitian
if ind_i!=None:
to_check=[onsite_en]
else:
to_check=onsite_en
for ons in to_check:
if np.array(ons).shape==():
if np.abs(np.array(ons)-np.array(ons).conjugate())>1.0E-8:
raise Exception("\n\nOnsite energy should not have imaginary part!")
elif np.array(ons).shape==(4,):
if np.max(np.abs(np.array(ons)-np.array(ons).conjugate()))>1.0E-8:
raise Exception("\n\nOnsite energy or Zeeman field should not have imaginary part!")
elif np.array(ons).shape==(2,2):
if np.max(np.abs(np.array(ons)-np.array(ons).T.conjugate()))>1.0E-8:
raise Exception("\n\nOnsite matrix should be Hermitian!")
# specifying onsite energies from scratch, can be called only once
if mode.lower()=="set":
# specifying only one site at a time
if ind_i!=None:
# make sure we specify things only once
if self._site_energies_specified[ind_i]==True:
raise Exception("\n\nOnsite energy for this site was already specified! Use mode=\"reset\" or mode=\"add\".")
else:
self._site_energies[ind_i]=self._val_to_block(onsite_en)
self._site_energies_specified[ind_i]=True
# specifying all sites at once
else:
# make sure we specify things only once
if True in self._site_energies_specified[:]:
raise Exception("\n\nSome or all onsite energies were already specified! Use mode=\"reset\" or mode=\"add\".")
else:
for i in range(self._norb):
self._site_energies[i]=self._val_to_block(onsite_en[i])
self._site_energies_specified[:]=True
# reset values of onsite terms, without adding to previous value
elif mode.lower()=="reset":
# specifying only one site at a time
if ind_i!=None:
self._site_energies[ind_i]=self._val_to_block(onsite_en)
self._site_energies_specified[ind_i]=True
# specifying all sites at once
else:
for i in range(self._norb):
self._site_energies[i]=self._val_to_block(onsite_en[i])
self._site_energies_specified[:]=True
# add to previous value
elif mode.lower()=="add":
# specifying only one site at a time
if ind_i!=None:
self._site_energies[ind_i]+=self._val_to_block(onsite_en)
self._site_energies_specified[ind_i]=True
# specifying all sites at once
else:
for i in range(self._norb):
self._site_energies[i]+=self._val_to_block(onsite_en[i])
self._site_energies_specified[:]=True
else:
raise Exception("\n\nWrong value of mode parameter")
def set_hop(self,hop_amp,ind_i,ind_j,ind_R=None,mode="set",allow_conjugate_pair=False):
r"""
Defines hopping parameters between tight-binding orbitals. In
the notation used in section 3.1 equation 3.6 of
:download:`notes on tight-binding formalism
<misc/pythtb-formalism.pdf>` this function specifies the
following object
.. math::
H_{ij}({\bf R})= \langle \phi_{{\bf 0} i} \vert H \vert \phi_{{\bf R},j} \rangle
Where :math:`\langle \phi_{{\bf 0} i} \vert` is i-th
tight-binding orbital in the home unit cell and
:math:`\vert \phi_{{\bf R},j} \rangle` is j-th tight-binding orbital in
unit cell shifted by lattice vector :math:`{\bf R}`. :math:`H`
is the Hamiltonian.
(Strictly speaking, this term specifies hopping amplitude
for hopping from site *j+R* to site *i*, not vice-versa.)
Hopping in the opposite direction is automatically included by
the code since
.. math::
H_{ji}(-{\bf R})= \left[ H_{ij}({\bf R}) \right]^{*}
.. warning::
There is no need to specify hoppings in both :math:`i
\rightarrow j+R` direction and opposite :math:`j
\rightarrow i-R` direction since that is done
automatically. If you want to specify hoppings in both
directions, see description of parameter
*allow_conjugate_pair*.
.. warning:: In previous version of PythTB this function was
called *add_hop*. For backwards compatibility one can still
use that name but that feature will be removed in future
releases.
:param hop_amp: Hopping amplitude; can be real or complex
number, equals :math:`H_{ij}({\bf R})`. If *nspin* is *2*
then hopping amplitude can be given either as a single
number, or as an array of four numbers, or as 2x2 matrix. If
a single number is given, it is interpreted as hopping
amplitude for both up and down spin component. If an array
of four numbers is given, these are the coefficients of I,
sigma_x, sigma_y, and sigma_z (that is, the 2x2 identity and
the three Pauli spin matrices) respectively. Finally, full
2x2 matrix can be given as well.
:param ind_i: Index of bra orbital from the bracket :math:`\langle
\phi_{{\bf 0} i} \vert H \vert \phi_{{\bf R},j} \rangle`. This
orbital is assumed to be in the home unit cell.
:param ind_j: Index of ket orbital from the bracket :math:`\langle
\phi_{{\bf 0} i} \vert H \vert \phi_{{\bf R},j} \rangle`. This
orbital does not have to be in the home unit cell; its unit cell
position is determined by parameter *ind_R*.
:param ind_R: Specifies, in reduced coordinates, the shift of
the ket orbital. The number of coordinates must equal the
dimensionality in real space (*dim_r* parameter) for consistency,
but only the periodic directions of ind_R will be considered. If
reciprocal space is zero-dimensional (as in a molecule),
this parameter does not need to be specified.
:param mode: Similar to parameter *mode* in function *set_onsite*.
Specifies the way in which parameter *hop_amp* is
used. It can either set value of hopping term from scratch,
reset it, or add to it.
* "set" -- Default value. Hopping term is set to value of
*hop_amp* parameter. One can use "set" for each triplet of
*ind_i*, *ind_j*, *ind_R* only once.
* "reset" -- Specifies on-site energy to given value. This
function can be called multiple times for the same triplet
*ind_i*, *ind_j*, *ind_R*.
* "add" -- Adds to the previous value of hopping term This
function can be called multiple times for the same triplet
*ind_i*, *ind_j*, *ind_R*.
If *set_hop* was ever called with *allow_conjugate_pair* set
to True, then it is possible that user has specified both
:math:`i \rightarrow j+R` and conjugate pair :math:`j
\rightarrow i-R`. In this case, "set", "reset", and "add"
parameters will treat triplet *ind_i*, *ind_j*, *ind_R* and
conjugate triplet *ind_j*, *ind_i*, *-ind_R* as distinct.
:param allow_conjugate_pair: Default value is *False*. If set
to *True* code will allow user to specify hopping
:math:`i \rightarrow j+R` even if conjugate-pair hopping
:math:`j \rightarrow i-R` has been
specified. If both terms are specified, code will
still count each term two times.
Example usage::
# Specifies complex hopping amplitude between first orbital in home
# unit cell and third orbital in neighbouring unit cell.
tb.set_hop(0.3+0.4j, 0, 2, [0, 1])
# change value of this hopping
tb.set_hop(0.1+0.2j, 0, 2, [0, 1], mode="reset")
# add to previous value (after this function call below,
# hopping term amplitude is 100.1+0.2j)
tb.set_hop(100.0, 0, 2, [0, 1], mode="add")
"""
#
if self._dim_k!=0 and (ind_R is None):
raise Exception("\n\nNeed to specify ind_R!")
# if necessary convert from integer to array
if self._dim_k==1 and type(ind_R).__name__=='int':
tmpR=np.zeros(self._dim_r,dtype=int)
tmpR[self._per]=ind_R
ind_R=tmpR
# check length of ind_R
if self._dim_k!=0:
if len(ind_R)!=self._dim_r:
raise Exception("\n\nLength of input ind_R vector must equal dim_r! Even if dim_k<dim_r.")
# make sure ind_i and ind_j are not out of scope
if ind_i<0 or ind_i>=self._norb:
raise Exception("\n\nIndex ind_i out of scope.")
if ind_j<0 or ind_j>=self._norb:
raise Exception("\n\nIndex ind_j out of scope.")
# do not allow onsite hoppings to be specified here because then they
# will be double-counted
if self._dim_k==0:
if ind_i==ind_j:
raise Exception("\n\nDo not use set_hop for onsite terms. Use set_onsite instead!")
else:
if ind_i==ind_j:
all_zer=True
for k in self._per:
if int(ind_R[k])!=0:
all_zer=False
if all_zer==True:
raise Exception("\n\nDo not use set_hop for onsite terms. Use set_onsite instead!")
#
# make sure that if <i|H|j+R> is specified that <j|H|i-R> is not!
if allow_conjugate_pair==False:
for h in self._hoppings:
if ind_i==h[2] and ind_j==h[1]:
if self._dim_k==0:
raise Exception(\
"""\n
Following matrix element was already implicitly specified:
i="""+str(ind_i)+" j="+str(ind_j)+"""
Remember, specifying <i|H|j> automatically specifies <j|H|i>. For
consistency, specify all hoppings for a given bond in the same
direction. (Or, alternatively, see the documentation on the
'allow_conjugate_pair' flag.)
""")
elif False not in (np.array(ind_R)[self._per]==(-1)*np.array(h[3])[self._per]):
raise Exception(\
"""\n
Following matrix element was already implicitly specified:
i="""+str(ind_i)+" j="+str(ind_j)+" R="+str(ind_R)+"""
Remember, specifying <i|H|j+R> automatically specifies <j|H|i-R>. For
consistency, specify all hoppings for a given bond in the same
direction. (Or, alternatively, see the documentation on the
'allow_conjugate_pair' flag.)
""")
# convert to 2by2 matrix if needed
hop_use=self._val_to_block(hop_amp)
# hopping term parameters to be stored
if self._dim_k==0:
new_hop=[hop_use,int(ind_i),int(ind_j)]
else:
new_hop=[hop_use,int(ind_i),int(ind_j),np.array(ind_R)]
#
# see if there is a hopping term with same i,j,R
use_index=None
for iih,h in enumerate(self._hoppings):
# check if the same
same_ijR=False
if ind_i==h[1] and ind_j==h[2]:
if self._dim_k==0:
same_ijR=True
else:
if False not in (np.array(ind_R)[self._per]==np.array(h[3])[self._per]):
same_ijR=True
# if they are the same then store index of site at which they are the same
if same_ijR==True:
use_index=iih
#
# specifying hopping terms from scratch, can be called only once
if mode.lower()=="set":
# make sure we specify things only once
if use_index!=None:
raise Exception("\n\nHopping energy for this site was already specified! Use mode=\"reset\" or mode=\"add\".")
else:
self._hoppings.append(new_hop)
# reset value of hopping term, without adding to previous value
elif mode.lower()=="reset":
if use_index!=None:
self._hoppings[use_index]=new_hop
else:
self._hoppings.append(new_hop)
# add to previous value
elif mode.lower()=="add":
if use_index!=None:
self._hoppings[use_index][0]+=new_hop[0]
else:
self._hoppings.append(new_hop)
else:
raise Exception("\n\nWrong value of mode parameter")
def _val_to_block(self,val):
"""If nspin=2 then returns a 2 by 2 matrix from the input
parameters. If only one real number is given in the input then
assume that this is the diagonal term. If array with four
elements is given then first one is the diagonal term, and
other three are Zeeman field direction. If given a 2 by 2
matrix, just return it. If nspin=1 then just returns val."""
# spinless case
if self._nspin==1:
return val
# spinfull case
elif self._nspin==2:
# matrix to return
ret=np.zeros((2,2),dtype=complex)
#
use_val=np.array(val)
# only one number is given
if use_val.shape==():
ret[0,0]+=use_val
ret[1,1]+=use_val
# if four numbers are given
elif use_val.shape==(4,):
# diagonal
ret[0,0]+=use_val[0]
ret[1,1]+=use_val[0]
# sigma_x
ret[0,1]+=use_val[1]
ret[1,0]+=use_val[1]
# sigma_y
ret[0,1]+=use_val[2]*(-1.0j)
ret[1,0]+=use_val[2]*( 1.0j)
# sigma_z
ret[0,0]+=use_val[3]
ret[1,1]+=use_val[3]*(-1.0)
# if 2 by 2 matrix is given
elif use_val.shape==(2,2):
return use_val
else:
raise Exception(\
"""\n
Wrong format of the on-site or hopping term. Must be single number, or
in the case of a spinfull model can be array of four numbers or 2x2
matrix.""")
return ret
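# Hedged worked example of the spin-block convention implemented in
# _val_to_block above (illustration only, nothing is executed here):
# with nspin=2, passing the four numbers [E, hx, hy, hz] builds the block
#
#     [[ E + hz,      hx - 1j*hy ],
#      [ hx + 1j*hy,  E - hz     ]]
#
# i.e. E*I + hx*sigma_x + hy*sigma_y + hz*sigma_z. For instance the input
# [0.0, 0.0, 0.0, 1.0] yields diag(+1, -1), a pure sigma_z (Zeeman-like) term.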
def display(self):
r"""
Prints on the screen some information about this tight-binding
model. This function doesn't take any parameters.
"""
print('---------------------------------------')
print('report of tight-binding model')
print('---------------------------------------')
print('k-space dimension =',self._dim_k)
print('r-space dimension =',self._dim_r)
print('number of spin components =',self._nspin)
print('periodic directions =',self._per)
print('number of orbitals =',self._norb)
print('number of electronic states =',self._nsta)
print('lattice vectors:')
for i,o in enumerate(self._lat):
print(" #",_nice_int(i,2)," ===> [", end=' ')
for j,v in enumerate(o):
print(_nice_float(v,7,4), end=' ')
if j!=len(o)-1:
print(",", end=' ')
print("]")
print('positions of orbitals:')
for i,o in enumerate(self._orb):
print(" #",_nice_int(i,2)," ===> [", end=' ')
for j,v in enumerate(o):
print(_nice_float(v,7,4), end=' ')
if j!=len(o)-1:
print(",", end=' ')
print("]")
print('site energies:')
for i,site in enumerate(self._site_energies):
print(" #",_nice_int(i,2)," ===> ", end=' ')
if self._nspin==1:
print(_nice_float(site,7,4))
elif self._nspin==2:
print(str(site).replace("\n"," "))
print('hoppings:')
for i,hopping in enumerate(self._hoppings):
print("<",_nice_int(hopping[1],2),"| H |",_nice_int(hopping[2],2), end=' ')
if len(hopping)==4:
print("+ [", end=' ')
for j,v in enumerate(hopping[3]):
print(_nice_int(v,2), end=' ')
if j!=len(hopping[3])-1:
print(",", end=' ')
else:
print("]", end=' ')
print("> ===> ", end=' ')
if self._nspin==1:
print(_nice_complex(hopping[0],7,4))
elif self._nspin==2:
print(str(hopping[0]).replace("\n"," "))
print()
def visualize(self,dir_first,dir_second=None,eig_dr=None,draw_hoppings=True,ph_color="black"):
r"""
Rudimentary function for visualizing tight-binding model geometry,
hopping between tight-binding orbitals, and electron eigenstates.
If eigenvector is not drawn, then orbitals in home cell are drawn
as red circles, and those in neighboring cells are drawn with
different shade of red. Hopping term directions are drawn with
green lines connecting two orbitals. Origin of unit cell is
indicated with blue dot, while real space unit vectors are drawn
with blue lines.
If eigenvector is drawn, then electron eigenstate on each orbital
is drawn with a circle whose size is proportional to wavefunction
amplitude while its color depends on the phase. There are various
coloring schemes for the phase factor; see more details under
*ph_color* parameter. If eigenvector is drawn and coloring scheme
is "red-blue" or "wheel", all other elements of the picture are
drawn in gray or black.
:param dir_first: First index of Cartesian coordinates used for
plotting.
:param dir_second: Second index of Cartesian coordinates used for
plotting. For example if dir_first=0 and dir_second=2, and
Cartesian coordinates of some orbital is [2.0,4.0,6.0] then it
will be drawn at coordinate [2.0,6.0]. If dimensionality of real
space (*dim_r*) is zero or one then dir_second should not be
specified.
:param eig_dr: Optional parameter specifying eigenstate to
plot. If specified, this should be one-dimensional array of
complex numbers specifying wavefunction at each orbital in
the tight-binding basis. If not specified, eigenstate is not
drawn.
:param draw_hoppings: Optional parameter specifying whether to
draw all allowed hopping terms in the tight-binding
model. Default value is True.
:param ph_color: Optional parameter determining the way
eigenvector phase factors are translated into color. Default
value is "black". Convention of the wavefunction phase is as
in convention 1 in section 3.1 of :download:`notes on
tight-binding formalism <misc/pythtb-formalism.pdf>`. In
other words, these wavefunction phases are in correspondence
with cell-periodic functions :math:`u_{n {\bf k}} ({\bf r})`
not :math:`\Psi_{n {\bf k}} ({\bf r})`.
* "black" -- phase of eigenvectors are ignored and wavefunction
is always colored in black.
* "red-blue" -- zero phase is drawn red, while phases or pi or
-pi are drawn blue. Phases in between are interpolated between
red and blue. Some phase information is lost in this coloring
because phases of +phi and -phi have the same color.
* "wheel" -- each phase is given unique color. In steps of pi/3
starting from 0, colors are assigned (in increasing hue) as:
red, yellow, green, cyan, blue, magenta, red.
:returns:
* **fig** -- Figure object from matplotlib.pyplot module
that can be used to save the figure in PDF, EPS or similar
format, for example using fig.savefig("name.pdf") command.
* **ax** -- Axes object from matplotlib.pyplot module that can be
used to tweak the plot, for example by adding a plot title
ax.set_title("Title goes here").
Example usage::
# Draws x-y projection of tight-binding model
# tweaks figure and saves it as a PDF.
(fig, ax) = tb.visualize(0, 1)
ax.set_title("Title goes here")
fig.savefig("model.pdf")
See also these examples: :ref:`edge-example`,
:ref:`visualize-example`.
"""
# check the format of eig_dr
if not (eig_dr is None):
if eig_dr.shape!=(self._norb,):
raise Exception("\n\nWrong format of eig_dr! Must be array of size norb.")
# check that ph_color is correct
if ph_color not in ["black","red-blue","wheel"]:
raise Exception("\n\nWrong value of ph_color parameter!")
# check if dir_second had to be specified
if dir_second==None and self._dim_r>1:
raise Exception("\n\nNeed to specify index of second coordinate for projection!")
# start a new figure
import matplotlib.pyplot as plt
fig=plt.figure(figsize=[plt.rcParams["figure.figsize"][0],
plt.rcParams["figure.figsize"][0]])
ax=fig.add_subplot(111, aspect='equal')
def proj(v):
"Project vector onto drawing plane"
coord_x=v[dir_first]
if dir_second==None:
coord_y=0.0
else:
coord_y=v[dir_second]
return [coord_x,coord_y]
def to_cart(red):
"Convert reduced to Cartesian coordinates"
return np.dot(red,self._lat)
# define colors to be used in plotting everything
# except eigenvectors
if (eig_dr is None) or ph_color=="black":
c_cell="b"
c_orb="r"
c_nei=[0.85,0.65,0.65]
c_hop="g"
else:
c_cell=[0.4,0.4,0.4]
c_orb=[0.0,0.0,0.0]
c_nei=[0.6,0.6,0.6]
c_hop=[0.0,0.0,0.0]
# determine color scheme for eigenvectors
def color_to_phase(ph):
if ph_color=="black":
return "k"
if ph_color=="red-blue":
ph=np.abs(ph/np.pi)
return [1.0-ph,0.0,ph]
if ph_color=="wheel":
if ph<0.0:
ph=ph+2.0*np.pi
ph=6.0*ph/(2.0*np.pi)
x_ph=1.0-np.abs(ph%2.0-1.0)
if ph>=0.0 and ph<1.0: ret_col=[1.0 ,x_ph,0.0 ]
if ph>=1.0 and ph<2.0: ret_col=[x_ph,1.0 ,0.0 ]
if ph>=2.0 and ph<3.0: ret_col=[0.0 ,1.0 ,x_ph]
if ph>=3.0 and ph<4.0: ret_col=[0.0 ,x_ph,1.0 ]
if ph>=4.0 and ph<5.0: ret_col=[x_ph,0.0 ,1.0 ]
if ph>=5.0 and ph<=6.0: ret_col=[1.0 ,0.0 ,x_ph]
return ret_col
# draw origin
ax.plot([0.0],[0.0],"o",c=c_cell,mec="w",mew=0.0,zorder=7,ms=4.5)
# first draw unit cell vectors which are considered to be periodic
for i in self._per:
# pick a unit cell vector and project it down to the drawing plane
vec=proj(self._lat[i])
ax.plot([0.0,vec[0]],[0.0,vec[1]],"-",c=c_cell,lw=1.5,zorder=7)
# now draw all orbitals
for i in range(self._norb):
# find position of orbital in cartesian coordinates
pos=to_cart(self._orb[i])
pos=proj(pos)
ax.plot([pos[0]],[pos[1]],"o",c=c_orb,mec="w",mew=0.0,zorder=10,ms=4.0)
# draw hopping terms
if draw_hoppings==True:
for h in self._hoppings:
# draw both i->j+R and i-R->j hop
for s in range(2):
# get "from" and "to" coordinates
pos_i=np.copy(self._orb[h[1]])
pos_j=np.copy(self._orb[h[2]])
# add also lattice vector if not 0-dim
if self._dim_k!=0:
if s==0:
pos_j[self._per]=pos_j[self._per]+h[3][self._per]
if s==1:
pos_i[self._per]=pos_i[self._per]-h[3][self._per]
# project down vector to the plane
pos_i=np.array(proj(to_cart(pos_i)))
pos_j=np.array(proj(to_cart(pos_j)))
# add also one point in the middle to bend the curve
prcnt=0.05 # bend always by this amount
pos_mid=(pos_i+pos_j)*0.5
dif=pos_j-pos_i # difference vector
orth=np.array([dif[1],-1.0*dif[0]]) # orthogonal to difference vector
orth=orth/np.sqrt(np.dot(orth,orth)) # normalize
pos_mid=pos_mid+orth*prcnt*np.sqrt(np.dot(dif,dif)) # shift mid point in orthogonal direction
# draw hopping
all_pnts=np.array([pos_i,pos_mid,pos_j]).T
ax.plot(all_pnts[0],all_pnts[1],"-",c=c_hop,lw=0.75,zorder=8)
# draw "from" and "to" sites
ax.plot([pos_i[0]],[pos_i[1]],"o",c=c_nei,zorder=9,mew=0.0,ms=4.0,mec="w")
ax.plot([pos_j[0]],[pos_j[1]],"o",c=c_nei,zorder=9,mew=0.0,ms=4.0,mec="w")
# now draw the eigenstate
if not (eig_dr is None):
for i in range(self._norb):
# find position of orbital in cartesian coordinates
pos=to_cart(self._orb[i])
pos=proj(pos)
# find norm of eigenfunction at this point
nrm=(eig_dr[i]*eig_dr[i].conjugate()).real
# rescale and get size of circle
nrm_rad=2.0*nrm*float(self._norb)
# get color based on the phase of the eigenstate
phase=np.angle(eig_dr[i])
c_ph=color_to_phase(phase)
ax.plot([pos[0]],[pos[1]],"o",c=c_ph,mec="w",mew=0.0,ms=nrm_rad,zorder=11,alpha=0.8)
# center the image
# first get the current limit, which is probably tight
xl=ax.set_xlim()
yl=ax.set_ylim()
# now get the center of current limit
centx=(xl[1]+xl[0])*0.5
centy=(yl[1]+yl[0])*0.5
# now get the maximal size (lengthwise or heightwise)
mx=max([xl[1]-xl[0],yl[1]-yl[0]])
# set new limits
extr=0.05 # add some boundary as well
ax.set_xlim(centx-mx*(0.5+extr),centx+mx*(0.5+extr))
ax.set_ylim(centy-mx*(0.5+extr),centy+mx*(0.5+extr))
# return a figure and axes to the user
return (fig,ax)
def get_num_orbitals(self):
"Returns number of orbitals in the model."
return self._norb
def get_orb(self):
"Returns reduced coordinates of orbitals in format [orbital,coordinate.]"
return self._orb.copy()
def get_lat(self):
"Returns lattice vectors in format [vector,coordinate]."
return self._lat.copy()
def _gen_ham(self,k_input=None):
"""Generate Hamiltonian for a certain k-point,
K-point is given in reduced coordinates!"""
kpnt=np.array(k_input)
if not (k_input is None):
# if kpnt is just a number then convert it to an array
if len(kpnt.shape)==0:
kpnt=np.array([kpnt])
# check that k-vector is of correct size
if kpnt.shape!=(self._dim_k,):
raise Exception("\n\nk-vector of wrong shape!")
else:
if self._dim_k!=0:
raise Exception("\n\nHave to provide a k-vector!")
# zero the Hamiltonian matrix
if self._nspin==1:
ham=np.zeros((self._norb,self._norb),dtype=complex)
elif self._nspin==2:
ham=np.zeros((self._norb,2,self._norb,2),dtype=complex)
# modify diagonal elements
for i in range(self._norb):
if self._nspin==1:
ham[i,i]=self._site_energies[i]
elif self._nspin==2:
ham[i,:,i,:]=self._site_energies[i]
# go over all hoppings
for hopping in self._hoppings:
# get all data for the hopping parameter
if self._nspin==1:
amp=complex(hopping[0])
elif self._nspin==2:
amp=np.array(hopping[0],dtype=complex)
i=hopping[1]
j=hopping[2]
# in 0-dim case there is no phase factor
if self._dim_k>0:
ind_R=np.array(hopping[3],dtype=float)
# vector from one site to another
rv=-self._orb[i,:]+self._orb[j,:]+ind_R
# Take only components of vector which are periodic
rv=rv[self._per]
# Calculate the hopping, see details in info/tb/tb.pdf
phase=np.exp((2.0j)*np.pi*np.dot(kpnt,rv))
amp=amp*phase
# add this hopping into a matrix and also its conjugate
if self._nspin==1:
ham[i,j]+=amp
ham[j,i]+=amp.conjugate()
elif self._nspin==2:
ham[i,:,j,:]+=amp
ham[j,:,i,:]+=amp.T.conjugate()
return ham
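# Hedged worked example of the Bloch phase convention used in _gen_ham above
# (illustration only): a hopping <i|H|j+R> with amplitude t, reduced orbital
# positions r_i and r_j and cell vector R contributes the matrix element
#
#     H_ij(k) = t * exp( 2j*pi * dot(k, r_j + R - r_i) )
#
# so a single-orbital 1D chain with t=1.0 and R=[1] gives, after the
# conjugate hop is added, H(k) = exp(2j*pi*k) + exp(-2j*pi*k) = 2*cos(2*pi*k).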
def _sol_ham(self,ham,eig_vectors=False):
"""Solves Hamiltonian and returns eigenvectors, eigenvalues"""
# reshape matrix first
if self._nspin==1:
ham_use=ham
elif self._nspin==2:
ham_use=ham.reshape((2*self._norb,2*self._norb))
# check that matrix is hermitian
if np.max(np.abs(ham_use-ham_use.T.conj()))>1.0E-9:
raise Exception("\n\nHamiltonian matrix is not hermitian?!")
#solve matrix
if eig_vectors==False: # only find eigenvalues
eval=np.linalg.eigvalsh(ham_use)
# sort eigenvalues and convert to real numbers
eval=_nicefy_eig(eval)
return np.array(eval,dtype=float)
else: # find eigenvalues and eigenvectors
(eval,eig)=np.linalg.eigh(ham_use)
# transpose matrix eig since otherwise it is confusing
# now eig[i,:] is eigenvector for eval[i]-th eigenvalue
eig=eig.T
# sort evectors, eigenvalues and convert to real numbers
(eval,eig)=_nicefy_eig(eval,eig)
# reshape eigenvectors if doing a spinfull calculation
if self._nspin==2:
eig=eig.reshape((self._nsta,self._norb,2))
return (eval,eig)
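# Descriptive note on the conventions returned by _sol_ham above: eval has
# length nsta = norb*nspin and is sorted in ascending order; when
# eigenvectors are requested, eig[n] belongs to eval[n] and has shape
# (norb,) for nspin=1 or (norb, 2) for nspin=2 (last index = spin).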
def solve_all(self,k_list=None,eig_vectors=False):
r"""
Solves for eigenvalues and (optionally) eigenvectors of the
tight-binding model on a given one-dimensional list of k-vectors.
.. note::
Eigenvectors (wavefunctions) returned by this
function and used throughout the code are exclusively given
in convention 1 as described in section 3.1 of
:download:`notes on tight-binding formalism
<misc/pythtb-formalism.pdf>`. In other words, they
are in correspondence with cell-periodic functions
:math:`u_{n {\bf k}} ({\bf r})` not
:math:`\Psi_{n {\bf k}} ({\bf r})`.
.. note::
In some cases class :class:`pythtb.wf_array` provides a more
elegant way to deal with eigensolutions on a regular mesh of
k-vectors.
:param k_list: One-dimensional array of k-vectors. Each k-vector
is given in reduced coordinates of the reciprocal space unit
cell. For example, for real space unit cell vectors [1.0,0.0]
and [0.0,2.0] and associated reciprocal space unit vectors
[2.0*pi,0.0] and [0.0,pi], k-vector with reduced coordinates
[0.25,0.25] corresponds to k-vector [0.5*pi,0.25*pi].
Dimensionality of each vector must equal to the number of
periodic directions (i.e. dimensionality of reciprocal space,
*dim_k*).
This parameter shouldn't be specified for system with
zero-dimensional k-space (*dim_k* =0).
:param eig_vectors: Optional boolean parameter, specifying whether
eigenvectors should be returned. If *eig_vectors* is True, then
both eigenvalues and eigenvectors are returned, otherwise only
eigenvalues are returned.
:returns:
* **eval** -- Two dimensional array of eigenvalues for
all bands for all kpoints. Format is eval[band,kpoint] where
first index (band) corresponds to the electron band in
question and second index (kpoint) corresponds to the k-point
as listed in the input parameter *k_list*. Eigenvalues are
sorted from smallest to largest at each k-point separately.
In the case when reciprocal space is zero-dimensional (as in a
molecule) kpoint index is dropped and *eval* is of the format
eval[band].
* **evec** -- Three dimensional array of eigenvectors for
all bands and all kpoints. If *nspin* equals 1 the format
of *evec* is evec[band,kpoint,orbital] where "band" is the
electron band in question, "kpoint" is index of k-vector
as given in input parameter *k_list*. Finally, "orbital"
refers to the tight-binding orbital basis function.
Ordering of bands is the same as in *eval*.
Eigenvectors evec[n,k,j] correspond to :math:`C^{n {\bf
k}}_{j}` from section 3.1 equation 3.5 and 3.7 of the
:download:`notes on tight-binding formalism
<misc/pythtb-formalism.pdf>`.
In the case when reciprocal space is zero-dimensional (as in a
molecule) kpoint index is dropped and *evec* is of the format
evec[band,orbital].
In the spinfull calculation (*nspin* equals 2) evec has
additional component evec[...,spin] corresponding to the
spin component of the wavefunction.
Example usage::
# Returns eigenvalues for three k-vectors
eval = tb.solve_all([[0.0, 0.0], [0.0, 0.2], [0.0, 0.5]])
# Returns eigenvalues and eigenvectors for two k-vectors
(eval, evec) = tb.solve_all([[0.0, 0.0], [0.0, 0.2]], eig_vectors=True)
"""
# if not 0-dim case
if not (k_list is None):
nkp=len(k_list) # number of k points
# first initialize matrices for all return data
# indices are [band,kpoint]
ret_eval=np.zeros((self._nsta,nkp),dtype=float)
# indices are [band,kpoint,orbital,spin]
if self._nspin==1:
ret_evec=np.zeros((self._nsta,nkp,self._norb),dtype=complex)
elif self._nspin==2:
ret_evec=np.zeros((self._nsta,nkp,self._norb,2),dtype=complex)
# go over all kpoints
for i,k in enumerate(k_list):
# generate Hamiltonian at that point
ham=self._gen_ham(k)
# solve Hamiltonian
if eig_vectors==False:
eval=self._sol_ham(ham,eig_vectors=eig_vectors)
ret_eval[:,i]=eval[:]
else:
(eval,evec)=self._sol_ham(ham,eig_vectors=eig_vectors)
ret_eval[:,i]=eval[:]
if self._nspin==1:
ret_evec[:,i,:]=evec[:,:]
elif self._nspin==2:
ret_evec[:,i,:,:]=evec[:,:,:]
# return stuff
if eig_vectors==False:
# indices of eval are [band,kpoint]
return ret_eval
else:
# indices of eval are [band,kpoint] for evec are [band,kpoint,orbital,(spin)]
return (ret_eval,ret_evec)
else: # 0 dim case
# generate Hamiltonian
ham=self._gen_ham()
# solve
if eig_vectors==False:
eval=self._sol_ham(ham,eig_vectors=eig_vectors)
# indices of eval are [band]
return eval
else:
(eval,evec)=self._sol_ham(ham,eig_vectors=eig_vectors)
# indices of eval are [band] and of evec are [band,orbital,spin]
return (eval,evec)
def solve_one(self,k_point=None,eig_vectors=False):
r"""
Similar to :func:`pythtb.tb_model.solve_all` but solves tight-binding
model for only one k-vector.
"""
# if not 0-dim case
if not (k_point is None):
if eig_vectors==False:
eval=self.solve_all([k_point],eig_vectors=eig_vectors)
# indices of eval are [band]
return eval[:,0]
else:
(eval,evec)=self.solve_all([k_point],eig_vectors=eig_vectors)
# indices of eval are [band] for evec are [band,orbital,spin]
if self._nspin==1:
return (eval[:,0],evec[:,0,:])
elif self._nspin==2:
return (eval[:,0],evec[:,0,:,:])
else:
# do the same as solve_all
return self.solve_all(eig_vectors=eig_vectors)
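# A minimal usage sketch for solve_one (illustration only; `tb` stands for
# some previously constructed tb_model with two periodic directions and is
# not defined here):
#
#     evals = tb.solve_one([0.0, 0.0])
#     (evals, evecs) = tb.solve_one([0.0, 0.0], eig_vectors=True)
#
# evals[n] is then the n-th band energy at that k-point and, for nspin=1,
# evecs[n, :] the corresponding eigenvector in the tight-binding basis.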
def cut_piece(self,num,fin_dir,glue_edgs=False):
r"""
Constructs a (d-1)-dimensional tight-binding model out of a
d-dimensional one by repeating the unit cell a given number of
times along one of the periodic lattice vectors. The real-space
lattice vectors of the returned model are the same as those of
the original model; only the dimensionality of reciprocal space
is reduced.
:param num: How many times to repeat the unit cell.
:param fin_dir: Index of the real space lattice vector along
which you no longer wish to maintain periodicity.
:param glue_edgs: Optional boolean parameter specifying whether to
allow hoppings from one edge to the other of a cut model.
:returns:
* **fin_model** -- Object of type
:class:`pythtb.tb_model` representing a cutout
tight-binding model. Orbitals in *fin_model* are
numbered so that the i-th orbital of the n-th unit
cell has index i+norb*n (here norb is the number of
orbitals in the original model).
Example usage::
A = tb_model(3, 3, ...)
# Construct two-dimensional model B out of three-dimensional
# model A by repeating model along second lattice vector ten times
B = A.cut_piece(10, 1)
# Further cut two-dimensional model B into one-dimensional model
# C by repeating unit cell twenty times along third lattice
# vector and allow hoppings from one edge to the other
C = B.cut_piece(20, 2, glue_edgs=True)
See also these examples: :ref:`haldane_fin-example`,
:ref:`edge-example`.
"""
if self._dim_k ==0:
raise Exception("\n\nModel is already finite")
if type(num).__name__!='int':
raise Exception("\n\nArgument num not an integer")
# check value of num
if num<1:
raise Exception("\n\nArgument num must be positive!")
if num==1 and glue_edgs==True:
raise Exception("\n\nCan't have num==1 and glueing of the edges!")
# generate orbitals of a finite model
fin_orb=[]
onsite=[] # store also onsite energies
for i in range(num): # go over all cells in finite direction
for j in range(self._norb): # go over all orbitals in one cell
# make a copy of j-th orbital
orb_tmp=np.copy(self._orb[j,:])
# change coordinate along finite direction
orb_tmp[fin_dir]+=float(i)
# add to the list
fin_orb.append(orb_tmp)
# do the onsite energies at the same time
onsite.append(self._site_energies[j])
onsite=np.array(onsite)
fin_orb=np.array(fin_orb)
# generate periodic directions of a finite model
fin_per=copy.deepcopy(self._per)
# find if list of periodic directions contains the one you
# want to make finite
if fin_per.count(fin_dir)!=1:
raise Exception("\n\nCan not make model finite along this direction!")
# remove index which is no longer periodic
fin_per.remove(fin_dir)
# generate object of tb_model type that will correspond to a cutout
fin_model=tb_model(self._dim_k-1,
self._dim_r,
copy.deepcopy(self._lat),
fin_orb,
fin_per,
self._nspin)
# remember if came from w90
fin_model._assume_position_operator_diagonal=self._assume_position_operator_diagonal
# now put all onsite terms for the finite model
fin_model.set_onsite(onsite,mode="reset")
# put all hopping terms
for c in range(num): # go over all cells in finite direction
for h in range(len(self._hoppings)): # go over all hoppings in one cell
# amplitude of the hop is the same
amp=self._hoppings[h][0]
# lattice vector of the hopping
ind_R=copy.deepcopy(self._hoppings[h][3])
jump_fin=ind_R[fin_dir] # store by how many cells is the hopping in finite direction
if fin_model._dim_k!=0:
ind_R[fin_dir]=0 # one of the directions now becomes finite
# index of "from" and "to" hopping indices
hi=self._hoppings[h][1] + c*self._norb
# have to compensate for the fact that ind_R in finite direction
# will not be used in the finite model
hj=self._hoppings[h][2] + (c + jump_fin)*self._norb
# decide whether this hopping should be added or not
to_add=True
# if edges are not glued then neglect all jumps that spill out
if glue_edgs==False:
if hj<0 or hj>=self._norb*num:
to_add=False
# if edges are glued then do mod division to wrap up the hopping
else:
hj=int(hj)%int(self._norb*num)
# add hopping to a finite model
if to_add==True:
if fin_model._dim_k==0:
fin_model.set_hop(amp,hi,hj,mode="add",allow_conjugate_pair=True)
else:
fin_model.set_hop(amp,hi,hj,ind_R,mode="add",allow_conjugate_pair=True)
return fin_model
def reduce_dim(self,remove_k,value_k):
r"""
Reduces dimensionality of the model by taking a reciprocal-space
slice of the Bloch Hamiltonian :math:`{\cal H}_{\bf k}`. The Bloch
Hamiltonian (defined in :download:`notes on tight-binding
formalism <misc/pythtb-formalism.pdf>` in section 3.1 equation 3.7) of a
d-dimensional model is a function of d-dimensional k-vector.
This function returns a d-1 dimensional tight-binding model obtained
by constraining one of k-vector components in :math:`{\cal H}_{\bf
k}` to be a constant.
:param remove_k: Which reciprocal space unit vector component
you wish to keep constant.
:param value_k: Value of the k-vector component to which you are
constraining this model. Must be given in reduced coordinates.
:returns:
* **red_tb** -- Object of type :class:`pythtb.tb_model`
representing a reduced tight-binding model.
Example usage::
# Constrains second k-vector component to equal 0.3
red_tb = tb.reduce_dim(1, 0.3)
"""
#
if self._dim_k==0:
raise Exception("\n\nCan not reduce dimensionality even further!")
# make a copy
red_tb=copy.deepcopy(self)
# make one of the directions not periodic
red_tb._per.remove(remove_k)
red_tb._dim_k=len(red_tb._per)
# check that really removed one and only one direction
if red_tb._dim_k!=self._dim_k-1:
raise Exception("\n\nSpecified wrong dimension to reduce!")
# specify hopping terms from scratch
red_tb._hoppings=[]
# set all hopping parameters for this value of value_k
for h in range(len(self._hoppings)):
hop=self._hoppings[h]
if self._nspin==1:
amp=complex(hop[0])
elif self._nspin==2:
amp=np.array(hop[0],dtype=complex)
i=hop[1]; j=hop[2]
ind_R=np.array(hop[3],dtype=int)
# vector from one site to another
rv=-red_tb._orb[i,:]+red_tb._orb[j,:]+np.array(ind_R,dtype=float)
# take only r-vector component along direction you are not making periodic
rv=rv[remove_k]
# Calculate the part of hopping phase, only for this direction
phase=np.exp((2.0j)*np.pi*(value_k*rv))
# store modified version of the hop
# Since we are getting rid of one dimension, it could be that now
# one of the hopping terms became onsite term because one direction
# is no longer periodic
if i==j and (False not in (np.array(ind_R[red_tb._per],dtype=int)==0)):
if ind_R[remove_k]==0:
# in this case this is really an onsite term
red_tb.set_onsite(amp*phase,i,mode="add")
else:
# in this case must treat both R and -R because that term would
# have been counted twice without dimensional reduction
if self._nspin==1:
red_tb.set_onsite(amp*phase+(amp*phase).conj(),i,mode="add")
elif self._nspin==2:
red_tb.set_onsite(amp*phase+(amp.T*phase).conj(),i,mode="add")
else:
# just in case make the R vector zero along the reduction dimension
ind_R[remove_k]=0
# add hopping term
red_tb.set_hop(amp*phase,i,j,ind_R,mode="add",allow_conjugate_pair=True)
return red_tb
def make_supercell(self, sc_red_lat, return_sc_vectors=False, to_home=True):
r"""
Returns tight-binding model :class:`pythtb.tb_model`
representing a super-cell of a current object. This function
can be used together with *cut_piece* in order to create slabs
with arbitrary surfaces.
By default all orbitals will be shifted to the home cell after
unit cell has been created. That way all orbitals will have
reduced coordinates between 0 and 1. If you wish to avoid this
behavior, you need to set the *to_home* argument to *False*.
:param sc_red_lat: Array of integers with size *dim_r*dim_r*
defining a super-cell lattice vectors in terms of reduced
coordinates of the original tight-binding model. First index
in the array specifies super-cell vector, while second index
specifies coordinate of that super-cell vector. If
*dim_k<dim_r* then still need to specify full array with
size *dim_r*dim_r* for consistency, but non-periodic
directions must have 0 on off-diagonal elements and 1 on
diagonal.
:param return_sc_vectors: Optional parameter. Default value is
*False*. If *True* returns also lattice vectors inside the
super-cell. Internally, super-cell tight-binding model will
have orbitals repeated in the same order in which these
super-cell vectors are given, but if argument *to_home*
is set *True* (which it is by default) then additionally,
orbitals will be shifted to the home cell.
:param to_home: Optional parameter, if *True* will
shift all orbitals to the home cell. Default value is *True*.
:returns:
* **sc_tb** -- Object of type :class:`pythtb.tb_model`
representing a tight-binding model in a super-cell.
* **sc_vectors** -- Super-cell vectors, returned only if
*return_sc_vectors* is set to *True* (default value is
*False*).
Example usage::
# Creates super-cell out of 2d tight-binding model tb
sc_tb = tb.make_supercell([[2, 1], [-1, 2]])
"""
# Can't make super cell for model without periodic directions
if self._dim_r==0:
raise Exception("\n\nMust have at least one periodic direction to make a super-cell")
# convert array to numpy array
use_sc_red_lat=np.array(sc_red_lat)
# checks on super-lattice array
if use_sc_red_lat.shape!=(self._dim_r,self._dim_r):
raise Exception("\n\nDimension of sc_red_lat array must be dim_r*dim_r")
if use_sc_red_lat.dtype!=int:
raise Exception("\n\nsc_red_lat array elements must be integers")
for i in range(self._dim_r):
for j in range(self._dim_r):
if (i==j) and (i not in self._per) and use_sc_red_lat[i,j]!=1:
raise Exception("\n\nDiagonal elements of sc_red_lat for non-periodic directions must equal 1.")
if (i!=j) and ((i not in self._per) or (j not in self._per)) and use_sc_red_lat[i,j]!=0:
raise Exception("\n\nOff-diagonal elements of sc_red_lat for non-periodic directions must equal 0.")
if np.abs(np.linalg.det(use_sc_red_lat))<1.0E-6:
raise Exception("\n\nSuper-cell lattice vectors length/area/volume too close to zero, or zero.")
if np.linalg.det(use_sc_red_lat)<0.0:
raise Exception("\n\nSuper-cell lattice vectors need to form right handed system.")
# converts reduced vector in original lattice to reduced vector in super-cell lattice
def to_red_sc(red_vec_orig):
return np.linalg.solve(np.array(use_sc_red_lat.T,dtype=float),
np.array(red_vec_orig,dtype=float))
# conservative estimate on range of search for super-cell vectors
max_R=np.max(np.abs(use_sc_red_lat))*self._dim_r
# candidates for super-cell vectors
# this is hard-coded and can be improved!
sc_cands=[]
if self._dim_r==1:
for i in range(-max_R,max_R+1):
sc_cands.append(np.array([i]))
elif self._dim_r==2:
for i in range(-max_R,max_R+1):
for j in range(-max_R,max_R+1):
sc_cands.append(np.array([i,j]))
elif self._dim_r==3:
for i in range(-max_R,max_R+1):
for j in range(-max_R,max_R+1):
for k in range(-max_R,max_R+1):
sc_cands.append(np.array([i,j,k]))
elif self._dim_r==4:
for i in range(-max_R,max_R+1):
for j in range(-max_R,max_R+1):
for k in range(-max_R,max_R+1):
for l in range(-max_R,max_R+1):
sc_cands.append(np.array([i,j,k,l]))
else:
raise Exception("\n\nWrong dimensionality of dim_r!")
# find all vectors inside super-cell
# store them here
sc_vec=[]
eps_shift=np.sqrt(2.0)*1.0E-8 # shift of the grid, so to avoid double counting
#
for vec in sc_cands:
# compute reduced coordinates of this candidate vector in the super-cell frame
tmp_red=to_red_sc(vec).tolist()
# check if in the interior
inside=True
for t in tmp_red:
if t<=-1.0*eps_shift or t>1.0-eps_shift:
inside=False
if inside==True:
sc_vec.append(np.array(vec))
# number of times unit cell is repeated in the super-cell
num_sc=len(sc_vec)
# check that found enough super-cell vectors
if int(round(np.abs(np.linalg.det(use_sc_red_lat))))!=num_sc:
raise Exception("\n\nSuper-cell generation failed! Wrong number of super-cell vectors found.")
# cartesian vectors of the super lattice
sc_cart_lat=np.dot(use_sc_red_lat,self._lat)
# orbitals of the super-cell tight-binding model
sc_orb=[]
for cur_sc_vec in sc_vec: # go over all super-cell vectors
for orb in self._orb: # go over all orbitals
# shift orbital and compute coordinates in
# reduced coordinates of super-cell
sc_orb.append(to_red_sc(orb+cur_sc_vec))
# create super-cell tb_model object to be returned
sc_tb=tb_model(self._dim_k,self._dim_r,sc_cart_lat,sc_orb,per=self._per,nspin=self._nspin)
# remember if came from w90
sc_tb._assume_position_operator_diagonal=self._assume_position_operator_diagonal
# repeat onsite energies
for i in range(num_sc):
for j in range(self._norb):
sc_tb.set_onsite(self._site_energies[j],i*self._norb+j)
# set hopping terms
for c,cur_sc_vec in enumerate(sc_vec): # go over all super-cell vectors
for h in range(len(self._hoppings)): # go over all hopping terms of the original model
# amplitude of the hop is the same
amp=self._hoppings[h][0]
# lattice vector of the hopping
ind_R=copy.deepcopy(self._hoppings[h][3])
# super-cell component of hopping lattice vector
# shift also by current super cell vector
sc_part=np.floor(to_red_sc(ind_R+cur_sc_vec)) # round down!
sc_part=np.array(sc_part,dtype=int)
# find remaining vector in the original reduced coordinates
orig_part=ind_R+cur_sc_vec-np.dot(sc_part,use_sc_red_lat)
# remaining vector must equal one of the super-cell vectors
pair_ind=None
for p,pair_sc_vec in enumerate(sc_vec):
if False not in (pair_sc_vec==orig_part):
if pair_ind!=None:
raise Exception("\n\nFound duplicate super cell vector!")
pair_ind=p
if pair_ind==None:
raise Exception("\n\nDid not find super cell vector!")
# index of "from" and "to" hopping indices
hi=self._hoppings[h][1] + c*self._norb
hj=self._hoppings[h][2] + pair_ind*self._norb
# add hopping term
sc_tb.set_hop(amp,hi,hj,sc_part,mode="add",allow_conjugate_pair=True)
# put orbitals to home cell if asked for
if to_home==True:
sc_tb._shift_to_home()
# return new tb model and vectors if needed
if return_sc_vectors==False:
return sc_tb
else:
return (sc_tb,sc_vec)
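# Hedged worked example for make_supercell above (illustration only): for a
# two-dimensional model tb, the call sc_tb = tb.make_supercell([[2, 1], [-1, 2]])
# uses a super-cell matrix with determinant 5, so sc_vec holds 5 original
# lattice vectors and the returned model has 5*norb orbitals, with every
# original hopping replicated once per contained cell.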
def _shift_to_home(self):
"""Shifts all orbital positions to the home unit cell. After
this function is called all reduced coordinates of orbitals
will be between 0 and 1. It may be useful to call this
function after using make_supercell."""
# go over all orbitals
for i in range(self._norb):
cur_orb=self._orb[i]
# compute orbital in the home cell
round_orb=(np.array(cur_orb)+1.0E-6)%1.0
# find displacement vector needed to bring back to home cell
disp_vec=np.array(np.round(cur_orb-round_orb),dtype=int)
# check if have at least one non-zero component
if True in (disp_vec!=0):
# shift orbital
self._orb[i]-=np.array(disp_vec,dtype=float)
# shift also hoppings
if self._dim_k!=0:
for h in range(len(self._hoppings)):
if self._hoppings[h][1]==i:
self._hoppings[h][3]-=disp_vec
if self._hoppings[h][2]==i:
self._hoppings[h][3]+=disp_vec
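# Hedged worked example for _shift_to_home above (illustration only): an
# orbital at reduced coordinates [1.2, -0.3] is mapped to [0.2, 0.7] via the
# displacement vector [1, -1]; hoppings in which that orbital is the bra
# (home-cell) index get the vector subtracted from their ind_R, while those
# in which it is the ket index get it added, leaving the geometry unchanged.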
def k_uniform_mesh(self,mesh_size):
r"""
Returns a uniform grid of k-points that can be passed to
function :func:`pythtb.tb_model.solve_all`. This
function is useful for plotting density of states histogram
and similar.
Returned uniform grid of k-points always contains the origin.
:param mesh_size: Number of k-points in the mesh in each
periodic direction of the model.
:returns:
* **k_vec** -- Array of k-vectors on the mesh that can be
directly passed to function :func:`pythtb.tb_model.solve_all`.
Example usage::
# returns a 10x20x30 mesh of a tight binding model
# with three periodic directions
k_vec = my_model.k_uniform_mesh([10,20,30])
# solve model on the uniform mesh
my_model.solve_all(k_vec)
"""
# get the mesh size and checks for consistency
use_mesh=np.array(list(map(round,mesh_size)),dtype=int)
if use_mesh.shape!=(self._dim_k,):
print(use_mesh.shape)
raise Exception("\n\nIncorrect size of the specified k-mesh!")
if np.min(use_mesh)<=0:
raise Exception("\n\nMesh must have positive non-zero number of elements.")
# construct the mesh
if self._dim_k==1:
# get a mesh
k_vec=np.mgrid[0:use_mesh[0]]
# normalize the mesh
norm=np.tile(np.array(use_mesh,dtype=float),use_mesh)
norm=norm.reshape(use_mesh.tolist()+[1])
norm=norm.transpose([1,0])
k_vec=k_vec/norm
# final reshape
k_vec=k_vec.transpose([1,0]).reshape([use_mesh[0],1])
elif self._dim_k==2:
# get a mesh
k_vec=np.mgrid[0:use_mesh[0],0:use_mesh[1]]
# normalize the mesh
norm=np.tile(np.array(use_mesh,dtype=float),use_mesh)
norm=norm.reshape(use_mesh.tolist()+[2])
norm=norm.transpose([2,0,1])
k_vec=k_vec/norm
# final reshape
k_vec=k_vec.transpose([1,2,0]).reshape([use_mesh[0]*use_mesh[1],2])
elif self._dim_k==3:
# get a mesh
k_vec=np.mgrid[0:use_mesh[0],0:use_mesh[1],0:use_mesh[2]]
# normalize the mesh
norm=np.tile(np.array(use_mesh,dtype=float),use_mesh)
norm=norm.reshape(use_mesh.tolist()+[3])
norm=norm.transpose([3,0,1,2])
k_vec=k_vec/norm
# final reshape
k_vec=k_vec.transpose([1,2,3,0]).reshape([use_mesh[0]*use_mesh[1]*use_mesh[2],3])
else:
raise Exception("\n\nUnsupported dim_k!")
return k_vec
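# Hedged worked example of the output of k_uniform_mesh above (illustration
# only): for a model with two periodic directions, mesh_size=[2, 2] returns
#
#     [[0.0, 0.0], [0.0, 0.5], [0.5, 0.0], [0.5, 0.5]]
#
# an evenly spaced grid in reduced coordinates that always contains the
# origin and omits the points equivalent to it at 1.0.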
def k_path(self,kpts,nk,report=True):
r"""
Interpolates a path in reciprocal space between specified
k-points. In 2D or 3D the k-path can consist of several
straight segments connecting high-symmetry points ("nodes"),
and the results can be used to plot the bands along this path.
The interpolated path that is returned contains k-points that
are as equidistant as possible.
:param kpts: Array of k-vectors in reciprocal space between
which interpolated path should be constructed. These
k-vectors must be given in reduced coordinates. As a
special case, in 1D k-space kpts may be a string:
* *"full"* -- Implies *[ 0.0, 0.5, 1.0]* (full BZ)
* *"fullc"* -- Implies *[-0.5, 0.0, 0.5]* (full BZ, centered)
* *"half"* -- Implies *[ 0.0, 0.5]* (half BZ)
:param nk: Total number of k-points to be used in making the plot.
:param report: Optional parameter specifying whether printout
is desired (default is True).
:returns:
* **k_vec** -- Array of (nearly) equidistant interpolated
k-points. The distance between the points is calculated in
the Cartesian frame, however coordinates themselves are
given in dimensionless reduced coordinates! This is done
so that this array can be directly passed to function
:func:`pythtb.tb_model.solve_all`.
* **k_dist** -- Array giving accumulated k-distance to each
k-point in the path. Unlike array *k_vec* this one has
dimensions! (Units are defined here so that for a
one-dimensional crystal with lattice constant equal to, for
example, *10*, the length of the Brillouin zone would equal
*1/10=0.1*. In other words factors of :math:`2\pi` are
absorbed into *k*.) This array can be used to plot path in
the k-space so that the distances between the k-points in
the plot are exact.
* **k_node** -- Array giving accumulated k-distance to each
node on the path in Cartesian coordinates. This array is
typically used to plot nodes (typically special points) on
the path in k-space.
Example usage::
# Construct a path connecting four nodal points in k-space
# Path will contain 401 k-points, roughly equally spaced
path = [[0.0, 0.0], [0.0, 0.5], [0.5, 0.5], [0.0, 0.0]]
(k_vec,k_dist,k_node) = my_model.k_path(path,401)
# solve for eigenvalues on that path
evals = tb.solve_all(k_vec)
# then use evals, k_dist, and k_node to plot bandstructure
# (see examples)
"""
# processing of special cases for kpts
if kpts=='full':
# full Brillouin zone for 1D case
k_list=np.array([[0.],[0.5],[1.]])
elif kpts=='fullc':
# centered full Brillouin zone for 1D case
k_list=np.array([[-0.5],[0.],[0.5]])
elif kpts=='half':
# half Brillouin zone for 1D case
k_list=np.array([[0.],[0.5]])
else:
k_list=np.array(kpts)
# in 1D case if path is specified as a vector, convert it to an (n,1) array
if len(k_list.shape)==1 and self._dim_k==1:
k_list=np.array([k_list]).T
# make sure that k-points in the path have correct dimension
if k_list.shape[1]!=self._dim_k:
print('input k-space dimension is',k_list.shape[1])
print('k-space dimension taken from model is',self._dim_k)
raise Exception("\n\nk-space dimensions do not match")
# must have more k-points in the path than number of nodes
if nk<k_list.shape[0]:
raise Exception("\n\nMust have more points in the path than number of nodes.")
# number of nodes
n_nodes=k_list.shape[0]
# extract the lattice vectors from the TB model
lat_per=np.copy(self._lat)
# choose only those that correspond to periodic directions
lat_per=lat_per[self._per]
# compute k_space metric tensor
k_metric = np.linalg.inv(np.dot(lat_per,lat_per.T))
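# (distances are measured in the Cartesian frame: for a displacement
# dk in reduced coordinates the squared length is dk.k_metric.dk,
# with factors of 2*pi absorbed into k as described in the docstring)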
# Find distances between nodes and set k_node, which is
# accumulated distance since the start of the path
# initialize array k_node
k_node=np.zeros(n_nodes,dtype=float)
for n in range(1,n_nodes):
dk = k_list[n]-k_list[n-1]
dklen = np.sqrt(np.dot(dk,np.dot(k_metric,dk)))
k_node[n]=k_node[n-1]+dklen
# Find indices of nodes in interpolated list
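# (each segment is given a number of points proportional to its
# length, so the interpolated k-points are as nearly equidistant
# as possible)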
node_index=[0]
for n in range(1,n_nodes-1):
frac=k_node[n]/k_node[-1]
node_index.append(int(round(frac*(nk-1))))
node_index.append(nk-1)
# initialize two arrays temporarily with zeros
# array giving accumulated k-distance to each k-point
k_dist=np.zeros(nk,dtype=float)
# array listing the interpolated k-points
k_vec=np.zeros((nk,self._dim_k),dtype=float)
# go over all kpoints
k_vec[0]=k_list[0]
for n in range(1,n_nodes):
n_i=node_index[n-1]
n_f=node_index[n]
kd_i=k_node[n-1]
kd_f=k_node[n]
k_i=k_list[n-1]
k_f=k_list[n]
for j in range(n_i,n_f+1):
frac=float(j-n_i)/float(n_f-n_i)
k_dist[j]=kd_i+frac*(kd_f-kd_i)
k_vec[j]=k_i+frac*(k_f-k_i)
if report==True:
if self._dim_k==1:
print(' Path in 1D BZ defined by nodes at '+str(k_list.flatten()))
else:
print('----- k_path report begin ----------')
original=np.get_printoptions()
np.set_printoptions(precision=5)
print('real-space lattice vectors\n', lat_per)
print('k-space metric tensor\n', k_metric)
print('internal coordinates of nodes\n', k_list)
if (lat_per.shape[0]==lat_per.shape[1]):
# lat_per is invertible
lat_per_inv=np.linalg.inv(lat_per).T
print('reciprocal-space lattice vectors\n', lat_per_inv)
# cartesian coordinates of nodes
kpts_cart=np.tensordot(k_list,lat_per_inv,axes=1)
print('cartesian coordinates of nodes\n',kpts_cart)
print('list of segments:')
for n in range(1,n_nodes):
dk=k_node[n]-k_node[n-1]
dk_str=_nice_float(dk,7,5)
print(' length = '+dk_str+' from ',k_list[n-1],' to ',k_list[n])
print('node distance list:', k_node)
print('node index list: ', np.array(node_index))
np.set_printoptions(precision=original["precision"])
print('----- k_path report end ------------')
print()
return (k_vec,k_dist,k_node)
def ignore_position_operator_offdiagonal(self):
"""Call to this function enables one to approximately compute
Berry-like objects from tight-binding models that were
obtained from Wannier90."""
self._assume_position_operator_diagonal=True
def position_matrix(self, evec, dir):
r"""
Returns matrix elements of the position operator along
direction *dir* for eigenvectors *evec* at a single k-point.
Position operator is defined in reduced coordinates.
The returned object :math:`X` is
.. math::
X_{m n {\bf k}}^{\alpha} = \langle u_{m {\bf k}} \vert
r^{\alpha} \vert u_{n {\bf k}} \rangle
Here :math:`r^{\alpha}` is the position operator along direction
:math:`\alpha` that is selected by *dir*.
:param evec: Eigenvectors for which we are computing matrix
elements of the position operator. The shape of this array
is evec[band,orbital] if *nspin* equals 1 and
evec[band,orbital,spin] if *nspin* equals 2.
:param dir: Direction along which we are computing the center.
This integer must not be one of the periodic directions
since position operator matrix element in that case is not
well defined.
:returns:
* **pos_mat** -- Position operator matrix :math:`X_{m n}` as defined
above. This is a square matrix with size determined by number of bands
given in *evec* input array. First index of *pos_mat* corresponds to
bra vector (*m*) and second index to ket (*n*).
Example usage::
# diagonalizes Hamiltonian at some k-points
(evals, evecs) = my_model.solve_all(k_vec,eig_vectors=True)
# computes position operator matrix elements for 3-rd kpoint
# and bottom five bands along first coordinate
pos_mat = my_model.position_matrix(evecs[:5,2], 0)
See also this example: :ref:`haldane_hwf-example`,
"""
# make sure specified direction is not periodic!
if dir in self._per:
raise Exception("Can not compute position matrix elements along periodic direction!")
# make sure direction is not out of range
if dir<0 or dir>=self._dim_r:
raise Exception("Direction out of range!")
# check if model came from w90
if self._assume_position_operator_diagonal==False:
_offdiag_approximation_warning_and_stop()
# get coordinates of orbitals along the specified direction
pos_tmp=self._orb[:,dir]
# reshape arrays in the case of spinfull calculation
if self._nspin==2:
# tile along spin direction if needed
pos_use=np.tile(pos_tmp,(2,1)).transpose().flatten()
# also flatten the state along the spin index
evec_use=evec.reshape((evec.shape[0],evec.shape[1]*evec.shape[2]))
else:
pos_use=pos_tmp
evec_use=evec
# position matrix elements
pos_mat=np.zeros((evec_use.shape[0],evec_use.shape[0]),dtype=complex)
# go over all bands
for i in range(evec_use.shape[0]):
for j in range(evec_use.shape[0]):
pos_mat[i,j]=np.dot(evec_use[i].conj(),pos_use*evec_use[j])
# make sure matrix is hermitian
if np.max(np.abs(pos_mat-pos_mat.T.conj()))>1.0E-9:
raise Exception("\n\n Position matrix is not hermitian?!")
return pos_mat
def position_expectation(self,evec,dir):
r"""
Returns diagonal matrix elements of the position operator.
These elements :math:`X_{n n}` can be interpreted as an
average position of n-th Bloch state *evec[n]* along
direction *dir*. Generally speaking these centers are *not*
hybrid Wannier function centers (which are instead
returned by :func:`pythtb.tb_model.position_hwf`).
See function :func:`pythtb.tb_model.position_matrix` for
definition of matrix :math:`X`.
:param evec: Eigenvectors for which we are computing matrix
elements of the position operator. The shape of this array
is evec[band,orbital] if *nspin* equals 1 and
evec[band,orbital,spin] if *nspin* equals 2.
:param dir: Direction along which we are computing matrix
elements. This integer must not be one of the periodic
directions since position operator matrix element in that
case is not well defined.
:returns:
* **pos_exp** -- Diagonal elements of the position operator matrix :math:`X`.
Length of this vector is determined by number of bands given in *evec* input
array.
Example usage::
# diagonalizes Hamiltonian at some k-points
(evals, evecs) = my_model.solve_all(k_vec,eig_vectors=True)
# computes average position for 3-rd kpoint
# and bottom five bands along first coordinate
pos_exp = my_model.position_expectation(evecs[:5,2], 0)
See also this example: :ref:`haldane_hwf-example`.
"""
# check if model came from w90
if self._assume_position_operator_diagonal==False:
_offdiag_approximation_warning_and_stop()
pos_exp=self.position_matrix(evec,dir).diagonal()
return np.array(np.real(pos_exp),dtype=float)
def position_hwf(self,evec,dir,hwf_evec=False,basis="orbital"):
r"""
Returns eigenvalues and optionally eigenvectors of the
position operator matrix :math:`X` in either Bloch or orbital
basis. These eigenvectors can be interpreted as linear
combinations of Bloch states *evec* that have minimal extent (or
spread :math:`\Omega` in the sense of maximally localized
Wannier functions) along direction *dir*. The eigenvalues are
average positions of these localized states.
Note that these eigenvectors are not maximally localized
Wannier functions in the usual sense because they are
localized only along one direction. They are also not the
average positions of the Bloch states *evec*, which are
instead computed by :func:`pythtb.tb_model.position_expectation`.
See function :func:`pythtb.tb_model.position_matrix` for
the definition of the matrix :math:`X`.
See also Fig. 3 in Phys. Rev. Lett. 102, 107603 (2009) for a
discussion of the hybrid Wannier function centers in the
context of a Chern insulator.
:param evec: Eigenvectors for which we are computing matrix
elements of the position operator. The shape of this array
is evec[band,orbital] if *nspin* equals 1 and
evec[band,orbital,spin] if *nspin* equals 2.
:param dir: Direction along which we are computing matrix
elements. This integer must not be one of the periodic
directions since position operator matrix element in that
case is not well defined.
:param hwf_evec: Optional boolean variable. If set to *True*
this function will return not only eigenvalues but also
eigenvectors of :math:`X`. Default value is *False*.
:param basis: Optional parameter. If basis="bloch" then hybrid
Wannier function *hwf_evec* is written in the Bloch basis. I.e.
hwf[i,j] correspond to the weight of j-th Bloch state from *evec*
in the i-th hybrid Wannier function. If basis="orbital" and nspin=1 then
hwf[i,orb] correspond to the weight of orb-th orbital in the i-th
hybrid Wannier function. If basis="orbital" and nspin=2 then
hwf[i,orb,spin] correspond to the weight of orb-th orbital, spin-th
spin component in the i-th hybrid Wannier function. Default value
is "orbital".
:returns:
* **hwfc** -- Eigenvalues of the position operator matrix :math:`X`
(also called hybrid Wannier function centers).
Length of this vector equals number of bands given in *evec* input
array. Hybrid Wannier function centers are ordered in ascending order.
Note that in general *n*-th hwfc does not correspond to *n*-th electronic
state *evec*.
* **hwf** -- Eigenvectors of the position operator matrix :math:`X`.
(also called hybrid Wannier functions). These are returned only if
parameter *hwf_evec* is set to *True*.
The shape of this array is [h,x] or [h,x,s] depending on value of *basis*
and *nspin*. If *basis* is "bloch" then x refers to indices of
Bloch states *evec*. If *basis* is "orbital" then *x* (or *x* and *s*)
correspond to orbital index (or orbital and spin index if *nspin* is 2).
Example usage::
# diagonalizes Hamiltonian at some k-points
(evals, evecs) = my_model.solve_all(k_vec,eig_vectors=True)
# computes hybrid Wannier centers (and functions) for 3-rd kpoint
# and bottom five bands along first coordinate
(hwfc, hwf) = my_model.position_hwf(evecs[:5,2], 0, hwf_evec=True, basis="orbital")
See also this example: :ref:`haldane_hwf-example`,
"""
# check if model came from w90
if self._assume_position_operator_diagonal==False:
_offdiag_approximation_warning_and_stop()
# get position matrix
pos_mat=self.position_matrix(evec,dir)
# diagonalize
if hwf_evec==False:
hwfc=np.linalg.eigvalsh(pos_mat)
# sort eigenvalues and convert to real numbers
hwfc=_nicefy_eig(hwfc)
return np.array(hwfc,dtype=float)
else: # find eigenvalues and eigenvectors
(hwfc,hwf)=np.linalg.eigh(pos_mat)
# transpose matrix eig since otherwise it is confusing
# now eig[i,:] is eigenvector for eval[i]-th eigenvalue
hwf=hwf.T
# sort evectors, eigenvalues and convert to real numbers
(hwfc,hwf)=_nicefy_eig(hwfc,hwf)
# convert to right basis
if basis.lower().strip()=="bloch":
return (hwfc,hwf)
elif basis.lower().strip()=="orbital":
if self._nspin==1:
ret_hwf=np.zeros((hwf.shape[0],self._norb),dtype=complex)
# sum over bloch states to get hwf in orbital basis
for i in range(ret_hwf.shape[0]):
ret_hwf[i]=np.dot(hwf[i],evec)
hwf=ret_hwf
else:
ret_hwf=np.zeros((hwf.shape[0],self._norb*2),dtype=complex)
# get rid of spin indices
evec_use=evec.reshape([hwf.shape[0],self._norb*2])
# sum over states
for i in range(ret_hwf.shape[0]):
ret_hwf[i]=np.dot(hwf[i],evec_use)
# restore spin indices
hwf=ret_hwf.reshape([hwf.shape[0],self._norb,2])
return (hwfc,hwf)
else:
raise Exception("\n\nBasis must be either bloch or orbital!")
# keeping old name for backwards compatibility
# will be removed in future
tb_model.set_sites=tb_model.set_onsite
tb_model.add_hop=tb_model.set_hop
tbmodel=tb_model
class wf_array(object):
r"""
This class is used to solve a tight-binding model
:class:`pythtb.tb_model` on a regular or non-regular grid
of points in reciprocal space and/or parameter space, and
perform on it various calculations. For example it can be
used to calculate the Berry phase, Berry curvature, 1st Chern
number, etc.
*Regular k-space grid*:
If the grid is a regular k-mesh (no parametric dimensions),
a single call to the function
:func:`pythtb.wf_array.solve_on_grid` will both construct a
k-mesh that uniformly covers the Brillouin zone, and populate
it with wavefunctions (eigenvectors) computed on this grid.
The last point in each k-dimension is set so that it represents
the same Bloch function as the first one (this involves the
insertion of some orbital-position-dependent phase factors).
Example :ref:`haldane_bp-example` shows how to use wf_array on
a regular grid of points in k-space. Examples :ref:`cone-example`
and :ref:`3site_cycle-example` show how to use non-regular grid of
points.
*Parametric or irregular k-space grid*:
An irregular grid of points, or a grid that includes also
one or more parametric dimensions, can be populated manually
with the help of the *[]* operator. For example, to copy
eigenvectors *evec* into coordinate (2,3) in the *wf_array*
object *wf* one can simply do::
wf[2,3]=evec
The eigenvectors (wavefunctions) *evec* in the example above
are expected to be in the format *evec[band,orbital]*
(or *evec[band,orbital,spin]* for the spinfull calculation).
This is the same format as returned by
:func:`pythtb.tb_model.solve_one` or
:func:`pythtb.tb_model.solve_all` (in the latter case one
needs to restrict it to a single k-point as *evec[:,kpt,:]*
if the model has *dim_k>=1*).
If wf_array is used for closed paths, either in a
reciprocal-space or parametric direction, then one needs to
include both the starting and ending eigenfunctions even though
they are physically equivalent. If the array dimension in
question is a k-vector direction and the path traverses the
Brillouin zone in a primitive reciprocal-lattice direction,
:func:`pythtb.wf_array.impose_pbc` can be used to associate
the starting and ending points with each other; if it is a
non-winding loop in k-space or a loop in parameter space,
then :func:`pythtb.wf_array.impose_loop` can be used instead.
(These may not be necessary if only Berry fluxes are needed.)
Example :ref:`3site_cycle-example` shows how one
of the directions of *wf_array* object need not be a k-vector
direction, but can instead be a Hamiltonian parameter :math:`\lambda`
(see also discussion after equation 4.1 in :download:`notes on
tight-binding formalism <misc/pythtb-formalism.pdf>`).
:param model: Object of type :class:`pythtb.tb_model` representing
tight-binding model associated with this array of eigenvectors.
:param mesh_arr: Array giving a dimension of the grid of points in
each reciprocal-space or parametric direction.
Example usage::
# Construct wf_array capable of storing an 11x21 array of
# wavefunctions
wf = wf_array(tb, [11, 21])
# populate this wf_array with regular grid of points in
# Brillouin zone
wf.solve_on_grid([0.0, 0.0])
# Compute set of eigenvectors at one k-point
(eval, evec) = tb.solve_one([kx, ky], eig_vectors = True)
# Store it manually into a specified location in the array
wf[3, 4] = evec
# To access the eigenvectors from the same position
print(wf[3, 4])
"""
def __init__(self,model,mesh_arr):
# number of electronic states for each k-point
self._nsta=model._nsta
# number of spin components
self._nspin=model._nspin
# number of orbitals
self._norb=model._norb
# store orbitals from the model
self._orb=np.copy(model._orb)
# store entire model as well
self._model=copy.deepcopy(model)
# store dimension of array of points on which to keep wavefunctions
self._mesh_arr=np.array(mesh_arr)
self._dim_arr=len(self._mesh_arr)
# all dimensions should be 2 or larger, because pbc can be used
if True in (self._mesh_arr<=1).tolist():
raise Exception("\n\nDimension of wf_array object in each direction must be 2 or larger.")
# generate temporary array used later to generate object ._wfs
wfs_dim=np.copy(self._mesh_arr)
wfs_dim=np.append(wfs_dim,self._nsta)
wfs_dim=np.append(wfs_dim,self._norb)
if self._nspin==2:
wfs_dim=np.append(wfs_dim,self._nspin)
# store wavefunctions here in the form _wfs[kx_index,ky_index, ... ,band,orb,spin]
self._wfs=np.zeros(wfs_dim,dtype=complex)
def solve_on_grid(self,start_k):
r"""
Solve a tight-binding model on a regular mesh of k-points covering
the entire reciprocal-space unit cell. Both points at the opposite
sides of reciprocal-space unit cell are included in the array.
This function also automatically imposes periodic boundary
conditions on the eigenfunctions. See also the discussion in
:func:`pythtb.wf_array.impose_pbc`.
:param start_k: Origin of a regular grid of points in the reciprocal space.
:returns:
* **gaps** -- returns the minimal direct bandgap between the n-th and
(n+1)-th band, minimized over all k-points in the mesh. Note that in the case of band
crossings one may have to use very dense k-meshes to resolve
the crossing.
Example usage::
# Solve eigenvectors on a regular grid anchored
# at a given point
wf.solve_on_grid([-0.5, -0.5])
"""
# check dimensionality
if self._dim_arr!=self._model._dim_k:
raise Exception("\n\nIf using solve_on_grid method, dimension of wf_array must equal dim_k of the tight-binding model!")
# to return gaps at all k-points
if self._norb<=1:
all_gaps=None # trivial case since there is only one band
else:
gap_dim=np.copy(self._mesh_arr)-1
gap_dim=np.append(gap_dim,self._norb*self._nspin-1)
all_gaps=np.zeros(gap_dim,dtype=float)
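# (for each k-point of the grid, excluding the duplicated endpoint,
# store the norb*nspin-1 direct gaps between neighboring bands)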
#
if self._dim_arr==1:
# don't need to go over the last point because that will be
# computed in the impose_pbc call
for i in range(self._mesh_arr[0]-1):
# generate a kpoint
kpt=[start_k[0]+float(i)/float(self._mesh_arr[0]-1)]
# solve at that point
(eval,evec)=self._model.solve_one(kpt,eig_vectors=True)
# store wavefunctions
self[i]=evec
# store gaps
if all_gaps is not None:
all_gaps[i,:]=eval[1:]-eval[:-1]
# impose boundary conditions
self.impose_pbc(0,self._model._per[0])
elif self._dim_arr==2:
for i in range(self._mesh_arr[0]-1):
for j in range(self._mesh_arr[1]-1):
kpt=[start_k[0]+float(i)/float(self._mesh_arr[0]-1),\
start_k[1]+float(j)/float(self._mesh_arr[1]-1)]
(eval,evec)=self._model.solve_one(kpt,eig_vectors=True)
self[i,j]=evec
if all_gaps is not None:
all_gaps[i,j,:]=eval[1:]-eval[:-1]
for dir in range(2):
self.impose_pbc(dir,self._model._per[dir])
elif self._dim_arr==3:
for i in range(self._mesh_arr[0]-1):
for j in range(self._mesh_arr[1]-1):
for k in range(self._mesh_arr[2]-1):
kpt=[start_k[0]+float(i)/float(self._mesh_arr[0]-1),\
start_k[1]+float(j)/float(self._mesh_arr[1]-1),\
start_k[2]+float(k)/float(self._mesh_arr[2]-1)]
(eval,evec)=self._model.solve_one(kpt,eig_vectors=True)
self[i,j,k]=evec
if all_gaps is not None:
all_gaps[i,j,k,:]=eval[1:]-eval[:-1]
for dir in range(3):
self.impose_pbc(dir,self._model._per[dir])
elif self._dim_arr==4:
for i in range(self._mesh_arr[0]-1):
for j in range(self._mesh_arr[1]-1):
for k in range(self._mesh_arr[2]-1):
for l in range(self._mesh_arr[3]-1):
kpt=[start_k[0]+float(i)/float(self._mesh_arr[0]-1),\
start_k[1]+float(j)/float(self._mesh_arr[1]-1),\
start_k[2]+float(k)/float(self._mesh_arr[2]-1),\
start_k[3]+float(l)/float(self._mesh_arr[3]-1)]
(eval,evec)=self._model.solve_one(kpt,eig_vectors=True)
self[i,j,k,l]=evec
if all_gaps is not None:
all_gaps[i,j,k,l,:]=eval[1:]-eval[:-1]
for dir in range(4):
self.impose_pbc(dir,self._model._per[dir])
else:
raise Exception("\n\nWrong dimensionality!")
# in the trivial single-band case there are no gaps to return
if all_gaps is None:
    return None
return all_gaps.min(axis=tuple(range(self._dim_arr)))
def __check_key(self,key):
# do some checks for 1D
if self._dim_arr==1:
if type(key).__name__!='int':
raise TypeError("Key should be an integer!")
if key<(-1)*self._mesh_arr[0] or key>=self._mesh_arr[0]:
raise IndexError("Key outside the range!")
# do checks for higher dimension
else:
if len(key)!=self._dim_arr:
raise TypeError("Wrong dimensionality of key!")
for i,k in enumerate(key):
if type(k).__name__!='int':
raise TypeError("Key should be set of integers!")
if k<(-1)*self._mesh_arr[i] or k>=self._mesh_arr[i]:
raise IndexError("Key outside the range!")
def __getitem__(self,key):
# check that key is in the correct range
self.__check_key(key)
# return wavefunction
return self._wfs[key]
def __setitem__(self,key,value):
# check that key is in the correct range
self.__check_key(key)
# store wavefunction
self._wfs[key]=np.array(value,dtype=complex)
def impose_pbc(self,mesh_dir,k_dir):
r"""
If the *wf_array* object was populated using the
:func:`pythtb.wf_array.solve_on_grid` method, this function
should not be used since it will be called automatically by
the code.
The eigenfunctions :math:`\Psi_{n {\bf k}}` are by convention
chosen to obey a periodic gauge, i.e.,
:math:`\Psi_{n,{\bf k+G}}=\Psi_{n {\bf k}}` not only up to a
phase, but they are also equal in phase. It follows that
the cell-periodic Bloch functions are related by
:math:`u_{n,{\bf k+G}}=e^{-i{\bf G}\cdot{\bf r}}u_{n {\bf k}}`.
See :download:`notes on tight-binding formalism
<misc/pythtb-formalism.pdf>` section 4.4 and equation 4.18 for
more detail. This routine sets the cell-periodic Bloch function
at the end of the string in direction :math:`{\bf G}` according
to this formula, overwriting the previous value.
This function will impose these periodic boundary conditions along
one direction of the array. We are assuming that the k-point
mesh increases by exactly one reciprocal lattice vector along
this direction. This is currently **not** checked by the code;
it is the responsibility of the user. Currently *wf_array*
does not store the k-vectors on which the model was solved;
it only stores the eigenvectors (wavefunctions).
:param mesh_dir: Direction of wf_array along which you wish to
impose periodic boundary conditions.
:param k_dir: Index of the periodic k-vector direction in the
Brillouin zone of the underlying *tb_model* that corresponds
to *mesh_dir*. Since
version 1.7.0 this parameter is defined so that it is
specified between 0 and *dim_r-1*.
See example :ref:`3site_cycle-example`, where the periodic boundary
condition is applied only along one direction of *wf_array*.
Example usage::
# Imposes periodic boundary conditions along the mesh_dir=0
# direction of the wf_array object, assuming that along that
# direction the k_dir=1 component of the k-vector is increased
# by one reciprocal lattice vector. This could happen, for
# example, if the underlying tb_model is two dimensional but
# wf_array is a one-dimensional path along k_y direction.
wf.impose_pbc(mesh_dir=0,k_dir=1)
"""
if k_dir not in self._model._per:
raise Exception("Periodic boundary condition can be specified only along periodic directions!")
# Compute phase factors
ffac=np.exp(-2.j*np.pi*self._orb[:,k_dir])
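# (this is exp(-i G.r) evaluated at the orbital positions: advancing
# k by one reciprocal lattice vector G multiplies the cell-periodic
# part at orbital position tau by exp(-2*pi*i*tau[k_dir]))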
if self._nspin==1:
phase=ffac
else:
# for spinors, same phase multiplies both components
phase=np.zeros((self._norb,2),dtype=complex)
phase[:,0]=ffac
phase[:,1]=ffac
# Copy first eigenvector onto last one, multiplying by phase factors
# We can use numpy broadcasting since the orbital index is last
if mesh_dir==0:
self._wfs[-1,...]=self._wfs[0,...]*phase
elif mesh_dir==1:
self._wfs[:,-1,...]=self._wfs[:,0,...]*phase
elif mesh_dir==2:
self._wfs[:,:,-1,...]=self._wfs[:,:,0,...]*phase
elif mesh_dir==3:
self._wfs[:,:,:,-1,...]=self._wfs[:,:,:,0,...]*phase
else:
raise Exception("\n\nWrong value of mesh_dir.")
def impose_loop(self,mesh_dir):
r"""
If the user knows that the first and last points along the
*mesh_dir* direction correspond to the same Hamiltonian (this
is **not** checked), then this routine can be used to set the
eigenvectors equal (with equal phase), by replacing the last
eigenvector with the first one (for each band, and for each
other mesh direction, if any).
This routine should not be used if the first and last points
are related by a reciprocal lattice vector; in that case,
:func:`pythtb.wf_array.impose_pbc` should be used instead.
:param mesh_dir: Direction of wf_array along which you wish to
impose the loop condition, i.e., set the last point equal to
the first.
Example usage::
# Suppose the wf_array object is three-dimensional
# corresponding to (kx,ky,lambda) where (kx,ky) are
# wavevectors of a 2D insulator and lambda is an
# adiabatic parameter that goes around a closed loop.
# Then to ensure that the states at the ends of the lambda
# path are equal (with equal phase) in preparation for
# computing Berry phases in lambda for given (kx,ky), do:
wf.impose_loop(mesh_dir=2)
"""
# Copy first eigenvector onto last one
if mesh_dir==0:
self._wfs[-1,...]=self._wfs[0,...]
elif mesh_dir==1:
self._wfs[:,-1,...]=self._wfs[:,0,...]
elif mesh_dir==2:
self._wfs[:,:,-1,...]=self._wfs[:,:,0,...]
elif mesh_dir==3:
self._wfs[:,:,:,-1,...]=self._wfs[:,:,:,0,...]
else:
raise Exception("\n\nWrong value of mesh_dir.")
def berry_phase(self,occ,dir=None,contin=True,berry_evals=False):
r"""
Computes the Berry phase along a given array direction and
for a given set of occupied states. This assumes that the
occupied bands are well separated in energy from unoccupied
bands. It is the responsibility of the user to check that
this is satisfied. By default, the Berry phase traced over
occupied bands is returned, but optionally the individual
phases of the eigenvalues of the global unitary rotation
matrix (corresponding to "maximally localized Wannier
centers" or "Wilson loop eigenvalues") can be requested
(see parameter *berry_evals* for more details).
For an array of size *N* in direction *dir*, the Berry phase
is computed from the *N-1* inner products of neighboring
eigenfunctions. This corresponds to an "open-path Berry
phase" if the first and last points have no special
relation. If they correspond to the same physical
Hamiltonian, and have been properly aligned in phase using
:func:`pythtb.wf_array.impose_pbc` or
:func:`pythtb.wf_array.impose_loop`, then a closed-path
Berry phase will be computed.
For a one-dimensional wf_array (i.e., a single string), the
computed Berry phases are always chosen to be between -pi and pi.
For a higher dimensional wf_array, the Berry phase is computed
for each one-dimensional string of points, and an array of
Berry phases is returned. The Berry phase for the first string
(with lowest index) is always constrained to be between -pi and
pi. The range of the remaining phases depends on the value of
the input parameter *contin*.
The discretized formula used to compute Berry phase is described
in Sec. 4.5 of :download:`notes on tight-binding formalism
<misc/pythtb-formalism.pdf>`.
:param occ: Array of indices of energy bands which are considered
to be occupied.
:param dir: Index of wf_array direction along which Berry phase is
computed. This parameter need not be specified for
a one-dimensional wf_array.
:param contin: Optional boolean parameter. If True then the
branch choice of the Berry phase (which is indeterminate
modulo 2*pi) is made so that neighboring strings (in the
direction of increasing index value) have as close as
possible phases. The phase of the first string (with lowest
index) is always constrained to be between -pi and pi. If
False, the Berry phase for every string is constrained to be
between -pi and pi. The default value is True.
:param berry_evals: Optional boolean parameter. If True then
will compute and return the phases of the eigenvalues of the
product of overlap matrices. (These numbers correspond also
to hybrid Wannier function centers.) These phases are either
forced to be between -pi and pi (if *contin* is *False*) or
they are made to be continuous (if *contin* is True).
:returns:
* **pha** -- If *berry_evals* is False (default value) then
returns the Berry phase for each string. For a
one-dimensional wf_array this is just one number. For a
higher-dimensional wf_array *pha* contains one phase for
each one-dimensional string in the following format. For
example, if *wf_array* contains k-points on mesh with
indices [i,j,k] and if direction along which Berry phase
is computed is *dir=1* then *pha* will be two dimensional
array with indices [i,k], since Berry phase is computed
along second direction. If *berry_evals* is True then for
each string returns phases of all eigenvalues of the
product of overlap matrices. In the convention used for
previous example, *pha* in this case would have indices
[i,k,n] where *n* refers to index of individual phase of
the product matrix eigenvalue.
Example usage::
# Computes Berry phases along second direction for three lowest
# occupied states. For example, if wf is three-dimensional, then
# pha[2,3] would correspond to Berry phase of string of states
# along wf[2,:,3]
pha = wf.berry_phase([0, 1, 2], 1)
See also these examples: :ref:`haldane_bp-example`,
:ref:`cone-example`, :ref:`3site_cycle-example`,
"""
# check if model came from w90
if self._model._assume_position_operator_diagonal==False:
_offdiag_approximation_warning_and_stop()
#if dir<0 or dir>self._dim_arr-1:
# raise Exception("\n\nDirection key out of range")
#
# This could be coded more efficiently, but it is hard-coded for now.
#
# 1D case
if self._dim_arr==1:
# pick which wavefunctions to use
wf_use=self._wfs[:,occ,:]
# calculate berry phase
ret=_one_berry_loop(wf_use,berry_evals)
# 2D case
elif self._dim_arr==2:
# choice along which direction you wish to calculate berry phase
if dir==0:
ret=[]
for i in range(self._mesh_arr[1]):
wf_use=self._wfs[:,i,:,:][:,occ,:]
ret.append(_one_berry_loop(wf_use,berry_evals))
elif dir==1:
ret=[]
for i in range(self._mesh_arr[0]):
wf_use=self._wfs[i,:,:,:][:,occ,:]
ret.append(_one_berry_loop(wf_use,berry_evals))
else:
raise Exception("\n\nWrong direction for Berry phase calculation!")
# 3D case
elif self._dim_arr==3:
# choice along which direction you wish to calculate berry phase
if dir==0:
ret=[]
for i in range(self._mesh_arr[1]):
ret_t=[]
for j in range(self._mesh_arr[2]):
wf_use=self._wfs[:,i,j,:,:][:,occ,:]
ret_t.append(_one_berry_loop(wf_use,berry_evals))
ret.append(ret_t)
elif dir==1:
ret=[]
for i in range(self._mesh_arr[0]):
ret_t=[]
for j in range(self._mesh_arr[2]):
wf_use=self._wfs[i,:,j,:,:][:,occ,:]
ret_t.append(_one_berry_loop(wf_use,berry_evals))
ret.append(ret_t)
elif dir==2:
ret=[]
for i in range(self._mesh_arr[0]):
ret_t=[]
for j in range(self._mesh_arr[1]):
wf_use=self._wfs[i,j,:,:,:][:,occ,:]
ret_t.append(_one_berry_loop(wf_use,berry_evals))
ret.append(ret_t)
else:
raise Exception("\n\nWrong direction for Berry phase calculation!")
else:
raise Exception("\n\nWrong dimensionality!")
# convert phases to numpy array
if self._dim_arr>1 or berry_evals==True:
ret=np.array(ret,dtype=float)
# make phases of eigenvalues continuous
if contin==True:
# iron out 2pi jumps, make the gauge choice such that first phase in the
# list is fixed, others are then made continuous.
if berry_evals==False:
# 2D case
if self._dim_arr==2:
ret=_one_phase_cont(ret,ret[0])
# 3D case
elif self._dim_arr==3:
for i in range(ret.shape[1]):
if i==0: clos=ret[0,0]
else: clos=ret[0,i-1]
ret[:,i]=_one_phase_cont(ret[:,i],clos)
elif self._dim_arr!=1:
raise Exception("\n\nWrong dimensionality!")
# make eigenvalues continuous. This does not take care of band character
# at band crossings; for example, it will just connect pairs that are
# closest at neighboring points.
else:
# 2D case
if self._dim_arr==2:
ret=_array_phases_cont(ret,ret[0,:])
# 3D case
elif self._dim_arr==3:
for i in range(ret.shape[1]):
if i==0: clos=ret[0,0,:]
else: clos=ret[0,i-1,:]
ret[:,i]=_array_phases_cont(ret[:,i],clos)
elif self._dim_arr!=1:
raise Exception("\n\nWrong dimensionality!")
return ret
def position_matrix(self, key, occ, dir):
"""Similar to :func:`pythtb.tb_model.position_matrix`. Only
difference is that states are now specified with key in the
mesh *key* and indices of bands *occ*."""
# check if model came from w90
if self._model._assume_position_operator_diagonal==False:
_offdiag_approximation_warning_and_stop()
#
evec=self._wfs[tuple(key)][occ]
return self._model.position_matrix(evec,dir)
def position_expectation(self, key, occ, dir):
"""Similar to :func:`pythtb.tb_model.position_expectation`. Only
difference is that states are now specified with key in the
mesh *key* and indices of bands *occ*."""
# check if model came from w90
if self._model._assume_position_operator_diagonal==False:
_offdiag_approximation_warning_and_stop()
#
evec=self._wfs[tuple(key)][occ]
return self._model.position_expectation(evec,dir)
def position_hwf(self, key, occ, dir, hwf_evec=False, basis="bloch"):
"""Similar to :func:`pythtb.tb_model.position_hwf`. Only
difference is that states are now specified with key in the
mesh *key* and indices of bands *occ*."""
# check if model came from w90
if self._model._assume_position_operator_diagonal==False:
_offdiag_approximation_warning_and_stop()
#
evec=self._wfs[tuple(key)][occ]
return self._model.position_hwf(evec,dir,hwf_evec,basis)
def berry_flux(self,occ,dirs=None,individual_phases=False):
r"""
In the case of a 2-dimensional *wf_array* array calculates the
integral of Berry curvature over the entire plane. In higher
dimensional case (3 or 4) it will compute integrated curvature
over all 2-dimensional slices of a higher-dimensional
*wf_array*.
:param occ: Array of indices of energy bands which are considered
to be occupied.
:param dirs: Array of indices of two wf_array directions on which
the Berry flux is computed. This parameter need not be
specified for a two-dimensional wf_array. By default *dirs* takes
the first two directions in the array.
:param individual_phases: If *True* then returns Berry phase
for each plaquette (small square) in the array. Default
value is *False*.
:returns:
* **flux** -- In the 2-dimensional case returns an integral
of Berry curvature (if *individual_phases* is *True* then
returns integral of Berry phase around each plaquette).
In higher dimensional case returns integral of Berry
curvature over all slices defined with directions *dirs*.
Returned value is an array over the remaining indices of
*wf_array*. (If *individual_phases* is *True* then it
returns again phases around each plaquette for each
slice. First indices define the slice, last two indices
index the plaquette.)
Example usage::
# Computes integral of Berry curvature of first three bands
flux = wf.berry_flux([0, 1, 2])
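For a 2D model populated with :func:`pythtb.wf_array.solve_on_grid`,
the first Chern number of, e.g., the lowest band can then be
estimated as (a minimal sketch)::
chern = wf.berry_flux([0]) / (2.*np.pi)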
"""
# check if model came from w90
if self._model._assume_position_operator_diagonal==False:
_offdiag_approximation_warning_and_stop()
# default case is to take first two directions for flux calculation
if dirs is None:
dirs=[0,1]
# consistency checks
if dirs[0]==dirs[1]:
raise Exception("Need to specify two different directions for Berry flux calculation.")
if dirs[0]>=self._dim_arr or dirs[1]>=self._dim_arr or dirs[0]<0 or dirs[1]<0:
raise Exception("Direction for Berry flux calculation out of bounds.")
# 2D case
if self._dim_arr==2:
# compute the fluxes through all plaquettes on the entire plane
ord=list(range(len(self._wfs.shape)))
# select two directions from dirs
ord[0]=dirs[0]
ord[1]=dirs[1]
plane_wfs=self._wfs.transpose(ord)
# take bands of choice
plane_wfs=plane_wfs[:,:,occ]
# compute fluxes
all_phases=_one_flux_plane(plane_wfs)
# return either total flux or individual phase for each plaquete
if individual_phases==False:
return all_phases.sum()
else:
return all_phases
# 3D or 4D case
elif self._dim_arr in [3,4]:
# compute the fluxes through all plaquettes on the entire plane
ord=list(range(len(self._wfs.shape)))
# select two directions from dirs
ord[0]=dirs[0]
ord[1]=dirs[1]
# find directions over which we wish to loop
ld=list(range(self._dim_arr))
ld.remove(dirs[0])
ld.remove(dirs[1])
if len(ld)!=self._dim_arr-2:
raise Exception("Hm, this should not happen? Inconsistency with the mesh size.")
# add remaining indices
if self._dim_arr==3:
ord[2]=ld[0]
if self._dim_arr==4:
ord[2]=ld[0]
ord[3]=ld[1]
# reorder wavefunctions
use_wfs=self._wfs.transpose(ord)
# loop over the remaining directions
if self._dim_arr==3:
slice_phases=np.zeros((self._mesh_arr[ord[2]],self._mesh_arr[dirs[0]]-1,self._mesh_arr[dirs[1]]-1),dtype=float)
for i in range(self._mesh_arr[ord[2]]):
# take a 2d slice
plane_wfs=use_wfs[:,:,i]
# take bands of choice
plane_wfs=plane_wfs[:,:,occ]
# compute fluxes on the slice
slice_phases[i,:,:]=_one_flux_plane(plane_wfs)
elif self._dim_arr==4:
slice_phases=np.zeros((self._mesh_arr[ord[2]],self._mesh_arr[ord[3]],self._mesh_arr[dirs[0]]-1,self._mesh_arr[dirs[1]]-1),dtype=float)
for i in range(self._mesh_arr[ord[2]]):
for j in range(self._mesh_arr[ord[3]]):
# take a 2d slice
plane_wfs=use_wfs[:,:,i,j]
# take bands of choice
plane_wfs=plane_wfs[:,:,occ]
# compute fluxes on the slice
slice_phases[i,j,:,:]=_one_flux_plane(plane_wfs)
# return either total flux or individual phase for each plaquete
if individual_phases==False:
return slice_phases.sum(axis=(-2,-1))
else:
return slice_phases
else:
raise Exception("\n\nWrong dimensionality!")
def berry_curv(self,occ,individual_phases=False):
r"""
.. warning:: This function has been renamed as :func:`pythtb.berry_flux` and is provided
here only for backwards compatibility with versions of pythtb prior to 1.7.0. Please
use related :func:`pythtb.berry_flux` as this function may not exist in future releases.
"""
print("""
Warning:
Usage of function berry_curv is discouraged.
It has been renamed as berry_flux, which should be used instead.
""")
return self.berry_flux(occ,individual_phases)
def k_path(kpts,nk,endpoint=True):
r"""
.. warning:: This function is here only for backwards compatibility
with version of pythtb prior to 1.7.0. Please use related :func:`pythtb.tb_model.k_path`
function as this function might not exist in the future releases of the code.
"""
print("""
Warning:
Usage of function k_path is discouraged.
Instead of the following code:
k_vec=k_path(...)
please use the following code:
(k_vec,k_dist,k_node)=my_model.k_path(...)
Note that this k_path function is a member of the tb_model class.
""")
if kpts=='full':
# this means the full Brillouin zone for 1D case
if endpoint==True:
return np.arange(nk+1,dtype=float)/float(nk)
else:
return np.arange(nk,dtype=float)/float(nk)
elif kpts=='half':
# this means the half Brillouin zone for 1D case
if endpoint==True:
return np.arange(nk+1,dtype=float)/float(2.*nk)
else:
return np.arange(nk,dtype=float)/float(2.*nk)
else:
# general case
kint=[]
k_list=np.array(kpts)
# go over all kpoints
for i in range(len(k_list)-1):
# go over all steps
for j in range(nk):
cur=k_list[i]+(k_list[i+1]-k_list[i])*float(j)/float(nk)
kint.append(cur)
# add last point
if endpoint==True:
kint.append(k_list[-1])
#
kint=np.array(kint)
return kint
def _nicefy_eig(eval,eig=None):
"Sort eigenvaules and eigenvectors, if given, and convert to real numbers"
# first take only real parts of the eigenvalues
eval=np.array(eval.real,dtype=float)
# sort energies
args=eval.argsort()
eval=eval[args]
if not (eig is None):
eig=eig[args]
return (eval,eig)
return eval
# for nice justified printout
def _nice_float(x,just,rnd):
return str(round(x,rnd)).rjust(just)
def _nice_int(x,just):
return str(x).rjust(just)
def _nice_complex(x,just,rnd):
ret=""
ret+=_nice_float(complex(x).real,just,rnd)
if complex(x).imag<0.0:
ret+=" - "
else:
ret+=" + "
ret+=_nice_float(abs(complex(x).imag),just,rnd)
ret+=" i"
return ret
def _wf_dpr(wf1,wf2):
"""calculate dot product between two wavefunctions.
wf1 and wf2 are of the form [orbital,spin]"""
return np.dot(wf1.flatten().conjugate(),wf2.flatten())
def _one_berry_loop(wf,berry_evals=False):
"""Do one Berry phase calculation (also returns a product of M
matrices). Always returns numbers between -pi and pi. wf has
format [kpnt,band,orbital,spin] and kpnt has to be one dimensional.
Assumes that first and last k-point are the same. Therefore if
there are n wavefunctions in total, will calculate phase along n-1
links only! If berry_evals is True then will compute phases for
individual states; these correspond to 1d hybrid Wannier
function centers. Otherwise just return one number, Berry phase."""
# number of occupied states
nocc=wf.shape[1]
# temporary matrices
prd=np.identity(nocc,dtype=complex)
ovr=np.zeros([nocc,nocc],dtype=complex)
# go over all pairs of k-points, assuming that last point is overcounted!
for i in range(wf.shape[0]-1):
# generate overlap matrix, go over all bands
for j in range(nocc):
for k in range(nocc):
ovr[j,k]=_wf_dpr(wf[i,j,:],wf[i+1,k,:])
# only find Berry phase
if berry_evals==False:
# multiply overlap matrices
prd=np.dot(prd,ovr)
# also find phases of individual eigenvalues
else:
# cleanup matrices with SVD then take product
matU,sing,matV=np.linalg.svd(ovr)
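# (the product matU.matV used below is the unitary factor of the
# polar decomposition of ovr, i.e. the unitary matrix closest to the
# overlap matrix; phases of its eigenvalues give the individual
# hybrid Wannier center contributions)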
prd=np.dot(prd,np.dot(matU,matV))
# calculate Berry phase
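# (the total Berry phase is -Im ln det of the product of overlap
# matrices, i.e. the discretized Berry-phase formula)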
if berry_evals==False:
det=np.linalg.det(prd)
pha=(-1.0)*np.angle(det)
return pha
# calculate phases of all eigenvalues
else:
evals=np.linalg.eigvals(prd)
eval_pha=(-1.0)*np.angle(evals)
# sort these numbers as well
eval_pha=np.sort(eval_pha)
return eval_pha
def _one_flux_plane(wfs2d):
"Compute fluxes on a two-dimensional plane of states."
# size of the mesh
nk0=wfs2d.shape[0]
nk1=wfs2d.shape[1]
# number of bands (will compute flux of all bands taken together)
nbnd=wfs2d.shape[2]
# here store flux through each plaquette of the mesh
all_phases=np.zeros((nk0-1,nk1-1),dtype=float)
# go over all plaquettes
for i in range(nk0-1):
for j in range(nk1-1):
# generate a small loop made out of four pieces
wf_use=[]
wf_use.append(wfs2d[i,j])
wf_use.append(wfs2d[i+1,j])
wf_use.append(wfs2d[i+1,j+1])
wf_use.append(wfs2d[i,j+1])
wf_use.append(wfs2d[i,j])
wf_use=np.array(wf_use,dtype=complex)
# calculate phase around one plaquette
all_phases[i,j]=_one_berry_loop(wf_use)
return all_phases
def no_2pi(x,clos):
"Make x as close to clos by adding or removing 2pi"
while abs(clos-x)>np.pi:
if clos-x>np.pi:
x+=2.0*np.pi
elif clos-x<-1.0*np.pi:
x-=2.0*np.pi
return x
def _one_phase_cont(pha,clos):
"""Reads in 1d array of numbers *pha* and makes sure that they are
continuous, i.e., that there are no jumps of 2pi. First number is
made as close to *clos* as possible."""
ret=np.copy(pha)
# go through entire list and "iron out" 2pi jumps
for i in range(len(ret)):
# which number to compare to
if i==0: cmpr=clos
else: cmpr=ret[i-1]
# make sure there are no 2pi jumps
ret[i]=no_2pi(ret[i],cmpr)
return ret
def _array_phases_cont(arr_pha,clos):
"""Reads in 2d array of phases *arr_pha* and makes sure that they
are continuous along first index, i.e., that there are no jumps of
2pi. First array of phases is made as close to *clos* as
possible."""
ret=np.zeros_like(arr_pha)
# go over all points
for i in range(arr_pha.shape[0]):
# which phases to compare to
if i==0: cmpr=clos
else: cmpr=ret[i-1,:]
# remember which indices are still available to be matched
avail=list(range(arr_pha.shape[1]))
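# (phases are matched greedily: each reference phase in cmpr is paired
# with the closest remaining phase on the unit circle)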
# go over all phases in cmpr[:]
for j in range(cmpr.shape[0]):
# minimal distance between pairs
min_dist=1.0E10
# closest index
best_k=None
# go over each phase in arr_pha[i,:]
for k in avail:
cur_dist=np.abs(np.exp(1.0j*cmpr[j])-np.exp(1.0j*arr_pha[i,k]))
if cur_dist<=min_dist:
min_dist=cur_dist
best_k=k
# remove this index from being possible pair later
avail.pop(avail.index(best_k))
# store phase in correct place
ret[i,j]=arr_pha[i,best_k]
# make sure there are no 2pi jumps
ret[i,j]=no_2pi(ret[i,j],cmpr[j])
return ret
class w90(object):
r"""
This class of the PythTB package imports tight-binding model
parameters from an output of a `Wannier90
<http://www.wannier.org>`_ code.
The `Wannier90 <http://www.wannier.org>`_ code is a
post-processing tool that takes as an input electron wavefunctions
and energies computed from first-principles using any of the
following codes: Quantum-Espresso (PWscf), AbInit, SIESTA, FLEUR,
Wien2k, VASP. As an output Wannier90 will create files that
contain parameters for a tight-binding model that exactly
reproduces the first-principles calculated electron band
structure.
The interface from Wannier90 to PythTB will use only the following
files created by Wannier90:
- *prefix*.win
- *prefix*\_hr.dat
- *prefix*\_centres.xyz
- *prefix*\_band.kpt (optional)
- *prefix*\_band.dat (optional)
The first file (*prefix*.win) is an input file to Wannier90 itself. This
file is needed so that PythTB can read in the unit cell vectors.
To correctly create the second and the third file (*prefix*\_hr.dat and
*prefix*\_centres.xyz) one needs to include the following flags in the win
file::
hr_plot = True
write_xyz = True
translate_home_cell = False
These lines ensure that *prefix*\_hr.dat and *prefix*\_centres.xyz
are written and that the centers of the Wannier functions written
in the *prefix*\_centres.xyz file are not translated to the home
cell. The *prefix*\_hr.dat file contains the onsite and hopping
terms.
The final two files (*prefix*\_band.kpt and *prefix*\_band.dat)
are optional. Please see documentation of function
:func:`pythtb.w90.w90_bands_consistency` for more detail.
So far we tested only Wannier90 version 2.0.1.
.. warning:: For the time being PythTB is not optimized to be used
with very large tight-binding models. Therefore it is not
advisable to use the interface to Wannier90 with large
first-principles calculations that contain many k-points and/or
electron bands. One way to reduce the computational cost is to
wannierize with Wannier90 only the bands of interest (for
example, bands near the Fermi level).
Units used throughout this interface with Wannier90 are
electron-volts (eV) and Angstroms.
.. warning:: User needs to make sure that the Wannier functions
computed using Wannier90 code are well localized. Otherwise the
tight-binding model might not interpolate well the band
structure. To ensure that the Wannier functions are well
localized it is often enough to check that the total spread at
the beginning of the minimization procedure (first total spread
printed in .wout file) is not more than 20% larger than the
total spread at the end of the minimization procedure. If those
spreads differ by much more than 20%, the user needs to specify
better initial projection functions.
In addition, please note that the interpolation is valid only
within the frozen energy window of the disentanglement
procedure.
.. warning:: So far PythTB assumes that the position operator is
diagonal in the tight-binding basis. This is discussed in the
:download:`notes on tight-binding formalism
<misc/pythtb-formalism.pdf>` in Eq. 2.7.,
:math:`\langle\phi_{{\bf R} i} \vert {\bf r} \vert \phi_{{\bf
R}' j} \rangle = ({\bf R} + {\bf t}_j) \delta_{{\bf R} {\bf R}'}
\delta_{ij}`. However, this relation does not hold for Wannier
functions! Therefore, if you use tight-binding model derived
from this class in computing Berry-like objects that involve
position operator such as Berry phase or Berry flux, you would
not get the same result as if you computed those objects
directly from the first-principles code! Nevertheless, this
approximation does not affect other properties such as band
structure dispersion.
For testing purposes, the user can download the following
:download:`wannier90 output example
<misc/wannier90_example.tar.gz>` and use the following
:ref:`script <w90_quick>` to test the functionality of the interface to
PythTB. Run the following command in unix terminal to decompress
the tarball::
tar -zxf wannier90_example.tar.gz
and then run the following :ref:`script <w90_quick>` in the same
folder.
:param path: Relative path to the folder that contains Wannier90
files. These are *prefix*.win, *prefix*\_hr.dat,
*prefix*\_centres.xyz and optionally *prefix*\_band.kpt and
*prefix*\_band.dat.
:param prefix: This is the prefix used by Wannier90 code.
Typically the input to the Wannier90 code is named *prefix*.win.
Initially this function will read in the entire Wannier90 output.
To create :class:`pythtb.tb_model` object user needs to call
:func:`pythtb.w90.model`.
Example usage::
# reads Wannier90 from folder called *example_a*
# it assumes that that folder contains files "silicon.win" and so on
silicon=w90("example_a", "silicon")
"""
def __init__(self,path,prefix):
# store path and prefix
self.path=path
self.prefix=prefix
# read in lattice_vectors
f=open(self.path+"/"+self.prefix+".win","r")
ln=f.readlines()
f.close()
# get lattice vector
self.lat=np.zeros((3,3),dtype=float)
found=False
for i in range(len(ln)):
sp=ln[i].split()
if len(sp)>=2:
if sp[0].lower()=="begin" and sp[1].lower()=="unit_cell_cart":
# get units right
if ln[i+1].strip().lower()=="bohr":
pref=0.5291772108
skip=1
elif ln[i+1].strip().lower() in ["ang","angstrom"]:
pref=1.0
skip=1
else:
pref=1.0
skip=0
# now get vectors
for j in range(3):
sp=ln[i+skip+1+j].split()
for k in range(3):
self.lat[j,k]=float(sp[k])*pref
found=True
break
if found==False:
raise Exception("Unable to find unit_cell_cart block in the .win file.")
# read in hamiltonian matrix, in eV
f=open(self.path+"/"+self.prefix+"_hr.dat","r")
ln=f.readlines()
f.close()
#
# get number of wannier functions
self.num_wan=int(ln[1])
# get number of Wigner-Seitz points
num_ws=int(ln[2])
# get degeneracies of Wigner-Seitz points
deg_ws=[]
for j in range(3,len(ln)):
sp=ln[j].split()
for s in sp:
deg_ws.append(int(s))
if len(deg_ws)==num_ws:
last_j=j
break
if len(deg_ws)>num_ws:
raise Exception("Too many degeneracies for WS points!")
deg_ws=np.array(deg_ws,dtype=int)
# now read in matrix elements
# Convention used in w90 is to write out:
# R1, R2, R3, i, j, ham_r(i,j,R)
# where ham_r(i,j,R) corresponds to matrix element < i | H | j+R >
self.ham_r={} # format is ham_r[(R1,R2,R3)]["h"][i,j] for < i | H | j+R >
ind_R=0 # which R vector in line is this?
for j in range(last_j+1,len(ln)):
sp=ln[j].split()
# get reduced lattice vector components
ham_R1=int(sp[0])
ham_R2=int(sp[1])
ham_R3=int(sp[2])
# get Wannier indices
ham_i=int(sp[3])-1
ham_j=int(sp[4])-1
# get matrix element
ham_val=float(sp[5])+1.0j*float(sp[6])
# store stuff, for each R store hamiltonian and degeneracy
ham_key=(ham_R1,ham_R2,ham_R3)
if (ham_key in self.ham_r)==False:
self.ham_r[ham_key]={
"h":np.zeros((self.num_wan,self.num_wan),dtype=complex),
"deg":deg_ws[ind_R]
}
ind_R+=1
self.ham_r[ham_key]["h"][ham_i,ham_j]=ham_val
# check if for every non-zero R there is also -R
for R in self.ham_r:
if not (R[0]==0 and R[1]==0 and R[2]==0):
found_pair=False
for P in self.ham_r:
if not (P[0]==0 and P[1]==0 and P[2]==0):
# check if they are opposite
if R[0]==-P[0] and R[1]==-P[1] and R[2]==-P[2]:
if found_pair==True:
raise Exception("Found duplicate negative R!")
found_pair=True
if found_pair==False:
raise Exception("Did not find negative R for R = "+R+"!")
# read in wannier centers
f=open(self.path+"/"+self.prefix+"_centres.xyz","r")
ln=f.readlines()
f.close()
# Wannier centers in Cartesian, Angstroms
xyz_cen=[]
for i in range(2,2+self.num_wan):
sp=ln[i].split()
if sp[0]=="X":
tmp=[]
for j in range(3):
tmp.append(float(sp[j+1]))
xyz_cen.append(tmp)
else:
raise Exception("Inconsistency in the centres file.")
self.xyz_cen=np.array(xyz_cen,dtype=float)
# get orbital positions in reduced coordinates
self.red_cen=_cart_to_red((self.lat[0],self.lat[1],self.lat[2]),self.xyz_cen)
def model(self,zero_energy=0.0,min_hopping_norm=None,max_distance=None,ignorable_imaginary_part=None):
"""
This function returns :class:`pythtb.tb_model` object that can
be used to interpolate the band structure at arbitrary
k-point, analyze the wavefunction character, etc.
The tight-binding basis orbitals in the returned object are
maximally localized Wannier functions as computed by
Wannier90. The orbital character of these functions can be
inferred either from the *projections* block in the
*prefix*.win or from the *prefix*.nnkp file. Please note that
the character of the maximally localized Wannier functions is
not exactly the same as that specified by the initial
projections. One way to ensure that the Wannier functions are
as close to the initial projections as possible is to first
choose a good set of initial projections (for these, the initial
and final spreads should not differ by more than 20%) and then
perform another Wannier90 run setting *num_iter=0* in the
*prefix*.win file.
Number of spin components is always set to 1, even if the
underlying DFT calculation includes spin. Please refer to the
*projections* block or the *prefix*.nnkp file to see which
orbitals correspond to which spin.
Locations of the orbitals in the returned
:class:`pythtb.tb_model` object are equal to the centers of
the Wannier functions computed by Wannier90.
:param zero_energy: Sets the zero of the energy in the band
structure. This value is typically set to the Fermi level
computed by the density-functional code (or to the top of the
valence band). Units are electron-volts.
:param min_hopping_norm: Hopping terms read from Wannier90 with
complex norm less than *min_hopping_norm* will not be included
in the returned tight-binding model. This parameter is
specified in electron-volts. By default all terms regardless
of their norm are included.
:param max_distance: Hopping terms from site *i* to site *j+R* will
be ignored if the distance from orbital *i* to *j+R* is larger
than *max_distance*. This parameter is given in Angstroms.
By default all terms regardless of the distance are included.
:param ignorable_imaginary_part: The hopping term will be assumed to
be exactly real if the absolute value of the imaginary part as
computed by Wannier90 is less than *ignorable_imaginary_part*.
By default imaginary terms are not ignored. Units are again
eV.
:returns:
* **tb** -- The object of type :class:`pythtb.tb_model` that can be used to
interpolate Wannier90 band structure to an arbitrary k-point as well
as to analyze the character of the wavefunctions.
Example usage::
# returns tb_model with all hopping parameters
my_model=silicon.model()
# simplified model that contains only hopping terms above 0.01 eV
my_model_simple=silicon.model(min_hopping_norm=0.01)
my_model_simple.display()
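An even smaller model can be obtained by also applying a real-space
cutoff (a sketch; the 4 Angstrom value is only illustrative, and a
sensible *max_distance* can be read off from :func:`pythtb.w90.shells`
or :func:`pythtb.w90.dist_hop`)::
my_model_short=silicon.model(min_hopping_norm=0.01, max_distance=4.0)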
"""
# make the model object
tb=tb_model(3,3,self.lat,self.red_cen)
# remember that this model was computed from w90
tb._assume_position_operator_diagonal=False
# add onsite energies
onsite=np.zeros(self.num_wan,dtype=float)
for i in range(self.num_wan):
tmp_ham=self.ham_r[(0,0,0)]["h"][i,i]/float(self.ham_r[(0,0,0)]["deg"])
onsite[i]=tmp_ham.real
if np.abs(tmp_ham.imag)>1.0E-9:
raise Exception("Onsite terms should be real!")
tb.set_onsite(onsite-zero_energy)
# add hopping terms
for R in self.ham_r:
# avoid double counting
use_this_R=True
# avoid onsite terms
if R[0]==0 and R[1]==0 and R[2]==0:
avoid_diagonal=True
else:
avoid_diagonal=False
# avoid taking both R and -R
if R[0]!=0:
if R[0]<0:
use_this_R=False
else:
if R[1]!=0:
if R[1]<0:
use_this_R=False
else:
if R[2]<0:
use_this_R=False
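# (keep only those R whose first non-zero component is positive, so
# that each R/-R pair is visited once; the Hermitian-conjugate term
# for -R is accounted for automatically when the model is built)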
# get R vector
vecR=_red_to_cart((self.lat[0],self.lat[1],self.lat[2]),[R])[0]
# scan through unique R
if use_this_R==True:
for i in range(self.num_wan):
vec_i=self.xyz_cen[i]
for j in range(self.num_wan):
vec_j=self.xyz_cen[j]
# get distance between orbitals
dist_ijR=np.sqrt(np.dot(-vec_i+vec_j+vecR,
-vec_i+vec_j+vecR))
# to prevent double counting
if not (avoid_diagonal==True and j<=i):
# only if distance between orbitals is small enough
if max_distance is not None:
if dist_ijR>max_distance:
continue
# divide the matrix element from w90 with the degeneracy
tmp_ham=self.ham_r[R]["h"][i,j]/float(self.ham_r[R]["deg"])
# only if big enough matrix element
if min_hopping_norm is not None:
if np.abs(tmp_ham)<min_hopping_norm:
continue
# remove imaginary part if needed
if ignorable_imaginary_part is not None:
if np.abs(tmp_ham.imag)<ignorable_imaginary_part:
tmp_ham=tmp_ham.real+0.0j
# set the hopping term
tb.set_hop(tmp_ham,i,j,list(R))
return tb
def dist_hop(self):
"""
This is one of the diagnostic tools that can be used to help
in determining *min_hopping_norm* and *max_distance* parameter in
:func:`pythtb.w90.model` function call.
This function returns all hopping terms (from orbital *i* to
*j+R*) as well as the distances between the *i* and *j+R*
orbitals. For well localized Wannier functions hopping term
should decay exponentially with distance.
:returns:
* **dist** -- Distances between Wannier function centers (*i* and *j+R*) in Angstroms.
* **ham** -- Corresponding hopping terms in eV.
Example usage::
# get distances and hopping terms
(dist,ham)=silicon.dist_hop()
# plot logarithm of the hopping term as a function of distance
import pylab as plt
fig, ax = plt.subplots()
ax.scatter(dist,np.log(np.abs(ham)))
fig.savefig("localization.pdf")
"""
ret_ham=[]
ret_dist=[]
for R in self.ham_r:
# treat diagonal terms differently
if R[0]==0 and R[1]==0 and R[2]==0:
avoid_diagonal=True
else:
avoid_diagonal=False
# get R vector
vecR=_red_to_cart((self.lat[0],self.lat[1],self.lat[2]),[R])[0]
for i in range(self.num_wan):
vec_i=self.xyz_cen[i]
for j in range(self.num_wan):
vec_j=self.xyz_cen[j]
# diagonal terms
if not (avoid_diagonal==True and i==j):
# divide the matrix element from w90 with the degeneracy
ret_ham.append(self.ham_r[R]["h"][i,j]/float(self.ham_r[R]["deg"]))
# get distance between orbitals
ret_dist.append(np.sqrt(np.dot(-vec_i+vec_j+vecR,-vec_i+vec_j+vecR)))
return (np.array(ret_dist),np.array(ret_ham))
def shells(self,num_digits=2):
"""
This is one of the diagnostic tools that can be used to help
in determining *max_distance* parameter in
:func:`pythtb.w90.model` function call.
:param num_digits: Distances will be rounded to this many
digits. Default value is 2.
:returns:
* **shells** -- All distances between all Wannier function centers (*i* and *j+R*) in Angstroms.
Example usage::
# prints on screen all shells
print(silicon.shells())
"""
shells=[]
for R in self.ham_r:
# get R vector
vecR=_red_to_cart((self.lat[0],self.lat[1],self.lat[2]),[R])[0]
for i in range(self.num_wan):
vec_i=self.xyz_cen[i]
for j in range(self.num_wan):
vec_j=self.xyz_cen[j]
# get distance between orbitals
dist_ijR=np.sqrt(np.dot(-vec_i+vec_j+vecR,
-vec_i+vec_j+vecR))
# round it up
shells.append(round(dist_ijR,num_digits))
# remove duplicates and sort
shells=np.sort(list(set(shells)))
return shells
def w90_bands_consistency(self):
"""
This function reads in band structure as interpolated by
Wannier90. Please note that this is not the same as the band
structure calculated by the underlying DFT code. The two will
agree only on the coarse set of k-points that were used in
Wannier90 generation.
The purpose of this function is to compare the interpolation
in Wannier90 with that in PythTB. If no terms were ignored in
the call to :func:`pythtb.w90.model` then the two should
be exactly the same (up to numerical precision). Otherwise
one should expect deviations. However, if one carefully
chooses the cutoff parameters in :func:`pythtb.w90.model`
it is likely that one could reproduce the full band-structure
with only a few dominant hopping terms. Please note that this
tests only the eigenenergies, not the eigenvectors (wavefunctions).
The code assumes that the following files were generated by
Wannier90,
- *prefix*\_band.kpt
- *prefix*\_band.dat
These files will be generated only if the *prefix*.win file
contains the *kpoint_path* block.
:returns:
* **kpts** -- k-points in reduced coordinates used in the
interpolation in Wannier90 code. The format of *kpts* is
the same as the one used by the input to
:func:`pythtb.tb_model.solve_all`.
* **ene** -- energies interpolated by Wannier90 in
eV. Format is ene[band,kpoint].
Example usage::
# get band structure from wannier90
(w90_kpt,w90_evals)=silicon.w90_bands_consistency()
# get simplified model
my_model_simple=silicon.model(min_hopping_norm=0.01)
# solve simplified model on the same k-path as in wannier90
evals=my_model_simple.solve_all(w90_kpt)
# plot comparison of the two
import pylab as plt
fig, ax = plt.subplots()
for i in range(evals.shape[0]):
ax.plot(range(evals.shape[1]),evals[i],"r-",zorder=-50)
for i in range(w90_evals.shape[0]):
ax.plot(range(w90_evals.shape[1]),w90_evals[i],"k-",zorder=-100)
fig.savefig("comparison.pdf")
"""
# read in kpoints in reduced coordinates
kpts=np.loadtxt(self.path+"/"+self.prefix+"_band.kpt",skiprows=1)
# ignore weights
kpts=kpts[:,:3]
# read in energies
ene=np.loadtxt(self.path+"/"+self.prefix+"_band.dat")
# ignore kpath distance
ene=ene[:,1]
# correct shape
ene=ene.reshape((self.num_wan,kpts.shape[0]))
return (kpts,ene)
def _cart_to_red(tmp,cart):
"Convert cartesian vectors cart to reduced coordinates of a1,a2,a3 vectors"
(a1,a2,a3)=tmp
# matrix with lattice vectors
cnv=np.array([a1,a2,a3])
# transpose a matrix
cnv=cnv.T
# invert a matrix
cnv=np.linalg.inv(cnv)
# reduced coordinates
red=np.zeros_like(cart,dtype=float)
for i in range(0,len(cart)):
red[i]=np.dot(cnv,cart[i])
return red
def _red_to_cart(tmp,red):
"Convert reduced to cartesian vectors."
(a1,a2,a3)=tmp
# cartesian coordinates
cart=np.zeros_like(red,dtype=float)
for i in range(0,len(cart)):
cart[i,:]=a1*red[i][0]+a2*red[i][1]+a3*red[i][2]
return cart
def _offdiag_approximation_warning_and_stop():
raise Exception("""
----------------------------------------------------------------------
It looks like you are trying to calculate a Berry-like object that
involves the position operator. However, you are using a tight-binding
model that was generated from Wannier90. This procedure introduces an
approximation as it ignores off-diagonal elements of the position
operator in the Wannier basis. This is discussed here in more
detail:
http://physics.rutgers.edu/pythtb/usage.html#pythtb.w90
If you know what you are doing and wish to continue with the
calculation despite this approximation, please call the following
function on your tb_model object
my_model.ignore_position_operator_offdiagonal()
----------------------------------------------------------------------
""")
| lgpl-3.0 |
JosmanPS/scikit-learn | examples/manifold/plot_compare_methods.py | 259 | 4031 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space. Unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
gdhungana/corrLSS | py/corrLSS/correlation.py | 1 | 9562 | import numpy as np
import matplotlib.pyplot as plt
import treecorr as tc
import astropy.table
import healpy as hp
from corrLSS.util import apply_mask,radec2thetaphi
# Cosmology
def set_cosmology():
Omega_matter = 0.140247/0.6800232**2
Omega_baryon = 0.022337/0.6800232**2
Omega_curvature = 0
H0 = 68.002320
sigma_8 = 0.811322
n_s = 0.963180
from astropy.cosmology import FlatLambdaCDM
cosmo=FlatLambdaCDM(H0=H0,Om0=Omega_matter)
return cosmo
def arrange_catalog(catfile,rndfile=None,zmin=None,zmax=None,objtype=None,truthfile=None):
"""
Use treecorr to evaluate two point correlation given a data catalog and a random catalog
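A minimal illustrative call (the file names, redshift cuts and object
type below are placeholders, not files shipped with corrLSS)::
datacat, rndcat = arrange_catalog("data_cat.fits", rndfile="random_cat.fits",
zmin=0.8, zmax=1.2, objtype="QSO")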
"""
print("Reading data catalog")
#datatab=astropy.table.Table.read(catfile)
cat=astropy.io.fits.open(catfile)
datacat=cat[1].data
try:
z_data=datacat['Z_COSMO']
print("Using Z_COSMO for z")
except:
try:
z_data=datacat['TRUEZ']
print("Using TRUEZ for z")
except:
try:
z_data=datacat['Z']
print("Using Z for z")
except:
raise ValueError("None of the specified z-types match. Check fits header")
if truthfile is not None: #- required to match targetid for ra,dec
tru=astropy.io.fits.open(truthfile)
trucat=tru[1].data
truid=trucat['TARGETID']
dataid=datacat['TARGETID']
#- map targetid sorted as in dataid
tt=np.argsort(truid)
ss=np.searchsorted(truid[tt],dataid)
srt_idx=tt[ss]
np.testing.assert_array_equal(truid[srt_idx],dataid)
print("100% targets matched for data catalog")
ra_data=trucat['RA'][srt_idx]
dec_data=trucat['DEC'][srt_idx]
else:
ra_data=datacat['ra']
dec_data=datacat['dec']
if objtype is not None:
try:
kk=np.where(datacat['SOURCETYPE']==objtype)[0]
print("Using sourcetype {}".format(objtype))
except:
try:
kk=np.where(datacat['SPECTYPE']==objtype)[0]
print("Using spectype {}".format(objtype))
except:
print("Objtype doesn't match header key. Check fits header")
print("Total {} in the data: {}".format(objtype,len(kk)))
print("Total {} in the data: {}".format(objtype,len(kk)))
ra_data=ra_data[kk]
dec_data=dec_data[kk]
z_data=z_data[kk]
cosmo=set_cosmology()
if zmin is None: zmin=np.min(z_data)
if zmax is None: zmax=np.max(z_data)
print("zmin:{} to zmax: {}".format(zmin,zmax))
#TODO make this loop over different redshift bins to avoid reading catalogs each time
wh=np.logical_and(z_data>zmin,z_data<zmax)
ngal=np.count_nonzero(wh)
print("Bin contains: {} galaxies".format(np.count_nonzero(wh)))
print(cosmo.H0)
cmvr_data=cosmo.comoving_distance(z_data[wh])*cosmo.H0.value/100.
dmin,dmax=cosmo.comoving_distance([zmin,zmax])*cosmo.H0.value/100.
print("Dmin to Dmax: {} to {}".format(dmin,dmax))
print("Organizing data catalog to use")
datacat=make_catalog(ra_data[wh],dec_data[wh],cmvr_data)
if rndfile is not None:
print("Reading random catalog")
#rndtab=astropy.table.Table.read(rndfile)
rnd=astropy.io.fits.open(rndfile)
rndtab=rnd[1].data
z_rnd=rndtab['z']
ra_rnd=rndtab['ra']
dec_rnd=rndtab['dec']
whr=np.logical_and(z_rnd>zmin,z_rnd<zmax)
nran=np.count_nonzero(whr)
print("Bin Contains: {} random objects".format( np.count_nonzero(whr)))
cmvr_rnd=cosmo.comoving_distance(z_rnd[whr])*cosmo.H0.value/100.
print("Organizing random catalog to use")
rndcat=make_catalog(ra_rnd[whr],dec_rnd[whr],cmvr_rnd)
return datacat, rndcat
else:
return datacat
def correlate_tc(datacat,rndcat,outfile,cutoff=None):
"""
datacat and rndcat are tc.Catalog objects
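A sketch of typical use (assuming datacat and rndcat come from
arrange_catalog above; the output file name is a placeholder)::
correlate_tc(datacat, rndcat, 'xi_output.fits')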
"""
print("Auto correlating data")
dd=tc.NNCorrelation(min_sep=0.1,bin_size=0.025,max_sep=180.)
dd.process(datacat)
print("Auto correlating random")
rr=tc.NNCorrelation(min_sep=0.1,bin_size=0.025,max_sep=180.)
rr.process(rndcat)
print("Cross Correlating")
dr=tc.NNCorrelation(min_sep=0.1,bin_size=0.025,max_sep=180.)
dr.process(datacat,rndcat)
print("Calculating 2-pt. correlation")
xi,xivar=dd.calculateXi(rr,dr)
tab=astropy.table.Table([np.exp(dd.logr),xi,xivar],names=('r','xi','xivar'))
tab.write(outfile,overwrite=True)
def random_data_xyz(datacat,bandwidth=0.2,format='xyz'):
"""
datacat is a treecorr Catalog object; for format='xyz' it should carry x, y and z,
for format='radecr' it should carry ra, dec and r. The random catalog is drawn from a Gaussian KDE of the data.
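Illustrative call (assuming datacat was built with make_catalog, defined
below, so that it carries ra, dec and r)::
randcat = random_data_xyz(datacat, bandwidth=0.2, format='radecr')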
"""
from scipy.stats import gaussian_kde
if format=='xyz':
values=np.vstack([datacat.x,datacat.y,datacat.z])
kde=gaussian_kde(values,bw_method=bandwidth/values.std(ddof=1))
nx,ny,nz=kde.resample(2*len(datacat.z))
randcat=tc.Catalog(x=nx,y=ny,z=nz)
elif format=='radecr':
values=np.vstack([datacat.ra/datacat.ra_units,datacat.dec/datacat.dec_units,datacat.r])
kde=gaussian_kde(values,bw_method=bandwidth/values.std(ddof=1))
nra,ndec,nr=kde.resample(2*len(datacat.r))
randcat=tc.Catalog(ra=nra,dec=ndec,ra_units='deg',dec_units='deg',r=nr)
return randcat
def make_catalog(ra,dec,cmvr=None): #- ra, dec in degrees
cat=tc.Catalog(ra=ra,dec=dec,r=cmvr,ra_units='deg',dec_units='deg')
return cat
def two_point(data,data_R,bins,method='landy-szalay',seed=1234,saverandom=False):
"""
Uses nearest neighbors KDtree to evaluate two point correlation
args:
data: n samples x m features data array, eg. x,y,z positions
bins: 1d bins array
return:
two - pt correlation correlation give the method.
Errors are not returned. A bootstrap sampling can be run N times to
evaluate errors.
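Illustrative sketch (randomly generated points, not package data)::
data = np.random.rand(1000, 3)
bins = np.linspace(0.01, 0.5, 20)
corr = two_point(data, None, bins, method='landy-szalay')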
"""
from sklearn.neighbors import KDTree
data = np.asarray(data)
bins = np.asarray(bins)
rng = np.random.RandomState(seed)
if method not in ['standard', 'landy-szalay']:
raise ValueError("method must be 'standard' or 'landy-szalay'")
if bins.ndim != 1:
raise ValueError("bins must be a 1D array")
if data.ndim == 1:
data = data[:, np.newaxis]
elif data.ndim != 2:
raise ValueError("data should be 1D or 2D")
n_samples, n_features = data.shape
Nbins = len(bins) - 1
# shuffle all but one axis to get background distribution
if data_R is None:
data_R = data.copy()
for i in range(n_features - 1):
rng.shuffle(data_R[:, i])
else:
data_R = np.asarray(data_R)
if (data_R.ndim != 2) or (data_R.shape[-1] != n_features):
raise ValueError('data_R must have same n_features as data')
factor = len(data_R) * 1. / len(data)
KDT_D=KDTree(data)
KDT_R=KDTree(data_R)
print("Correlating Data, data size: {}".format(len(data)))
counts_DD=KDT_D.two_point_correlation(data,bins)
print('Correlating Random, random size: {}'.format(len(data_R)))
counts_RR=KDT_R.two_point_correlation(data_R,bins)
DD=np.diff(counts_DD)
RR=np.diff(counts_RR)
#- Check for zero in RR
RR_zero = (RR == 0)
RR[RR_zero]=1
if method == 'standard':
corr = factor**2*DD/RR - 1
elif method == 'landy-szalay':
print("Cross Correlating")
counts_DR=KDT_R.two_point_correlation(data,bins)
DR=np.diff(counts_DR)
print("Evaluating correlation using {}".format(method))
corr = (factor**2 * DD - 2 * factor * DR + RR)/RR
corr[RR_zero] = np.nan
return corr
def extract_catalog(catalog,zmin=None,zmax=None):
print("Reading catalog.")
tab = astropy.table.Table.read(catalog)
ra = tab['RA']
dec = tab['DEC']
z = tab['Z']
print("Objects in catalog: {}".format(len(z)))
if zmin is None: zmin=np.min(z)
if zmax is None: zmax=np.max(z)
sel=np.where((z >= zmin) & (z < zmax))
ra = ra[sel]
dec = dec[sel]
z = z[sel]
print("Objects in this redshift bin".format(z.shape[0]))
#- set cosmology
print("Setting Fiducial Cosmology")
cosmo = set_cosmology()
cmv_r = cosmo.comoving_distance(z)*cosmo.H0.value/100.
#- Coordinates:
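#- NOTE: ra_dec_to_xyz is not defined or imported in this module; it is
#- presumably the helper of the same name from astroML.correlation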
carx,cary,carz = ra_dec_to_xyz(ra,dec) * cmv_r
#- set data:
data=np.transpose([carx,cary,carz])
return data
def make_data_R_catalog(datacat,outfile='random_from_datacat.fits',seed=1234):
"""
Make random background from shuffling data
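A sketch of typical use (the file names are placeholders)::
make_data_R_catalog("data_cat.fits", outfile="random_from_datacat.fits")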
"""
print("Reading data catalog")
datatab=astropy.table.Table.read(datacat)
ra = datatab['ra']
dec = datatab['dec']
z = datatab['z']
data=np.transpose([ra,dec,z])
#- create random by shuffling all but 1 axis
print("Making random catalog from data")
data_R = data.copy()
n_samples, n_features = data.shape
rng = np.random.RandomState(seed)
for i in range(n_features - 1):
rng.shuffle(data_R[:, i])
randdata=astropy.table.Table([data_R[:,0],data_R[:,1],data_R[:,2]],names=('RA','DEC','Z'))
randdata.write(outfile,format='fits')
print("Written Random file from data shuffling: {}".format(outfile))
def est_correlation(data,bins,data_R=None,method='landy-szalay'):
#- correlation
print("Evaluating 2-pt Correlation.")
corr=two_point(data,data_R,bins,method=method)
return bins,corr
| mit |
has2k1/plotnine | plotnine/tests/test_position.py | 1 | 5061 | import string
import numpy as np
import pandas as pd
import pytest
from plotnine import (ggplot, aes, geom_point, geom_jitter, geom_bar,
geom_col, geom_boxplot, geom_text, geom_rect,
after_stat, position_dodge, position_dodge2,
position_jitter, position_jitterdodge,
position_nudge, position_stack, theme)
from plotnine.positions.position import position
from plotnine.exceptions import PlotnineError
n = 6
m = 10
random_state = np.random.RandomState(1234567890)
df1 = pd.DataFrame({'x': [1, 2, 1, 2],
'y': [1, 1, 2, 2]})
df2 = pd.DataFrame({'x': np.repeat(range(n+1), range(n+1)),
'z': np.repeat(range(n//2), range(3, n*2, 4))})
df3 = pd.DataFrame({
'x': random_state.choice(['A', 'B'], n*m),
'y': random_state.randint(0, 20, n*m),
'c': random_state.choice([False, False, True, False], n*m)
})
random_state.seed(1234567890)
_theme = theme(subplots_adjust={'right': 0.85})
def test_jitter():
df1 = pd.DataFrame({'x': [1, 2, 1, 2],
'y': [1, 1, 2, 2]})
p = (ggplot(df1, aes('x', 'y')) +
geom_point(size=10) +
geom_jitter(size=10, color='red', random_state=random_state) +
geom_jitter(size=10, color='blue', width=0.1,
height=0.1, random_state=random_state))
assert p + _theme == 'jitter'
with pytest.raises(PlotnineError):
geom_jitter(position=position_jitter(), width=0.1)
def test_nudge():
p = (ggplot(df1, aes('x', 'y')) +
geom_point(size=10) +
geom_point(size=10, color='red',
position=position_nudge(.25, .25)))
assert p + _theme == 'nudge'
def test_stack():
p = (ggplot(df2, aes('factor(z)')) +
geom_bar(aes(fill='factor(x)'), position='stack'))
assert p + _theme == 'stack'
def test_stack_negative():
df = df1.copy()
_loc = df.columns.get_loc
df.iloc[0, _loc('y')] *= -1
df.iloc[len(df)-1, _loc('y')] *= -1
p = (ggplot(df)
+ geom_col(aes('factor(x)', 'y', fill='factor(y)'),
position='stack')
+ geom_text(aes('factor(x)', 'y', label='y'),
position=position_stack(vjust=0.5))
)
assert p + _theme == 'stack-negative'
def test_fill():
p = (ggplot(df2, aes('factor(z)')) +
geom_bar(aes(fill='factor(x)'), position='fill'))
assert p + _theme == 'fill'
def test_dodge():
p = (ggplot(df2, aes('factor(z)')) +
geom_bar(aes(fill='factor(x)'), position='dodge'))
assert p + _theme == 'dodge'
def test_dodge_preserve_single():
df1 = pd.DataFrame({'x': ['a', 'b', 'b'],
'y': ['a', 'a', 'b']})
p = (ggplot(df1, aes('x', fill='y')) +
geom_bar(position=position_dodge(preserve='single')))
assert p + _theme == 'dodge_preserve_single'
def test_dodge_preserve_single_text():
df1 = pd.DataFrame({'x': ['a', 'b', 'b', 'b'],
'y': ['a', 'a', 'b', 'b']})
d = position_dodge(preserve='single', width=0.9)
p = (ggplot(df1, aes('x', fill='y'))
+ geom_bar(position=d)
+ geom_text(
aes(y=after_stat('count'), label=after_stat('count')),
stat='count',
position=d,
va='bottom')
)
assert p + _theme == 'dodge_preserve_single_text'
def test_dodge2():
p = (ggplot(df3, aes('x', 'y', color='c')) +
geom_boxplot(position='dodge2', size=2))
assert p + _theme == 'dodge2'
def test_dodge2_varwidth():
p = (ggplot(df3, aes('x', 'y', color='c')) +
geom_boxplot(
position=position_dodge2(preserve='single'),
varwidth=True,
size=2)
)
assert p + _theme == 'dodge2_varwidth'
def test_jitterdodge():
df = pd.DataFrame({
'x': np.ones(n*2),
'y': np.repeat(np.arange(n), 2),
'letters': np.repeat(list(string.ascii_lowercase[:n]), 2)})
position = position_jitterdodge(random_state=random_state)
p = (ggplot(df, aes('x', 'y', fill='letters')) +
geom_point(size=10, fill='black') +
geom_point(size=10, position=position))
assert p + _theme == 'jitterdodge'
def test_position_from_geom():
geom = geom_point(position='jitter')
assert isinstance(position.from_geom(geom), position_jitter)
geom = geom_point(position='position_jitter')
assert isinstance(position.from_geom(geom), position_jitter)
geom = geom_point(position=position_jitter())
assert isinstance(position.from_geom(geom), position_jitter)
geom = geom_point(position=position_jitter)
assert isinstance(position.from_geom(geom), position_jitter)
def test_dodge_empty_data():
empty_df = pd.DataFrame({'x': [], 'y': []})
p = (ggplot(df1, aes('x', 'y'))
+ geom_point()
+ geom_rect(
empty_df,
aes(xmin='x', xmax='x+1', ymin='y', ymax='y+1'),
position='dodge')
)
p.draw_test()
| gpl-2.0 |
poryfly/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# mark the unlabeled points with -1
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
JoseGuzman/myIPythonNotebooks | pub/nerst.py | 1 | 2413 | """
nerst.py
Jose Guzman, [email protected]
Created: Fri Apr 29 08:04:57 CEST 2016
Solves the Nerst equation for different chloride conditions.
"""
from math import log
from terminaltables import AsciiTable
import numpy as np
def nerst(t, oution, inion, z):
"""
Solves the following equation:
.. math::
E = \frac{R T}{z F} \ln\frac{out}{in}
Arguments
---------
oution : float
Extracellular ionic concentration
inion : float
Intracellular ionic concentration
z : int
Valence of the ion
t : float
Temperature in degrees Celsius
Returns
-------
voltage : float
the potential (in mV) at which the net flux of current is zero.
Examples
--------
>>> nerst(t=30, oution=126.5, inion=10, z=-1)
-66.294937
"""
K = 273.15 + t # transform in kelvin
volt = ((8.31451*K)/(z*96485.3))*log(oution/inion)
return(volt*1000) # in mV
if __name__ == '__main__':
data = [['Reference', 'E_rev (Cl-) mV']]
# data from Pavlidis % Madison 1999
ECl = nerst(t = 30, oution=126.5, inion=10, z=-1)
data.append(['Pavlidis & Madison, 1999', ECl])
# data from Sasaki et al
ECl = nerst(t = 30, oution=133.3, inion=4, z=-1)
data.append(['Sasaki et al., 2012', ECl])
# data from Mitra et al, 2011
ECl = nerst(t = 20, oution=129, inion=9, z=-1)
data.append(['Mitra et al., 2011', ECl])
# data from Kraushaar and Jonas, 2000
ECl = nerst(t = 20, oution=126.5, inion=149, z=-1)
data.append(['Kraushaar and Jonas, 2000',ECl])
# data from Espinoza
ECl = nerst(t = 20, oution=133.5, inion=44, z=-1)
data.append(['Espinoza et al., **', ECl])
# data for brain organoids (for Cl)
ECl = nerst(t = 20, oution=133.5, inion=28, z=-1)
data.append(['Guzman et al., **', ECl])
table = AsciiTable(data)
print(table.table)
import matplotlib.pyplot as plt
x = np.arange(0.1, 50.0, 0.01)
k = lambda x:58*np.log10(x/100.) # Nernst relation for K+: ~58 mV per decade (RT*ln(10)/F at ~20 C), assuming 100 mM internal K+
y = k(x)
plt.semilogx(x,y, color='royalblue')
plt.vlines(2.5, -120, k(2.5), linestyle=':', color='brown')
plt.hlines(k(2.5), 0.01, 2.5, linestyle=':', color='brown')
plt.ylim(ymin=-120)
plt.xlim(xmin=0.1)
plt.ylabel('Resting membrane potential (mV)')
plt.xlabel('Log [K$^+$]')
plt.show()
| gpl-2.0 |
josephcslater/scipy | scipy/signal/filter_design.py | 14 | 135076 | """Filter design.
"""
from __future__ import division, print_function, absolute_import
import warnings
import math
import numpy
import numpy as np
from numpy import (atleast_1d, poly, polyval, roots, real, asarray,
resize, pi, absolute, logspace, r_, sqrt, tan, log10,
arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
zeros, sinh, append, concatenate, prod, ones, array,
mintypecode)
from numpy.polynomial.polynomial import polyval as npp_polyval
from scipy import special, optimize
from scipy.special import comb, factorial
from scipy._lib._numpy_compat import polyvalfromroots
__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
'BadCoefficients', 'freqs_zpk', 'freqz_zpk',
'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay',
'sosfreqz', 'iirnotch', 'iirpeak']
class BadCoefficients(UserWarning):
"""Warning about badly conditioned filter coefficients"""
pass
abs = absolute
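# Tolerance used by `group_delay` below when checking for singular
# frequencies. It is not defined elsewhere in this excerpt; the value here
# is an assumption (roughly float64 machine epsilon).
EPSILON = 2e-16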
def findfreqs(num, den, N, kind='ba'):
"""
Find array of frequencies for computing the response of an analog filter.
Parameters
----------
num, den : array_like, 1-D
The polynomial coefficients of the numerator and denominator of the
transfer function of the filter or LTI system, where the coefficients
are ordered from highest to lowest degree. Or, the roots of the
transfer function numerator and denominator (i.e. zeroes and poles).
N : int
The length of the array to be computed.
kind : str {'ba', 'zp'}, optional
Specifies whether the numerator and denominator are specified by their
polynomial coefficients ('ba'), or their roots ('zp').
Returns
-------
w : (N,) ndarray
A 1-D array of frequencies, logarithmically spaced.
Examples
--------
Find a set of nine frequencies that span the "interesting part" of the
frequency response for the filter with the transfer function
H(s) = s / (s^2 + 8s + 25)
>>> from scipy import signal
>>> signal.findfreqs([1, 0], [1, 8, 25], N=9)
array([ 1.00000000e-02, 3.16227766e-02, 1.00000000e-01,
3.16227766e-01, 1.00000000e+00, 3.16227766e+00,
1.00000000e+01, 3.16227766e+01, 1.00000000e+02])
"""
if kind == 'ba':
ep = atleast_1d(roots(den)) + 0j
tz = atleast_1d(roots(num)) + 0j
elif kind == 'zp':
ep = atleast_1d(den) + 0j
tz = atleast_1d(num) + 0j
else:
raise ValueError("input must be one of {'ba', 'zp'}")
if len(ep) == 0:
ep = atleast_1d(-1000) + 0j
ez = r_['-1',
numpy.compress(ep.imag >= 0, ep, axis=-1),
numpy.compress((abs(tz) < 1e5) & (tz.imag >= 0), tz, axis=-1)]
integ = abs(ez) < 1e-10
hfreq = numpy.around(numpy.log10(numpy.max(3 * abs(ez.real + integ) +
1.5 * ez.imag)) + 0.5)
lfreq = numpy.around(numpy.log10(0.1 * numpy.min(abs(real(ez + integ)) +
2 * ez.imag)) - 0.5)
w = logspace(lfreq, hfreq, N)
return w
def freqs(b, a, worN=None, plot=None):
"""
Compute frequency response of analog filter.
Given the M-order numerator `b` and N-order denominator `a` of an analog
filter, compute its frequency response::
b[0]*(jw)**M + b[1]*(jw)**(M-1) + ... + b[M]
H(w) = ----------------------------------------------
a[0]*(jw)**N + a[1]*(jw)**(N-1) + ... + a[N]
Parameters
----------
b : array_like
Numerator of a linear filter.
a : array_like
Denominator of a linear filter.
worN : {None, int, array_like}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at the angular frequencies (e.g. rad/s) given in `worN`.
plot : callable, optional
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqs`.
Returns
-------
w : ndarray
The angular frequencies at which `h` was computed.
h : ndarray
The frequency response.
See Also
--------
freqz : Compute the frequency response of a digital filter.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
unexpected results, as this plots the real part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy.signal import freqs, iirfilter
>>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1')
>>> w, h = freqs(b, a, worN=np.logspace(-1, 2, 1000))
>>> import matplotlib.pyplot as plt
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.xlabel('Frequency')
>>> plt.ylabel('Amplitude response [dB]')
>>> plt.grid()
>>> plt.show()
"""
if worN is None:
w = findfreqs(b, a, 200)
elif isinstance(worN, int):
N = worN
w = findfreqs(b, a, N)
else:
w = worN
w = atleast_1d(w)
s = 1j * w
h = polyval(b, s) / polyval(a, s)
if plot is not None:
plot(w, h)
return w, h
def freqs_zpk(z, p, k, worN=None):
"""
Compute frequency response of analog filter.
Given the zeros `z`, poles `p`, and gain `k` of a filter, compute its
frequency response::
(jw-z[0]) * (jw-z[1]) * ... * (jw-z[-1])
H(w) = k * ----------------------------------------
(jw-p[0]) * (jw-p[1]) * ... * (jw-p[-1])
Parameters
----------
z : array_like
Zeroes of a linear filter
p : array_like
Poles of a linear filter
k : scalar
Gain of a linear filter
worN : {None, int, array_like}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at the angular frequencies (e.g. rad/s) given in `worN`.
Returns
-------
w : ndarray
The angular frequencies at which `h` was computed.
h : ndarray
The frequency response.
See Also
--------
freqs : Compute the frequency response of an analog filter in TF form
freqz : Compute the frequency response of a digital filter in TF form
freqz_zpk : Compute the frequency response of a digital filter in ZPK form
Notes
-----
.. versionadded: 0.19.0
Examples
--------
>>> from scipy.signal import freqs_zpk, iirfilter
>>> z, p, k = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1',
... output='zpk')
>>> w, h = freqs_zpk(z, p, k, worN=np.logspace(-1, 2, 1000))
>>> import matplotlib.pyplot as plt
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.xlabel('Frequency')
>>> plt.ylabel('Amplitude response [dB]')
>>> plt.grid()
>>> plt.show()
"""
k = np.asarray(k)
if k.size > 1:
raise ValueError('k must be a single scalar gain')
if worN is None:
w = findfreqs(z, p, 200, kind='zp')
elif isinstance(worN, int):
N = worN
w = findfreqs(z, p, N, kind='zp')
else:
w = worN
w = atleast_1d(w)
s = 1j * w
num = polyvalfromroots(s, z)
den = polyvalfromroots(s, p)
h = k * num/den
return w, h
def freqz(b, a=1, worN=None, whole=False, plot=None):
"""
Compute the frequency response of a digital filter.
Given the M-order numerator `b` and N-order denominator `a` of a digital
filter, compute its frequency response::
H(e^jw) = B(e^jw) / A(e^jw)
= (b[0] + b[1]*e^(-jw) + ... + b[M]*e^(-jw*M))
/ (a[0] + a[1]*e^(-jw) + ... + a[N]*e^(-jw*N))
Parameters
----------
b : array_like
numerator of a linear filter
a : array_like
denominator of a linear filter
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response, as complex numbers.
See Also
--------
sosfreqz
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
unexpected results, as this plots the real part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy import signal
>>> b = signal.firwin(80, 0.5, window=('kaiser', 8))
>>> w, h = signal.freqz(b)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_subplot(111)
>>> plt.plot(w, 20 * np.log10(abs(h)), 'b')
>>> plt.ylabel('Amplitude [dB]', color='b')
>>> plt.xlabel('Frequency [rad/sample]')
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.grid()
>>> plt.axis('tight')
>>> plt.show()
"""
b, a = map(atleast_1d, (b, a))
if whole:
lastpoint = 2 * pi
else:
lastpoint = pi
if worN is None:
N = 512
w = numpy.linspace(0, lastpoint, N, endpoint=False)
elif isinstance(worN, int):
N = worN
w = numpy.linspace(0, lastpoint, N, endpoint=False)
else:
w = worN
w = atleast_1d(w)
zm1 = exp(-1j * w)
h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
if plot is not None:
plot(w, h)
return w, h
def freqz_zpk(z, p, k, worN=None, whole=False):
"""
Compute the frequency response of a digital filter in ZPK form.
Given the Zeros, Poles and Gain of a digital filter, compute its frequency
response::
:math:`H(z)=k \prod_i (z - Z[i]) / \prod_j (z - P[j])`
where :math:`k` is the `gain`, :math:`Z` are the `zeros` and :math:`P` are
the `poles`.
Parameters
----------
z : array_like
Zeroes of a linear filter
p : array_like
Poles of a linear filter
k : scalar
Gain of a linear filter
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response.
See Also
--------
freqs : Compute the frequency response of an analog filter in TF form
freqs_zpk : Compute the frequency response of an analog filter in ZPK form
freqz : Compute the frequency response of a digital filter in TF form
Notes
-----
.. versionadded: 0.19.0
Examples
--------
>>> from scipy import signal
>>> z, p, k = signal.butter(4, 0.2, output='zpk')
>>> w, h = signal.freqz_zpk(z, p, k)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_subplot(111)
>>> plt.plot(w, 20 * np.log10(abs(h)), 'b')
>>> plt.ylabel('Amplitude [dB]', color='b')
>>> plt.xlabel('Frequency [rad/sample]')
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.grid()
>>> plt.axis('tight')
>>> plt.show()
"""
z, p = map(atleast_1d, (z, p))
if whole:
lastpoint = 2 * pi
else:
lastpoint = pi
if worN is None:
N = 512
w = numpy.linspace(0, lastpoint, N, endpoint=False)
elif isinstance(worN, int):
N = worN
w = numpy.linspace(0, lastpoint, N, endpoint=False)
else:
w = worN
w = atleast_1d(w)
zm1 = exp(1j * w)
h = k * polyvalfromroots(zm1, z) / polyvalfromroots(zm1, p)
return w, h
def group_delay(system, w=None, whole=False):
r"""Compute the group delay of a digital filter.
The group delay measures by how many samples amplitude envelopes of
various spectral components of a signal are delayed by a filter.
It is formally defined as the derivative of continuous (unwrapped) phase::
D(w) = - (d/dw) arg H(e^jw)
Parameters
----------
system : tuple of array_like (b, a)
Numerator and denominator coefficients of a filter transfer function.
w : {None, int, array-like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If array, compute the delay at the frequencies given
(in radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to ``2*pi`` radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which the group delay was computed,
in radians/sample.
gd : ndarray
The group delay.
Notes
-----
The similar function in MATLAB is called `grpdelay`.
If the transfer function :math:`H(z)` has zeros or poles on the unit
circle, the group delay at corresponding frequencies is undefined.
When such a case arises the warning is raised and the group delay
is set to 0 at those frequencies.
For the details of numerical computation of the group delay refer to [1]_.
.. versionadded: 0.16.0
See Also
--------
freqz : Frequency response of a digital filter
References
----------
.. [1] Richard G. Lyons, "Understanding Digital Signal Processing,
3rd edition", p. 830.
Examples
--------
>>> from scipy import signal
>>> b, a = signal.iirdesign(0.1, 0.3, 5, 50, ftype='cheby1')
>>> w, gd = signal.group_delay((b, a))
>>> import matplotlib.pyplot as plt
>>> plt.title('Digital filter group delay')
>>> plt.plot(w, gd)
>>> plt.ylabel('Group delay [samples]')
>>> plt.xlabel('Frequency [rad/sample]')
>>> plt.show()
"""
if w is None:
w = 512
if isinstance(w, int):
if whole:
w = np.linspace(0, 2 * pi, w, endpoint=False)
else:
w = np.linspace(0, pi, w, endpoint=False)
w = np.atleast_1d(w)
b, a = map(np.atleast_1d, system)
c = np.convolve(b, a[::-1])
cr = c * np.arange(c.size)
z = np.exp(-1j * w)
num = np.polyval(cr[::-1], z)
den = np.polyval(c[::-1], z)
singular = np.absolute(den) < 10 * EPSILON
if np.any(singular):
warnings.warn(
"The group delay is singular at frequencies [{0}], setting to 0".
format(", ".join("{0:.3f}".format(ws) for ws in w[singular]))
)
gd = np.zeros_like(w)
gd[~singular] = np.real(num[~singular] / den[~singular]) - a.size + 1
return w, gd
def _validate_sos(sos):
"""Helper to validate a SOS input"""
sos = np.atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
if not (sos[:, 3] == 1).all():
raise ValueError('sos[:, 3] should be all ones')
return sos, n_sections
def sosfreqz(sos, worN=None, whole=False):
"""
Compute the frequency response of a digital filter in SOS format.
Given `sos`, an array with shape (n, 6) of second order sections of
a digital filter, compute the frequency response of the system function::
H(z) = B0(z)/A0(z) * B1(z)/A1(z) * ... * B{n-1}(z)/A{n-1}(z)
for z = exp(omega*1j), where B{k}(z) and A{k}(z) are numerator and
denominator of the transfer function of the k-th second order section.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response, as complex numbers.
See Also
--------
freqz, sosfilt
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
Design a 15th-order bandpass filter in SOS format.
>>> from scipy import signal
>>> sos = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass',
... output='sos')
Compute the frequency response at 1500 points from DC to Nyquist.
>>> w, h = signal.sosfreqz(sos, worN=1500)
Plot the response.
>>> import matplotlib.pyplot as plt
>>> plt.subplot(2, 1, 1)
>>> db = 20*np.log10(np.abs(h))
>>> plt.plot(w/np.pi, db)
>>> plt.ylim(-75, 5)
>>> plt.grid(True)
>>> plt.yticks([0, -20, -40, -60])
>>> plt.ylabel('Gain [dB]')
>>> plt.title('Frequency Response')
>>> plt.subplot(2, 1, 2)
>>> plt.plot(w/np.pi, np.angle(h))
>>> plt.grid(True)
>>> plt.yticks([-np.pi, -0.5*np.pi, 0, 0.5*np.pi, np.pi],
... [r'$-\\pi$', r'$-\\pi/2$', '0', r'$\\pi/2$', r'$\\pi$'])
>>> plt.ylabel('Phase [rad]')
>>> plt.xlabel('Normalized frequency (1.0 = Nyquist)')
>>> plt.show()
If the same filter is implemented as a single transfer function,
numerical error corrupts the frequency response:
>>> b, a = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass',
... output='ba')
>>> w, h = signal.freqz(b, a, worN=1500)
>>> plt.subplot(2, 1, 1)
>>> db = 20*np.log10(np.abs(h))
>>> plt.plot(w/np.pi, db)
>>> plt.subplot(2, 1, 2)
>>> plt.plot(w/np.pi, np.angle(h))
>>> plt.show()
"""
sos, n_sections = _validate_sos(sos)
if n_sections == 0:
raise ValueError('Cannot compute frequencies with no sections')
h = 1.
for row in sos:
w, rowh = freqz(row[:3], row[3:], worN=worN, whole=whole)
h *= rowh
return w, h
def _cplxreal(z, tol=None):
"""
Split into complex and real parts, combining conjugate pairs.
The 1D input vector `z` is split up into its complex (`zc`) and real (`zr`)
elements. Every complex element must be part of a complex-conjugate pair,
which are combined into a single number (with positive imaginary part) in
the output. Two complex numbers are considered a conjugate pair if their
real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.
Parameters
----------
z : array_like
Vector of complex numbers to be sorted and split
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
zc : ndarray
Complex elements of `z`, with each pair represented by a single value
having positive imaginary part, sorted first by real part, and then
by magnitude of imaginary part. The pairs are averaged when combined
to reduce error.
zr : ndarray
Real elements of `z` (those having imaginary part less than
`tol` times their magnitude), sorted by value.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxpair
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> zc, zr = _cplxreal(a)
>>> print(zc)
[ 1.+1.j 2.+1.j 2.+1.j 2.+2.j]
>>> print(zr)
[ 1. 3. 4.]
"""
z = atleast_1d(z)
if z.size == 0:
return z, z
elif z.ndim != 1:
raise ValueError('_cplxreal only accepts 1D input')
if tol is None:
# Get tolerance from dtype of input
tol = 100 * np.finfo((1.0 * z).dtype).eps
# Sort by real part, magnitude of imaginary part (speed up further sorting)
z = z[np.lexsort((abs(z.imag), z.real))]
# Split reals from conjugate pairs
real_indices = abs(z.imag) <= tol * abs(z)
zr = z[real_indices].real
if len(zr) == len(z):
# Input is entirely real
return array([]), zr
# Split positive and negative halves of conjugates
z = z[~real_indices]
zp = z[z.imag > 0]
zn = z[z.imag < 0]
if len(zp) != len(zn):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Find runs of (approximately) the same real part
same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
diffs = numpy.diff(concatenate(([0], same_real, [0])))
run_starts = numpy.where(diffs > 0)[0]
run_stops = numpy.where(diffs < 0)[0]
# Sort each run by their imaginary parts
for i in range(len(run_starts)):
start = run_starts[i]
stop = run_stops[i] + 1
for chunk in (zp[start:stop], zn[start:stop]):
chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]
# Check that negatives match positives
if any(abs(zp - zn.conj()) > tol * abs(zn)):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Average out numerical inaccuracy in real vs imag parts of pairs
zc = (zp + zn.conj()) / 2
return zc, zr
def _cplxpair(z, tol=None):
"""
Sort into pairs of complex conjugates.
Complex conjugates in `z` are sorted by increasing real part. In each
pair, the number with negative imaginary part appears first.
If pairs have identical real parts, they are sorted by increasing
imaginary magnitude.
Two complex numbers are considered a conjugate pair if their real and
imaginary parts differ in magnitude by less than ``tol * abs(z)``. The
pairs are forced to be exact complex conjugates by averaging the positive
and negative values.
Purely real numbers are also sorted, but placed after the complex
conjugate pairs. A number is considered real if its imaginary part is
smaller than `tol` times the magnitude of the number.
Parameters
----------
z : array_like
1-dimensional input array to be sorted.
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
y : ndarray
Complex conjugate pairs followed by real numbers.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxreal
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> z = _cplxpair(a)
>>> print(z)
[ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j
3.+0.j 4.+0.j]
"""
z = atleast_1d(z)
if z.size == 0 or np.isrealobj(z):
return np.sort(z)
if z.ndim != 1:
raise ValueError('z must be 1-dimensional')
zc, zr = _cplxreal(z, tol)
# Interleave complex values and their conjugates, with negative imaginary
# parts first in each pair
zc = np.dstack((zc.conj(), zc)).flatten()
z = np.append(zc, zr)
return z
def tf2zpk(b, a):
r"""Return zero, pole, gain (z, p, k) representation from a numerator,
denominator representation of a linear filter.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
If some values of `b` are too close to 0, they are removed. In that case,
a BadCoefficients warning is emitted.
The `b` and `a` arrays are interpreted as coefficients for positive,
descending powers of the transfer function variable. So the inputs
:math:`b = [b_0, b_1, ..., b_M]` and :math:`a =[a_0, a_1, ..., a_N]`
can represent an analog filter of the form:
.. math::
H(s) = \frac
{b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M}
{a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N}
or a discrete-time filter of the form:
.. math::
H(z) = \frac
{b_0 z^M + b_1 z^{(M-1)} + \cdots + b_M}
{a_0 z^N + a_1 z^{(N-1)} + \cdots + a_N}
This "positive powers" form is found more commonly in controls
engineering. If `M` and `N` are equal (which is true for all filters
generated by the bilinear transform), then this happens to be equivalent
to the "negative powers" discrete-time form preferred in DSP:
.. math::
H(z) = \frac
{b_0 + b_1 z^{-1} + \cdots + b_M z^{-M}}
{a_0 + a_1 z^{-1} + \cdots + a_N z^{-N}}
Although this is true for common filters, remember that this is not true
in the general case. If `M` and `N` are not equal, the discrete-time
transfer function coefficients must first be converted to the "positive
powers" form before finding the poles and zeros.
"""
b, a = normalize(b, a)
b = (b + 0.0) / a[0]
a = (a + 0.0) / a[0]
k = b[0]
b /= b[0]
z = roots(b)
p = roots(a)
return z, p, k
def zpk2tf(z, p, k):
"""
Return polynomial transfer function representation from zeros and poles
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
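Examples
--------
A small illustrative conversion (values chosen arbitrarily): a zero at
-1, poles at -2 and -3 and gain 2 give ``b = [2, 2]`` and
``a = [1, 5, 6]``, i.e. ``H(s) = 2*(s + 1) / ((s + 2)*(s + 3))``.
>>> from scipy.signal import zpk2tf
>>> b, a = zpk2tf([-1], [-2, -3], 2)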
"""
z = atleast_1d(z)
k = atleast_1d(k)
if len(z.shape) > 1:
temp = poly(z[0])
b = zeros((z.shape[0], z.shape[1] + 1), temp.dtype.char)
if len(k) == 1:
k = [k[0]] * z.shape[0]
for i in range(z.shape[0]):
b[i] = k[i] * poly(z[i])
else:
b = k * poly(z)
a = atleast_1d(poly(p))
# Use real output if possible. Copied from numpy.poly, since
# we can't depend on a specific version of numpy.
if issubclass(b.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(z, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
b = b.real.copy()
if issubclass(a.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(p, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
a = a.real.copy()
return b, a
def tf2sos(b, a, pairing='nearest'):
"""
Return second-order sections from transfer function representation
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See `zpk2sos`.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
zpk2sos, sosfilt
Notes
-----
It is generally discouraged to convert from TF to SOS format, since doing
so usually will not improve numerical precision errors. Instead, consider
designing filters in ZPK format and converting directly to SOS. TF is
converted to SOS by first converting to ZPK format, then converting
ZPK to SOS.
.. versionadded:: 0.16.0
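Examples
--------
An illustrative conversion (coefficients chosen arbitrarily); the result
is a single second-order section, an array of shape ``(1, 6)``.
>>> from scipy.signal import tf2sos
>>> sos = tf2sos([1., 1.], [1., 1.5, 0.5])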
"""
return zpk2sos(*tf2zpk(b, a), pairing=pairing)
def sos2tf(sos):
"""
Return a single transfer function from a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
Notes
-----
.. versionadded:: 0.16.0
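Examples
--------
An illustrative expansion of a single second-order section (coefficients
chosen arbitrarily), giving ``b = [1., 1., 0.5]`` and
``a = [1., 0.1, 0.01]``.
>>> from scipy.signal import sos2tf
>>> b, a = sos2tf([[1., 1., 0.5, 1., 0.1, 0.01]])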
"""
sos = np.asarray(sos)
b = [1.]
a = [1.]
n_sections = sos.shape[0]
for section in range(n_sections):
b = np.polymul(b, sos[section, :3])
a = np.polymul(a, sos[section, 3:])
return b, a
def sos2zpk(sos):
"""
Return zeros, poles, and gain of a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
.. versionadded:: 0.16.0
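Examples
--------
A minimal illustration using one section (coefficients chosen
arbitrarily): the zeros are +/-1, the poles +/-0.5 and the gain is 1.
>>> from scipy.signal import sos2zpk
>>> z, p, k = sos2zpk([[1., 0., -1., 1., 0., -0.25]])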
"""
sos = np.asarray(sos)
n_sections = sos.shape[0]
z = np.empty(n_sections*2, np.complex128)
p = np.empty(n_sections*2, np.complex128)
k = 1.
for section in range(n_sections):
zpk = tf2zpk(sos[section, :3], sos[section, 3:])
z[2*section:2*(section+1)] = zpk[0]
p[2*section:2*(section+1)] = zpk[1]
k *= zpk[2]
return z, p, k
def _nearest_real_complex_idx(fro, to, which):
"""Get the next closest real or complex element based on distance"""
assert which in ('real', 'complex')
order = np.argsort(np.abs(fro - to))
mask = np.isreal(fro[order])
if which == 'complex':
mask = ~mask
return order[np.where(mask)[0][0]]
def zpk2sos(z, p, k, pairing='nearest'):
"""
Return second-order sections from zeros, poles, and gain of a system
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See Notes below.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
sosfilt
Notes
-----
The algorithm used to convert ZPK to SOS format is designed to
minimize errors due to numerical precision issues. The pairing
algorithm attempts to minimize the peak gain of each biquadratic
section. This is done by pairing poles with the nearest zeros, starting
with the poles closest to the unit circle.
*Algorithms*
The current algorithms are designed specifically for use with digital
filters. (The output coefficients are not correct for analog filters.)
The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'``
algorithms are mostly shared. The ``nearest`` algorithm attempts to
minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under
the constraint that odd-order systems should retain one section
as first order. The algorithm steps are as follows:
As a pre-processing step, add poles or zeros to the origin as
necessary to obtain the same number of poles and zeros for pairing.
If ``pairing == 'nearest'`` and there are an odd number of poles,
add an additional pole and a zero at the origin.
The following steps are then iterated over until no more poles or
zeros remain:
1. Take the (next remaining) pole (complex or real) closest to the
unit circle to begin a new filter section.
2. If the pole is real and there are no other remaining real poles [#]_,
add the closest real zero to the section and leave it as a first
order section. Note that after this step we are guaranteed to be
left with an even number of real poles, complex poles, real zeros,
and complex zeros for subsequent pairing iterations.
3. Else:
1. If the pole is complex and the zero is the only remaining real
zero, then pair the pole with the *next* closest zero
(guaranteed to be complex). This is necessary to ensure that
there will be a real zero remaining to eventually create a
first-order section (thus keeping the odd order).
2. Else pair the pole with the closest remaining zero (complex or
real).
3. Proceed to complete the second-order section by adding another
pole and zero to the current pole and zero in the section:
1. If the current pole and zero are both complex, add their
conjugates.
2. Else if the pole is complex and the zero is real, add the
conjugate pole and the next closest real zero.
3. Else if the pole is real and the zero is complex, add the
conjugate zero and the real pole closest to those zeros.
4. Else (we must have a real pole and real zero) add the next
real pole closest to the unit circle, and then add the real
zero closest to that pole.
.. [#] This conditional can only be met for specific odd-order inputs
with the ``pairing == 'keep_odd'`` method.
.. versionadded:: 0.16.0
Examples
--------
Design a 6th order low-pass elliptic digital filter for a system with a
sampling rate of 8000 Hz that has a pass-band corner frequency of
1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
the attenuation in the stop-band should be at least 90 dB.
In the following call to `signal.ellip`, we could use ``output='sos'``,
but for this example, we'll use ``output='zpk'``, and then convert to SOS
format with `zpk2sos`:
>>> from scipy import signal
>>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
Now convert to SOS format.
>>> sos = signal.zpk2sos(z, p, k)
The coefficients of the numerators of the sections:
>>> sos[:, :3]
array([[ 0.0014154 , 0.00248707, 0.0014154 ],
[ 1. , 0.72965193, 1. ],
[ 1. , 0.17594966, 1. ]])
The symmetry in the coefficients occurs because all the zeros are on the
unit circle.
The coefficients of the denominators of the sections:
>>> sos[:, 3:]
array([[ 1. , -1.32543251, 0.46989499],
[ 1. , -1.26117915, 0.6262586 ],
[ 1. , -1.25707217, 0.86199667]])
The next example shows the effect of the `pairing` option. We have a
system with three poles and three zeros, so the SOS array will have
shape (2, 6). This means there is, in effect, an extra pole and an extra
zero at the origin in the SOS representation.
>>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
>>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
With ``pairing='nearest'`` (the default), we obtain
>>> signal.zpk2sos(z1, p1, 1)
array([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ],
[ 1. , 1. , 0. , 1. , -1.6 , 0.65]])
The first section has the zeros {-0.5-0.5j, -0.5+0.5j} and the poles
{0, 0.75}, and the second section has the zeros {-1, 0} and poles
{0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
have been assigned to different sections.
With ``pairing='keep_odd'``, we obtain:
>>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
array([[ 1. , 1. , 0. , 1. , -0.75, 0. ],
[ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]])
The extra pole and zero at the origin are in the same section.
The first section is, in effect, a first-order section.
"""
# TODO in the near future:
# 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
# 2. Make `decimate` use `sosfilt` instead of `lfilter`.
# 3. Make sosfilt automatically simplify sections to first order
# when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
# 4. Further optimizations of the section ordering / pole-zero pairing.
# See the wiki for other potential issues.
valid_pairings = ['nearest', 'keep_odd']
if pairing not in valid_pairings:
raise ValueError('pairing must be one of %s, not %s'
% (valid_pairings, pairing))
if len(z) == len(p) == 0:
return array([[k, 0., 0., 1., 0., 0.]])
# ensure we have the same number of poles and zeros, and make copies
p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
n_sections = (max(len(p), len(z)) + 1) // 2
sos = zeros((n_sections, 6))
if len(p) % 2 == 1 and pairing == 'nearest':
p = np.concatenate((p, [0.]))
z = np.concatenate((z, [0.]))
assert len(p) == len(z)
# Ensure we have complex conjugate pairs
# (note that _cplxreal only gives us one element of each complex pair):
z = np.concatenate(_cplxreal(z))
p = np.concatenate(_cplxreal(p))
p_sos = np.zeros((n_sections, 2), np.complex128)
z_sos = np.zeros_like(p_sos)
for si in range(n_sections):
# Select the next "worst" pole
p1_idx = np.argmin(np.abs(1 - np.abs(p)))
p1 = p[p1_idx]
p = np.delete(p, p1_idx)
# Pair that pole with a zero
if np.isreal(p1) and np.isreal(p).sum() == 0:
# Special case to set a first-order section
z1_idx = _nearest_real_complex_idx(z, p1, 'real')
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
p2 = z2 = 0
else:
if not np.isreal(p1) and np.isreal(z).sum() == 1:
# Special case to ensure we choose a complex zero to pair
# with so later (setting up a first-order section)
z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
assert not np.isreal(z[z1_idx])
else:
# Pair the pole with the closest zero (real or complex)
z1_idx = np.argmin(np.abs(p1 - z))
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
# Now that we have p1 and z1, figure out what p2 and z2 need to be
if not np.isreal(p1):
if not np.isreal(z1): # complex pole, complex zero
p2 = p1.conj()
z2 = z1.conj()
else: # complex pole, real zero
p2 = p1.conj()
z2_idx = _nearest_real_complex_idx(z, p1, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
else:
if not np.isreal(z1): # real pole, complex zero
z2 = z1.conj()
p2_idx = _nearest_real_complex_idx(p, z1, 'real')
p2 = p[p2_idx]
assert np.isreal(p2)
else: # real pole, real zero
# pick the next "worst" pole to use
idx = np.where(np.isreal(p))[0]
assert len(idx) > 0
p2_idx = idx[np.argmin(np.abs(np.abs(p[idx]) - 1))]
p2 = p[p2_idx]
# find a real zero to match the added pole
assert np.isreal(p2)
z2_idx = _nearest_real_complex_idx(z, p2, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
p = np.delete(p, p2_idx)
p_sos[si] = [p1, p2]
z_sos[si] = [z1, z2]
assert len(p) == len(z) == 0 # we've consumed all poles and zeros
del p, z
# Construct the system, reversing order so the "worst" are last
p_sos = np.reshape(p_sos[::-1], (n_sections, 2))
z_sos = np.reshape(z_sos[::-1], (n_sections, 2))
gains = np.ones(n_sections)
gains[0] = k
for si in range(n_sections):
x = zpk2tf(z_sos[si], p_sos[si], gains[si])
sos[si] = np.concatenate(x)
return sos
def _align_nums(nums):
"""Aligns the shapes of multiple numerators.
Given an array of numerator coefficient arrays [[a_1, a_2,...,
a_n],..., [b_1, b_2,..., b_m]], this function pads shorter numerator
arrays with zeros so that all numerators have the same length. Such
alignment is necessary for functions like `tf2ss`, which need the
alignment when dealing with SIMO transfer functions.
Parameters
----------
nums : array_like
Numerator or list of numerators. Not necessarily of the same length.
Returns
-------
nums : array
The numerator. If the `nums` input was a list of numerators, then a 2-D
array with padded zeros for the shorter numerators is returned. Otherwise
``np.asarray(nums)`` is returned.
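Examples
--------
As a small illustrative sketch (arbitrary values), two numerators of
different lengths are padded to a common width:
>>> padded = _align_nums([[1, 2], [1, 2, 3]])  # illustrative values only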
"""
try:
# The statement can throw a ValueError if one
# of the numerators is a scalar and another
# is array-like, e.g. if nums = [5, [1, 2, 3]]
nums = asarray(nums)
if not np.issubdtype(nums.dtype, np.number):
raise ValueError("dtype of numerator is non-numeric")
return nums
except ValueError:
nums = [np.atleast_1d(num) for num in nums]
max_width = max(num.size for num in nums)
# pre-allocate
aligned_nums = np.zeros((len(nums), max_width))
# Create numerators with padded zeros
for index, num in enumerate(nums):
aligned_nums[index, -num.size:] = num
return aligned_nums
def normalize(b, a):
"""Normalize numerator/denominator of a continuous-time transfer function.
If values of `b` are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
Parameters
----------
b : array_like
Numerator of the transfer function. Can be a 2-D array to normalize
multiple transfer functions.
a : array_like
Denominator of the transfer function. At most 1-D.
Returns
-------
num : array
The numerator of the normalized transfer function. At least a 1-D
array. A 2-D array if the input `num` is a 2-D array.
den : 1-D array
The denominator of the normalized transfer function.
Notes
-----
Coefficients for both the numerator and denominator should be specified in
descending exponent order (e.g., ``s^2 + 3s + 5`` would be represented as
``[1, 3, 5]``).
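Examples
--------
As a brief sketch (coefficients chosen arbitrarily), scale the transfer
function ``(2s + 4) / (2s^2 + 4)`` so that the leading denominator
coefficient becomes 1:
>>> num, den = normalize([2, 4], [2, 0, 4])  # illustrative values only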
"""
num, den = b, a
den = np.atleast_1d(den)
num = np.atleast_2d(_align_nums(num))
if den.ndim != 1:
raise ValueError("Denominator polynomial must be rank-1 array.")
if num.ndim > 2:
raise ValueError("Numerator polynomial must be rank-1 or"
" rank-2 array.")
if np.all(den == 0):
raise ValueError("Denominator must have at least on nonzero element.")
# Trim leading zeros in denominator, leave at least one.
den = np.trim_zeros(den, 'f')
# Normalize transfer function
num, den = num / den[0], den / den[0]
# Count numerator columns that are all zero
leading_zeros = 0
for col in num.T:
if np.allclose(col, 0, atol=1e-14):
leading_zeros += 1
else:
break
# Trim leading zeros of numerator
if leading_zeros > 0:
warnings.warn("Badly conditioned filter coefficients (numerator): the "
"results may be meaningless", BadCoefficients)
# Make sure at least one column remains
if leading_zeros == num.shape[1]:
leading_zeros -= 1
num = num[:, leading_zeros:]
# Squeeze first dimension if singular
if num.shape[0] == 1:
num = num[0, :]
return num, den
def lp2lp(b, a, wo=1.0):
"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
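Examples
--------
As a rough sketch (values are illustrative only), shift the first-order
prototype ``H(s) = 1 / (s + 1)`` to a cutoff of 10 rad/s:
>>> b, a = lp2lp([1.0], [1.0, 1.0], wo=10)  # illustrative values only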
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
M = max((d, n))
pwo = pow(wo, numpy.arange(M - 1, -1, -1))
start1 = max((n - d, 0))
start2 = max((d - n, 0))
b = b * pwo[start1] / pwo[start2:]
a = a * pwo[start1] / pwo[start1:]
return normalize(b, a)
def lp2hp(b, a, wo=1.0):
"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
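Examples
--------
As a rough sketch (values are illustrative only), turn the first-order
prototype ``H(s) = 1 / (s + 1)`` into a high-pass filter with a cutoff of
10 rad/s:
>>> b, a = lp2hp([1.0], [1.0, 1.0], wo=10)  # illustrative values only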
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
if wo != 1:
pwo = pow(wo, numpy.arange(max((d, n))))
else:
pwo = numpy.ones(max((d, n)), b.dtype.char)
if d >= n:
outa = a[::-1] * pwo
outb = resize(b, (d,))
outb[n:] = 0.0
outb[:n] = b[::-1] * pwo[:n]
else:
outb = b[::-1] * pwo
outa = resize(a, (n,))
outa[d:] = 0.0
outa[:d] = a[::-1] * pwo[:d]
return normalize(outb, outa)
def lp2bp(b, a, wo=1.0, bw=1.0):
"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
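Examples
--------
As a rough sketch (values are illustrative only), convert the first-order
prototype ``H(s) = 1 / (s + 1)`` into a band-pass filter centered at
10 rad/s with a 2 rad/s bandwidth:
>>> b, a = lp2bp([1.0], [1.0, 1.0], wo=10, bw=2)  # illustrative values only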
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
ma = max([N, D])
Np = N + ma
Dp = D + ma
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i
aprime[Dp - j] = val
return normalize(bprime, aprime)
def lp2bs(b, a, wo=1.0, bw=1.0):
"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
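Examples
--------
As a rough sketch (values are illustrative only), convert the first-order
prototype ``H(s) = 1 / (s + 1)`` into a band-stop filter centered at
10 rad/s with a 2 rad/s stopband width:
>>> b, a = lp2bs([1.0], [1.0, 1.0], wo=10, bw=2)  # illustrative values only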
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
M = max([N, D])
Np = M + M
Dp = M + M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * b[N - i] *
(wosq) ** (M - i - k) * bw ** i)
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * a[D - i] *
(wosq) ** (M - i - k) * bw ** i)
aprime[Dp - j] = val
return normalize(bprime, aprime)
def bilinear(b, a, fs=1.0):
"""Return a digital filter from an analog one using a bilinear transform.
The bilinear transform substitutes ``2*fs*(z-1) / (z+1)`` for ``s``.
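Examples
--------
As a rough sketch (values are illustrative only), discretize the analog
filter ``H(s) = 1 / (s + 1)`` at a sample rate of 10 Hz:
>>> bz, az = bilinear([1.0], [1.0, 1.0], fs=10)  # illustrative values only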
"""
fs = float(fs)
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = float
M = max([N, D])
Np = M
Dp = M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
for j in range(Np + 1):
val = 0.0
for i in range(N + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * b[N - i] *
pow(2 * fs, i) * (-1) ** k)
bprime[j] = real(val)
for j in range(Dp + 1):
val = 0.0
for i in range(D + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * a[D - i] *
pow(2 * fs, i) * (-1) ** k)
aprime[j] = real(val)
return normalize(bprime, aprime)
def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba'):
"""Complete IIR digital and analog filter design.
Given passband and stopband frequencies and gains, construct an analog or
digital IIR filter of minimum order for a given basic type. Return the
output in numerator, denominator ('ba'), pole-zero ('zpk') or second order
sections ('sos') form.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
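Examples
--------
A brief sketch (specifications chosen arbitrarily): design a digital
elliptic lowpass filter with a passband edge at 0.2 and a stopband edge at
0.3 (both normalized to the Nyquist frequency):
>>> from scipy import signal
>>> b, a = signal.iirdesign(0.2, 0.3, gpass=1, gstop=40)  # illustrative spec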
"""
try:
ordfunc = filter_dict[ftype][1]
except KeyError:
raise ValueError("Invalid IIR filter type: %s" % ftype)
except IndexError:
raise ValueError(("%s does not have order selection. Use "
"iirfilter function.") % ftype)
wp = atleast_1d(wp)
ws = atleast_1d(ws)
band_type = 2 * (len(wp) - 1)
band_type += 1
if wp[0] >= ws[0]:
band_type += 1
btype = {1: 'lowpass', 2: 'highpass',
3: 'bandstop', 4: 'bandpass'}[band_type]
N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog)
return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype,
ftype=ftype, output=output)
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False,
ftype='butter', output='ba'):
"""
IIR digital and analog filter design given order and critical points.
Design an Nth-order digital or analog filter and return the filter
coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
rp : float, optional
For Chebyshev and elliptic filters, provides the maximum ripple
in the passband. (dB)
rs : float, optional
For Chebyshev and elliptic filters, provides the minimum attenuation
in the stop band. (dB)
btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
The type of filter. Default is 'bandpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirdesign : General filter design using passband and stopband spec
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Generate a 17th-order Chebyshev II bandpass filter and plot the frequency
response:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.iirfilter(17, [50, 200], rs=60, btype='band',
... analog=True, ftype='cheby2')
>>> w, h = signal.freqs(b, a, 1000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.semilogx(w, 20 * np.log10(abs(h)))
>>> ax.set_title('Chebyshev Type II bandpass frequency response')
>>> ax.set_xlabel('Frequency [radians / second]')
>>> ax.set_ylabel('Amplitude [dB]')
>>> ax.axis((10, 1000, -100, 10))
>>> ax.grid(which='both', axis='both')
>>> plt.show()
"""
ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
Wn = asarray(Wn)
try:
btype = band_dict[btype]
except KeyError:
raise ValueError("'%s' is an invalid bandtype for filter." % btype)
try:
typefunc = filter_dict[ftype][0]
except KeyError:
raise ValueError("'%s' is not a valid basic IIR filter." % ftype)
if output not in ['ba', 'zpk', 'sos']:
raise ValueError("'%s' is not a valid output form." % output)
if rp is not None and rp < 0:
raise ValueError("passband ripple (rp) must be positive")
if rs is not None and rs < 0:
raise ValueError("stopband attenuation (rs) must be positive")
# Get analog lowpass prototype
if typefunc == buttap:
z, p, k = typefunc(N)
elif typefunc == besselap:
z, p, k = typefunc(N, norm=bessel_norms[ftype])
elif typefunc == cheb1ap:
if rp is None:
raise ValueError("passband ripple (rp) must be provided to "
"design a Chebyshev I filter.")
z, p, k = typefunc(N, rp)
elif typefunc == cheb2ap:
if rs is None:
raise ValueError("stopband attenuation (rs) must be provided to "
"design an Chebyshev II filter.")
z, p, k = typefunc(N, rs)
elif typefunc == ellipap:
if rs is None or rp is None:
raise ValueError("Both rp and rs must be provided to design an "
"elliptic filter.")
z, p, k = typefunc(N, rp, rs)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % ftype)
# Pre-warp frequencies for digital filter design
if not analog:
if numpy.any(Wn < 0) or numpy.any(Wn > 1):
raise ValueError("Digital filter critical frequencies "
"must be 0 <= Wn <= 1")
fs = 2.0
warped = 2 * fs * tan(pi * Wn / fs)
else:
warped = Wn
# transform to lowpass, bandpass, highpass, or bandstop
if btype in ('lowpass', 'highpass'):
if numpy.size(Wn) != 1:
raise ValueError('Must specify a single critical frequency Wn')
if btype == 'lowpass':
z, p, k = _zpklp2lp(z, p, k, wo=warped)
elif btype == 'highpass':
z, p, k = _zpklp2hp(z, p, k, wo=warped)
elif btype in ('bandpass', 'bandstop'):
try:
bw = warped[1] - warped[0]
wo = sqrt(warped[0] * warped[1])
except IndexError:
raise ValueError('Wn must specify start and stop frequencies')
if btype == 'bandpass':
z, p, k = _zpklp2bp(z, p, k, wo=wo, bw=bw)
elif btype == 'bandstop':
z, p, k = _zpklp2bs(z, p, k, wo=wo, bw=bw)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % btype)
# Find discrete equivalent if necessary
if not analog:
z, p, k = _zpkbilinear(z, p, k, fs=fs)
# Transform to proper out type (pole-zero, state-space, numer-denom)
if output == 'zpk':
return z, p, k
elif output == 'ba':
return zpk2tf(z, p, k)
elif output == 'sos':
return zpk2sos(z, p, k)
def _relative_degree(z, p):
"""
Return relative degree of transfer function from zeros and poles
"""
degree = len(p) - len(z)
if degree < 0:
raise ValueError("Improper transfer function. "
"Must have at least as many poles as zeros.")
else:
return degree
# TODO: merge these into existing functions or make public versions
def _zpkbilinear(z, p, k, fs):
"""
Return a digital filter from an analog one using a bilinear transform.
Transform a set of poles and zeros from the analog s-plane to the digital
z-plane using Tustin's method, which substitutes ``2*fs*(z-1) / (z+1)`` for
``s``, maintaining the shape of the frequency response.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
fs : float
Sample rate, as ordinary frequency (e.g. hertz). No prewarping is
done in this function.
Returns
-------
z : ndarray
Zeros of the transformed digital filter transfer function.
p : ndarray
Poles of the transformed digital filter transfer function.
k : float
System gain of the transformed digital filter.
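Examples
--------
As an illustrative sketch (arbitrary values), map a single analog pole at
``s = -1`` into the z-plane with a 10 Hz sample rate:
>>> z_d, p_d, k_d = _zpkbilinear([], [-1.0], 1.0, fs=10)  # illustrative only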
"""
z = atleast_1d(z)
p = atleast_1d(p)
degree = _relative_degree(z, p)
fs2 = 2*fs
# Bilinear transform the poles and zeros
z_z = (fs2 + z) / (fs2 - z)
p_z = (fs2 + p) / (fs2 - p)
# Any zeros that were at infinity get moved to the Nyquist frequency
z_z = append(z_z, -ones(degree))
# Compensate for gain change
k_z = k * real(prod(fs2 - z) / prod(fs2 - p))
return z_z, p_z, k_z
def _zpklp2lp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed low-pass filter transfer function.
p : ndarray
Poles of the transformed low-pass filter transfer function.
k : float
System gain of the transformed low-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s}{\omega_0}
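Examples
--------
As an illustrative sketch (arbitrary values), scale a unity-cutoff pole at
``s = -1`` to a cutoff of 5 rad/s:
>>> z_lp, p_lp, k_lp = _zpklp2lp([], [-1.0], 1.0, wo=5)  # illustrative only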
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo) # Avoid int wraparound
degree = _relative_degree(z, p)
# Scale all points radially from origin to shift cutoff frequency
z_lp = wo * z
p_lp = wo * p
# Each shifted pole decreases gain by wo, each shifted zero increases it.
# Cancel out the net change to keep overall gain the same
k_lp = k * wo**degree
return z_lp, p_lp, k_lp
def _zpklp2hp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed high-pass filter transfer function.
p : ndarray
Poles of the transformed high-pass filter transfer function.
k : float
System gain of the transformed high-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{\omega_0}{s}
This maintains symmetry of the lowpass and highpass responses on a
logarithmic scale.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
degree = _relative_degree(z, p)
# Invert positions radially about unit circle to convert LPF to HPF
# Scale all points radially from origin to shift cutoff frequency
z_hp = wo / z
p_hp = wo / p
# If lowpass had zeros at infinity, inverting moves them to origin.
z_hp = append(z_hp, zeros(degree))
# Cancel out gain change caused by inversion
k_hp = k * real(prod(-z) / prod(-p))
return z_hp, p_hp, k_hp
def _zpklp2bp(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired passband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired passband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-pass filter transfer function.
p : ndarray
Poles of the transformed band-pass filter transfer function.
k : float
System gain of the transformed band-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}}
This is the "wideband" transformation, producing a passband with
geometric (log frequency) symmetry about `wo`.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Scale poles and zeros to desired bandwidth
z_lp = z * bw/2
p_lp = p * bw/2
# Square root needs to produce complex result, not NaN
z_lp = z_lp.astype(complex)
p_lp = p_lp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bp = concatenate((z_lp + sqrt(z_lp**2 - wo**2),
z_lp - sqrt(z_lp**2 - wo**2)))
p_bp = concatenate((p_lp + sqrt(p_lp**2 - wo**2),
p_lp - sqrt(p_lp**2 - wo**2)))
# Move degree zeros to origin, leaving degree zeros at infinity for BPF
z_bp = append(z_bp, zeros(degree))
# Cancel out gain change from frequency scaling
k_bp = k * bw**degree
return z_bp, p_bp, k_bp
def _zpklp2bs(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
stopband width `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired stopband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired stopband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-stop filter transfer function.
p : ndarray
Poles of the transformed band-stop filter transfer function.
k : float
System gain of the transformed band-stop filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2}
This is the "wideband" transformation, producing a stopband with
geometric (log frequency) symmetry about `wo`.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Invert to a highpass filter with desired bandwidth
z_hp = (bw/2) / z
p_hp = (bw/2) / p
# Square root needs to produce complex result, not NaN
z_hp = z_hp.astype(complex)
p_hp = p_hp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bs = concatenate((z_hp + sqrt(z_hp**2 - wo**2),
z_hp - sqrt(z_hp**2 - wo**2)))
p_bs = concatenate((p_hp + sqrt(p_hp**2 - wo**2),
p_hp - sqrt(p_hp**2 - wo**2)))
# Move any zeros that were at infinity to the center of the stopband
z_bs = append(z_bs, +1j*wo * ones(degree))
z_bs = append(z_bs, -1j*wo * ones(degree))
# Cancel out gain change caused by inversion
k_bs = k * real(prod(-z) / prod(-p))
return z_bs, p_bs, k_bs
def butter(N, Wn, btype='low', analog=False, output='ba'):
"""
Butterworth digital and analog filter design.
Design an Nth-order digital or analog Butterworth filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For a Butterworth filter, this is the point at which the gain
drops to 1/sqrt(2) that of the passband (the "-3 dB point").
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
buttord, buttap
Notes
-----
The Butterworth filter has maximally flat frequency response in the
passband.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='butter')
def cheby1(N, rp, Wn, btype='low', analog=False, output='ba'):
"""
Chebyshev type I digital and analog filter design.
Design an Nth-order digital or analog Chebyshev type I filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type I filters, this is the point in the transition band at which
the gain first drops below -`rp`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb1ord, cheb1ap
Notes
-----
The Chebyshev type I filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the passband and increased ringing in the step response.
Type I filters roll off faster than Type II (`cheby2`), but Type II
filters do not have any ripple in the passband.
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby1(4, 5, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type I frequency response (rp=5)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
"""
return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog,
output=output, ftype='cheby1')
def cheby2(N, rs, Wn, btype='low', analog=False, output='ba'):
"""
Chebyshev type II digital and analog filter design.
Design an Nth-order digital or analog Chebyshev type II filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type II filters, this is the point in the transition band at which
the gain first reaches -`rs`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb2ord, cheb2ap
Notes
-----
The Chebyshev type II filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the stopband and increased ringing in the step response.
Type II filters do not roll off as fast as Type I (`cheby1`).
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby2(4, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type II frequency response (rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog,
output=output, ftype='cheby2')
def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba'):
"""
Elliptic (Cauer) digital and analog filter design.
Design an Nth-order digital or analog elliptic filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For elliptic filters, this is the point in the transition band at
which the gain first drops below -`rp`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
ellipord, ellipap
Notes
-----
Also known as Cauer or Zolotarev filters, the elliptical filter maximizes
the rate of transition between the frequency response's passband and
stopband, at the expense of ripple in both, and increased ringing in the
step response.
As `rp` approaches 0, the elliptical filter becomes a Chebyshev
type II filter (`cheby2`). As `rs` approaches 0, it becomes a Chebyshev
type I filter (`cheby1`). As both approach 0, it becomes a Butterworth
filter (`butter`).
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.ellip(4, 5, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptic filter frequency response (rp=5, rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog,
output=output, ftype='elliptic')
def bessel(N, Wn, btype='low', analog=False, output='ba', norm='phase'):
"""
Bessel/Thomson digital and analog filter design.
Design an Nth-order digital or analog Bessel filter and return the
filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies (defined
by the `norm` parameter).
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned. (See Notes.)
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
norm : {'phase', 'delay', 'mag'}, optional
Critical frequency normalization:
``phase``
The filter is normalized such that the phase response reaches its
midpoint at angular (e.g. rad/s) frequency `Wn`. This happens for
both low-pass and high-pass filters, so this is the
"phase-matched" case.
The magnitude response asymptotes are the same as a Butterworth
filter of the same order with a cutoff of `Wn`.
This is the default, and matches MATLAB's implementation.
``delay``
The filter is normalized such that the group delay in the passband
is 1/`Wn` (e.g. seconds). This is the "natural" type obtained by
solving Bessel polynomials.
``mag``
The filter is normalized such that the gain magnitude is -3 dB at
angular frequency `Wn`.
.. versionadded:: 0.18.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
Notes
-----
Also known as a Thomson filter, the analog Bessel filter has maximally
flat group delay and maximally linear phase response, with very little
ringing in the step response. [1]_
The Bessel is inherently an analog filter. This function generates digital
Bessel filters using the bilinear transform, which does not preserve the
phase response of the analog filter. As such, it is only approximately
correct at frequencies below about fs/4. To get maximally-flat group
delay at higher frequencies, the analog Bessel filter must be transformed
using phase-preserving techniques.
See `besselap` for implementation details and references.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the phase-normalized frequency response, showing the relationship
to the Butterworth's cutoff frequency (green):
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)), color='silver', ls='dashed')
>>> b, a = signal.bessel(4, 100, 'low', analog=True, norm='phase')
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.title('Bessel filter magnitude response (with Butterworth)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
and the phase midpoint:
>>> plt.figure()
>>> plt.semilogx(w, np.unwrap(np.angle(h)))
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-np.pi, color='red') # phase midpoint
>>> plt.title('Bessel filter phase response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Phase [radians]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
Plot the magnitude-normalized frequency response, showing the -3 dB cutoff:
>>> b, a = signal.bessel(3, 10, 'low', analog=True, norm='mag')
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.axhline(-3, color='red') # -3 dB magnitude
>>> plt.axvline(10, color='green') # cutoff frequency
>>> plt.title('Magnitude-normalized Bessel filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
Plot the delay-normalized filter, showing the maximally-flat group delay
at 0.1 seconds:
>>> b, a = signal.bessel(5, 1/0.1, 'low', analog=True, norm='delay')
>>> w, h = signal.freqs(b, a)
>>> plt.figure()
>>> plt.semilogx(w[1:], -np.diff(np.unwrap(np.angle(h)))/np.diff(w))
>>> plt.axhline(0.1, color='red') # 0.1 seconds group delay
>>> plt.title('Bessel filter group delay')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Group delay [seconds]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
References
----------
.. [1] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
Characteristics", Proceedings of the Institution of Electrical
Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='bessel_'+norm)
def maxflat():
pass
def yulewalk():
pass
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
"""
Band Stop Objective Function for order minimization.
Returns the non-integer order for an analog band stop filter.
Parameters
----------
wp : scalar
Edge of passband `passb`.
ind : int, {0, 1}
Index specifying which `passb` edge to vary (0 or 1).
passb : ndarray
Two element sequence of fixed passband edges.
stopb : ndarray
Two element sequence of fixed stopband edges.
gpass : float
Amount of ripple in the passband in dB.
gstop : float
Amount of attenuation in the stopband in dB.
type : {'butter', 'cheby', 'ellip'}
Type of filter.
Returns
-------
n : scalar
Filter order (possibly non-integer).
"""
passbC = passb.copy()
passbC[ind] = wp
nat = (stopb * (passbC[0] - passbC[1]) /
(stopb ** 2 - passbC[0] * passbC[1]))
nat = min(abs(nat))
if type == 'butter':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))
elif type == 'cheby':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat)
elif type == 'ellip':
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
n = (d0[0] * d1[1] / (d0[1] * d1[0]))
else:
raise ValueError("Incorrect type: %s" % type)
return n
def buttord(wp, ws, gpass, gstop, analog=False):
"""Butterworth filter order selection.
Return the order of the lowest order digital or analog Butterworth filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Butterworth filter which meets specs.
wn : ndarray or float
The Butterworth natural frequency (i.e. the "3dB frequency"). Should
be used with `butter` to give filter results.
See Also
--------
butter : Filter design using order and critical points
cheb1ord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog bandpass filter with passband within 3 dB from 20 to
50 rad/s, while attenuating by at least 40 dB below 14 rad/s and above 60 rad/s.
Plot its frequency response, showing the passband and stopband
constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.buttord([20, 50], [14, 60], 3, 40, True)
>>> b, a = signal.butter(N, Wn, 'band', True)
>>> w, h = signal.freqs(b, a, np.logspace(1, 2, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth bandpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([1, 14, 14, 1], [-40, -40, 99, 99], '0.9', lw=0) # stop
>>> plt.fill([20, 20, 50, 50], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.fill([60, 60, 1e9, 1e9], [99, -40, -40, 99], '0.9', lw=0) # stop
>>> plt.axis([10, 100, -60, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))))
# Find the Butterworth natural frequency WN (or the "3 dB frequency")
# to give exactly gpass at passb.
try:
W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * ord))
except ZeroDivisionError:
W0 = 1.0
print("Warning, order is zero...check input parameters.")
# now convert this frequency back from lowpass prototype
# to the original analog filter
if filter_type == 1: # low
WN = W0 * passb
elif filter_type == 2: # high
WN = passb / W0
elif filter_type == 3: # stop
WN = numpy.zeros(2, float)
discr = sqrt((passb[1] - passb[0]) ** 2 +
4 * W0 ** 2 * passb[0] * passb[1])
WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0)
WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0)
WN = numpy.sort(abs(WN))
elif filter_type == 4: # pass
W0 = numpy.array([-W0, W0], float)
WN = (-W0 * (passb[1] - passb[0]) / 2.0 +
sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 +
passb[0] * passb[1]))
WN = numpy.sort(abs(WN))
else:
raise ValueError("Bad type: %s" % filter_type)
if not analog:
wn = (2.0 / pi) * arctan(WN)
else:
wn = WN
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb1ord(wp, ws, gpass, gstop, analog=False):
"""Chebyshev type I filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type I
filter that loses no more than `gpass` dB in the passband and has at
least `gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Chebyshev type I filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby1` to give filter results.
See Also
--------
cheby1 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design a digital lowpass filter such that the passband is within 3 dB up
to 0.2*(fs/2), while attenuating by at least 40 dB above 0.3*(fs/2). Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb1ord(0.2, 0.3, 3, 40)
>>> b, a = signal.cheby1(N, 3, Wn, 'low')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev I lowpass filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.01, 0.2, 0.2, .01], [-3, -3, -99, -99], '0.9', lw=0) # pass
>>> plt.fill([0.3, 0.3, 2, 2], [ 9, -40, -40, 9], '0.9', lw=0) # stop
>>> plt.axis([0.08, 1, -60, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Natural frequencies are just the passband edges
if not analog:
wn = (2.0 / pi) * arctan(passb)
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb2ord(wp, ws, gpass, gstop, analog=False):
"""Chebyshev type II filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type II
filter that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Chebyshev type II filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby2` to give filter results.
See Also
--------
cheby2 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design a digital bandstop filter which attenuates by at least 60 dB from 0.2*(fs/2) to
0.5*(fs/2), while staying within 3 dB below 0.1*(fs/2) or above
0.6*(fs/2). Plot its frequency response, showing the passband and
stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60)
>>> b, a = signal.cheby2(N, 60, Wn, 'stop')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev II bandstop filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.01, .1, .1, .01], [-3, -3, -99, -99], '0.9', lw=0) # pass
>>> plt.fill([.2, .2, .5, .5], [ 9, -60, -60, 9], '0.9', lw=0) # stop
>>> plt.fill([.6, .6, 2, 2], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.axis([0.06, 1, -80, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Find frequency where analog response is -gpass dB.
# Then convert back from low-pass prototype to the original filter.
new_freq = cosh(1.0 / ord * arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))))
new_freq = 1.0 / new_freq
if filter_type == 1:
nat = passb / new_freq
elif filter_type == 2:
nat = passb * new_freq
elif filter_type == 3:
nat = numpy.zeros(2, float)
nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) +
sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 +
passb[1] * passb[0]))
nat[1] = passb[1] * passb[0] / nat[0]
elif filter_type == 4:
nat = numpy.zeros(2, float)
nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) +
sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) +
passb[1] * passb[0]))
nat[1] = passb[0] * passb[1] / nat[0]
if not analog:
wn = (2.0 / pi) * arctan(nat)
else:
wn = nat
if len(wn) == 1:
wn = wn[0]
return ord, wn
def ellipord(wp, ws, gpass, gstop, analog=False):
"""Elliptic (Cauer) filter order selection.
Return the order of the lowest order digital or analog elliptic filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for an Elliptic (Cauer) filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`ellip` to give filter results.
See Also
--------
ellip : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog highpass filter such that the passband is within 3 dB
above 30 rad/s, while rejecting -60 dB at 10 rad/s. Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.ellipord(30, 10, 3, 60, True)
>>> b, a = signal.ellip(N, 3, 60, Wn, 'high', True)
>>> w, h = signal.freqs(b, a, np.logspace(0, 3, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptical highpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.1, 10, 10, .1], [1e4, 1e4, -60, -60], '0.9', lw=0) # stop
>>> plt.fill([30, 30, 1e9, 1e9], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.axis([1, 300, -80, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
ord = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0])))
if not analog:
wn = arctan(passb) * 2.0 / pi
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def buttap(N):
"""Return (z,p,k) for analog prototype of Nth-order Butterworth filter.
The filter will have an angular (e.g. rad/s) cutoff frequency of 1.
See Also
--------
butter : Filter design function using this prototype
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
z = numpy.array([])
m = numpy.arange(-N+1, N, 2)
# Middle value is 0 to ensure an exactly real pole
p = -numpy.exp(1j * pi * m / (2 * N))
k = 1
return z, p, k
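# Editor's addition (illustrative sketch, not part of the original SciPy
# source): a quick sanity check of the Butterworth prototype returned above.
# There are no zeros, the gain is 1, and all poles lie on the unit circle in
# the left half of the s-plane.
def _buttap_demo(N=4):
    z, p, k = buttap(N)
    assert len(z) == 0 and len(p) == N and k == 1
    assert numpy.allclose(numpy.abs(p), 1.0)
    assert numpy.all(p.real < 0)
    return p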
def cheb1ap(N, rp):
"""
Return (z,p,k) for Nth-order Chebyshev type I analog lowpass filter.
The returned filter prototype has `rp` decibels of ripple in the passband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
See Also
--------
cheby1 : Filter design function using this prototype
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero error
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
z = numpy.array([])
# Ripple factor (epsilon)
eps = numpy.sqrt(10 ** (0.1 * rp) - 1.0)
mu = 1.0 / N * arcsinh(1 / eps)
# Arrange poles in an ellipse on the left half of the S-plane
m = numpy.arange(-N+1, N, 2)
theta = pi * m / (2*N)
p = -sinh(mu + 1j*theta)
k = numpy.prod(-p, axis=0).real
if N % 2 == 0:
k = k / sqrt((1 + eps * eps))
return z, p, k
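# Editor's addition (illustrative sketch, not part of the original SciPy
# source): the prototype H(s) = k / prod(s - p) has unit DC gain for odd
# orders, and a DC gain of -rp dB for even orders, matching the comment in
# the N == 0 branch above.
def _cheb1ap_dc_gain_demo(rp=1.0):
    for N, expected in [(5, 1.0), (4, 10 ** (-rp / 20.0))]:
        z, p, k = cheb1ap(N, rp)
        dc_gain = abs(k / numpy.prod(-p))
        assert abs(dc_gain - expected) < 1e-10
    return True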
def cheb2ap(N, rs):
"""
    Return (z,p,k) for Nth-order Chebyshev type II analog lowpass filter.
The returned filter prototype has `rs` decibels of ripple in the stopband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first reaches ``-rs``.
See Also
--------
cheby2 : Filter design function using this prototype
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
return numpy.array([]), numpy.array([]), 1
# Ripple factor (epsilon)
de = 1.0 / sqrt(10 ** (0.1 * rs) - 1)
mu = arcsinh(1.0 / de) / N
if N % 2:
m = numpy.concatenate((numpy.arange(-N+1, 0, 2),
numpy.arange(2, N, 2)))
else:
m = numpy.arange(-N+1, N, 2)
z = -conjugate(1j / sin(m * pi / (2.0 * N)))
# Poles around the unit circle like Butterworth
p = -exp(1j * pi * numpy.arange(-N+1, N, 2) / (2 * N))
# Warp into Chebyshev II
p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag
p = 1.0 / p
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
return z, p, k
EPSILON = 2e-16
def _vratio(u, ineps, mp):
[s, c, d, phi] = special.ellipj(u, mp)
ret = abs(ineps - s / c)
return ret
def _kratio(m, k_ratio):
m = float(m)
if m < 0:
m = 0.0
if m > 1:
m = 1.0
if abs(m) > EPSILON and (abs(m) + EPSILON) < 1:
k = special.ellipk([m, 1 - m])
r = k[0] / k[1] - k_ratio
elif abs(m) > EPSILON:
r = -k_ratio
else:
r = 1e20
return abs(r)
def ellipap(N, rp, rs):
"""Return (z,p,k) of Nth-order elliptic analog lowpass filter.
The filter is a normalized prototype that has `rp` decibels of ripple
in the passband and a stopband `rs` decibels down.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
See Also
--------
ellip : Filter design function using this prototype
References
----------
.. [1] Lutova, Tosic, and Evans, "Filter Design for Signal Processing",
Chapters 5 and 12.
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
elif N == 1:
p = -sqrt(1.0 / (10 ** (0.1 * rp) - 1.0))
k = -p
z = []
return asarray(z), asarray(p), k
eps = numpy.sqrt(10 ** (0.1 * rp) - 1)
ck1 = eps / numpy.sqrt(10 ** (0.1 * rs) - 1)
ck1p = numpy.sqrt(1 - ck1 * ck1)
if ck1p == 1:
raise ValueError("Cannot design a filter with given rp and rs"
" specifications.")
val = special.ellipk([ck1 * ck1, ck1p * ck1p])
if abs(1 - ck1p * ck1p) < EPSILON:
krat = 0
else:
krat = N * val[0] / val[1]
m = optimize.fmin(_kratio, [0.5], args=(krat,), maxfun=250, maxiter=250,
disp=0)
if m < 0 or m > 1:
m = optimize.fminbound(_kratio, 0, 1, args=(krat,), maxfun=250,
maxiter=250, disp=0)
capk = special.ellipk(m)
j = numpy.arange(1 - N % 2, N, 2)
jj = len(j)
[s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj))
snew = numpy.compress(abs(s) > EPSILON, s, axis=-1)
z = 1.0 / (sqrt(m) * snew)
z = 1j * z
z = numpy.concatenate((z, conjugate(z)))
r = optimize.fmin(_vratio, special.ellipk(m), args=(1. / eps, ck1p * ck1p),
maxfun=250, maxiter=250, disp=0)
v0 = capk * r / (N * val[0])
[sv, cv, dv, phi] = special.ellipj(v0, 1 - m)
p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0)
if N % 2:
newp = numpy.compress(abs(p.imag) > EPSILON *
numpy.sqrt(numpy.sum(p * numpy.conjugate(p),
axis=0).real),
p, axis=-1)
p = numpy.concatenate((p, conjugate(newp)))
else:
p = numpy.concatenate((p, conjugate(p)))
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
if N % 2 == 0:
k = k / numpy.sqrt((1 + eps * eps))
return z, p, k
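# Editor's addition (illustrative sketch, not part of the original SciPy
# source): by construction, even-order elliptic prototypes also have a DC
# gain of -rp dB, the same even-order property noted for cheb1ap above.
def _ellipap_dc_gain_demo(rp=1.0, rs=40.0):
    z, p, k = ellipap(4, rp, rs)
    dc_gain = abs(k * numpy.prod(-z) / numpy.prod(-p))
    assert abs(dc_gain - 10 ** (-rp / 20.0)) < 1e-10
    return dc_gain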
# TODO: Make this a real public function scipy.misc.ff
def _falling_factorial(x, n):
r"""
Return the factorial of `x` to the `n` falling.
This is defined as:
.. math:: x^\underline n = (x)_n = x (x-1) \cdots (x-n+1)
    This can more efficiently calculate ratios of factorials, since
    n!/m! == falling_factorial(n, n-m) (where n >= m), skipping the factors
    that cancel out. The usual factorial is n! == ff(n, n).
"""
val = 1
for k in range(x - n + 1, x + 1):
val *= k
return val
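# Editor's addition (illustrative sketch, not part of the original SciPy
# source): a small check of the ratio-of-factorials property described in the
# docstring above.
def _falling_factorial_demo():
    from math import factorial as _fact
    assert _falling_factorial(7, 3) == 7 * 6 * 5
    assert _falling_factorial(10, 4) == _fact(10) // _fact(6)
    assert _falling_factorial(6, 6) == _fact(6)
    return True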
def _bessel_poly(n, reverse=False):
"""
Return the coefficients of Bessel polynomial of degree `n`
If `reverse` is true, a reverse Bessel polynomial is output.
Output is a list of coefficients:
[1] = 1
[1, 1] = 1*s + 1
[1, 3, 3] = 1*s^2 + 3*s + 3
[1, 6, 15, 15] = 1*s^3 + 6*s^2 + 15*s + 15
[1, 10, 45, 105, 105] = 1*s^4 + 10*s^3 + 45*s^2 + 105*s + 105
etc.
Output is a Python list of arbitrary precision long ints, so n is only
limited by your hardware's memory.
Sequence is http://oeis.org/A001498 , and output can be confirmed to
match http://oeis.org/A001498/b001498.txt :
>>> i = 0
>>> for n in range(51):
... for x in _bessel_poly(n, reverse=True):
... print(i, x)
... i += 1
"""
if abs(int(n)) != n:
raise ValueError("Polynomial order must be a nonnegative integer")
else:
n = int(n) # np.int32 doesn't work, for instance
out = []
for k in range(n + 1):
num = _falling_factorial(2*n - k, n)
den = 2**(n - k) * factorial(k, exact=True)
out.append(num // den)
if reverse:
return out[::-1]
else:
return out
def _campos_zeros(n):
"""
Return approximate zero locations of Bessel polynomials y_n(x) for order
`n` using polynomial fit (Campos-Calderon 2011)
"""
if n == 1:
return asarray([-1+0j])
s = npp_polyval(n, [0, 0, 2, 0, -3, 1])
b3 = npp_polyval(n, [16, -8]) / s
b2 = npp_polyval(n, [-24, -12, 12]) / s
b1 = npp_polyval(n, [8, 24, -12, -2]) / s
b0 = npp_polyval(n, [0, -6, 0, 5, -1]) / s
r = npp_polyval(n, [0, 0, 2, 1])
a1 = npp_polyval(n, [-6, -6]) / r
a2 = 6 / r
k = np.arange(1, n+1)
x = npp_polyval(k, [0, a1, a2])
y = npp_polyval(k, [b0, b1, b2, b3])
return x + 1j*y
def _aberth(f, fp, x0, tol=1e-15, maxiter=50):
"""
Given a function `f`, its first derivative `fp`, and a set of initial
guesses `x0`, simultaneously find the roots of the polynomial using the
Aberth-Ehrlich method.
``len(x0)`` should equal the number of roots of `f`.
(This is not a complete implementation of Bini's algorithm.)
"""
N = len(x0)
x = array(x0, complex)
beta = np.empty_like(x0)
for iteration in range(maxiter):
alpha = -f(x) / fp(x) # Newton's method
# Model "repulsion" between zeros
for k in range(N):
beta[k] = np.sum(1/(x[k] - x[k+1:]))
beta[k] += np.sum(1/(x[k] - x[:k]))
x += alpha / (1 + alpha * beta)
if not all(np.isfinite(x)):
raise RuntimeError('Root-finding calculation failed')
# Mekwi: The iterative process can be stopped when |hn| has become
# less than the largest error one is willing to permit in the root.
if all(abs(alpha) <= tol):
break
else:
raise Exception('Zeros failed to converge')
return x
def _bessel_zeros(N):
"""
Find zeros of ordinary Bessel polynomial of order `N`, by root-finding of
modified Bessel function of the second kind
"""
if N == 0:
return asarray([])
# Generate starting points
x0 = _campos_zeros(N)
# Zeros are the same for exp(1/x)*K_{N+0.5}(1/x) and Nth-order ordinary
# Bessel polynomial y_N(x)
def f(x):
return special.kve(N+0.5, 1/x)
# First derivative of above
def fp(x):
return (special.kve(N-0.5, 1/x)/(2*x**2) -
special.kve(N+0.5, 1/x)/(x**2) +
special.kve(N+1.5, 1/x)/(2*x**2))
# Starting points converge to true zeros
x = _aberth(f, fp, x0)
# Improve precision using Newton's method on each
for i in range(len(x)):
x[i] = optimize.newton(f, x[i], fp, tol=1e-15)
# Average complex conjugates to make them exactly symmetrical
x = np.mean((x, x[::-1].conj()), 0)
# Zeros should sum to -1
if abs(np.sum(x) + 1) > 1e-15:
raise RuntimeError('Generated zeros are inaccurate')
return x
def _norm_factor(p, k):
"""
Numerically find frequency shift to apply to delay-normalized filter such
that -3 dB point is at 1 rad/sec.
`p` is an array_like of polynomial poles
`k` is a float gain
First 10 values are listed in "Bessel Scale Factors" table,
"Bessel Filters Polynomials, Poles and Circuit Elements 2003, C. Bond."
"""
p = asarray(p, dtype=complex)
def G(w):
"""
Gain of filter
"""
return abs(k / prod(1j*w - p))
def cutoff(w):
"""
When gain = -3 dB, return 0
"""
return G(w) - 1/np.sqrt(2)
return optimize.newton(cutoff, 1.5)
def besselap(N, norm='phase'):
"""
Return (z,p,k) for analog prototype of an Nth-order Bessel filter.
Parameters
----------
N : int
The order of the filter.
norm : {'phase', 'delay', 'mag'}, optional
Frequency normalization:
``phase``
The filter is normalized such that the phase response reaches its
midpoint at an angular (e.g. rad/s) cutoff frequency of 1. This
happens for both low-pass and high-pass filters, so this is the
"phase-matched" case. [6]_
The magnitude response asymptotes are the same as a Butterworth
filter of the same order with a cutoff of `Wn`.
This is the default, and matches MATLAB's implementation.
``delay``
The filter is normalized such that the group delay in the passband
is 1 (e.g. 1 second). This is the "natural" type obtained by
solving Bessel polynomials
``mag``
The filter is normalized such that the gain magnitude is -3 dB at
angular frequency 1. This is called "frequency normalization" by
Bond. [1]_
.. versionadded:: 0.18.0
Returns
-------
z : ndarray
Zeros of the transfer function. Is always an empty array.
p : ndarray
Poles of the transfer function.
k : scalar
Gain of the transfer function. For phase-normalized, this is always 1.
See Also
--------
bessel : Filter design function using this prototype
Notes
-----
To find the pole locations, approximate starting points are generated [2]_
for the zeros of the ordinary Bessel polynomial [3]_, then the
Aberth-Ehrlich method [4]_ [5]_ is used on the Kv(x) Bessel function to
calculate more accurate zeros, and these locations are then inverted about
the unit circle.
References
----------
.. [1] C.R. Bond, "Bessel Filter Constants",
http://www.crbond.com/papers/bsf.pdf
.. [2] Campos and Calderon, "Approximate closed-form formulas for the
zeros of the Bessel Polynomials", :arXiv:`1105.0957`.
.. [3] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
Characteristics", Proceedings of the Institution of Electrical
Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
.. [4] Aberth, "Iteration Methods for Finding all Zeros of a Polynomial
Simultaneously", Mathematics of Computation, Vol. 27, No. 122,
April 1973
.. [5] Ehrlich, "A modified Newton method for polynomials", Communications
of the ACM, Vol. 10, Issue 2, pp. 107-108, Feb. 1967,
:DOI:`10.1145/363067.363115`
.. [6] Miller and Bohn, "A Bessel Filter Crossover, and Its Relation to
Others", RaneNote 147, 1998, http://www.rane.com/note147.html
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
if N == 0:
p = []
k = 1
else:
# Find roots of reverse Bessel polynomial
p = 1/_bessel_zeros(N)
a_last = _falling_factorial(2*N, N) // 2**N
# Shift them to a different normalization if required
if norm in ('delay', 'mag'):
# Normalized for group delay of 1
k = a_last
if norm == 'mag':
# -3 dB magnitude point is at 1 rad/sec
norm_factor = _norm_factor(p, k)
p /= norm_factor
k = norm_factor**-N * a_last
elif norm == 'phase':
# Phase-matched (1/2 max phase shift at 1 rad/sec)
# Asymptotes are same as Butterworth filter
p *= 10**(-math.log10(a_last)/N)
k = 1
else:
raise ValueError('normalization not understood')
return asarray([]), asarray(p, dtype=complex), float(k)
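# Editor's addition (illustrative sketch, not part of the original SciPy
# source): for the default 'phase' normalization the Bessel prototype has no
# zeros, N stable poles and unit gain.
def _besselap_demo(N=5):
    z, p, k = besselap(N, norm='phase')
    assert len(z) == 0 and len(p) == N and k == 1
    assert numpy.all(p.real < 0)
    return p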
def iirnotch(w0, Q):
"""
Design second-order IIR notch digital filter.
A notch filter is a band-stop filter with a narrow bandwidth
(high quality factor). It rejects a narrow frequency band and
leaves the rest of the spectrum little changed.
Parameters
----------
w0 : float
Normalized frequency to remove from a signal. It is a
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1``
corresponding to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
notch filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
See Also
--------
iirpeak
Notes
-----
    .. versionadded:: 0.19.0
References
----------
.. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
Prentice-Hall, 1996
Examples
--------
Design and plot filter to remove the 60Hz component from a
signal sampled at 200Hz, using a quality factor Q = 30
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> fs = 200.0 # Sample frequency (Hz)
>>> f0 = 60.0 # Frequency to be removed from signal (Hz)
>>> Q = 30.0 # Quality factor
>>> w0 = f0/(fs/2) # Normalized Frequency
>>> # Design notch filter
>>> b, a = signal.iirnotch(w0, Q)
>>> # Frequency response
>>> w, h = signal.freqz(b, a)
>>> # Generate frequency axis
>>> freq = w*fs/(2*np.pi)
>>> # Plot
>>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
>>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue')
>>> ax[0].set_title("Frequency Response")
>>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
>>> ax[0].set_xlim([0, 100])
>>> ax[0].set_ylim([-25, 10])
>>> ax[0].grid()
>>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
>>> ax[1].set_ylabel("Angle (degrees)", color='green')
>>> ax[1].set_xlabel("Frequency (Hz)")
>>> ax[1].set_xlim([0, 100])
>>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
>>> ax[1].set_ylim([-90, 90])
>>> ax[1].grid()
>>> plt.show()
"""
return _design_notch_peak_filter(w0, Q, "notch")
def iirpeak(w0, Q):
"""
Design second-order IIR peak (resonant) digital filter.
A peak filter is a band-pass filter with a narrow bandwidth
(high quality factor). It rejects components outside a narrow
frequency band.
Parameters
----------
w0 : float
Normalized frequency to be retained in a signal. It is a
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding
to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
peak filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
See Also
--------
iirnotch
Notes
-----
    .. versionadded:: 0.19.0
References
----------
.. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
Prentice-Hall, 1996
Examples
--------
Design and plot filter to remove the frequencies other than the 300Hz
component from a signal sampled at 1000Hz, using a quality factor Q = 30
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> fs = 1000.0 # Sample frequency (Hz)
>>> f0 = 300.0 # Frequency to be retained (Hz)
>>> Q = 30.0 # Quality factor
>>> w0 = f0/(fs/2) # Normalized Frequency
>>> # Design peak filter
>>> b, a = signal.iirpeak(w0, Q)
>>> # Frequency response
>>> w, h = signal.freqz(b, a)
>>> # Generate frequency axis
>>> freq = w*fs/(2*np.pi)
>>> # Plot
>>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
>>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue')
>>> ax[0].set_title("Frequency Response")
>>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
>>> ax[0].set_xlim([0, 500])
>>> ax[0].set_ylim([-50, 10])
>>> ax[0].grid()
>>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
>>> ax[1].set_ylabel("Angle (degrees)", color='green')
>>> ax[1].set_xlabel("Frequency (Hz)")
>>> ax[1].set_xlim([0, 500])
>>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
>>> ax[1].set_ylim([-90, 90])
>>> ax[1].grid()
>>> plt.show()
"""
return _design_notch_peak_filter(w0, Q, "peak")
def _design_notch_peak_filter(w0, Q, ftype):
"""
Design notch or peak digital filter.
Parameters
----------
w0 : float
Normalized frequency to remove from a signal. It is a
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1``
corresponding to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
notch filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
ftype : str
The type of IIR filter to design:
- notch filter : ``notch``
- peak filter : ``peak``
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
"""
# Guarantee that the inputs are floats
w0 = float(w0)
Q = float(Q)
# Checks if w0 is within the range
if w0 > 1.0 or w0 < 0.0:
raise ValueError("w0 should be such that 0 < w0 < 1")
# Get bandwidth
bw = w0/Q
# Normalize inputs
bw = bw*np.pi
w0 = w0*np.pi
    # Compute -3dB attenuation
gb = 1/np.sqrt(2)
if ftype == "notch":
# Compute beta: formula 11.3.4 (p.575) from reference [1]
beta = (np.sqrt(1.0-gb**2.0)/gb)*np.tan(bw/2.0)
elif ftype == "peak":
# Compute beta: formula 11.3.19 (p.579) from reference [1]
beta = (gb/np.sqrt(1.0-gb**2.0))*np.tan(bw/2.0)
else:
raise ValueError("Unknown ftype.")
# Compute gain: formula 11.3.6 (p.575) from reference [1]
gain = 1.0/(1.0+beta)
# Compute numerator b and denominator a
# formulas 11.3.7 (p.575) and 11.3.21 (p.579)
# from reference [1]
if ftype == "notch":
b = gain*np.array([1.0, -2.0*np.cos(w0), 1.0])
else:
b = (1.0-gain)*np.array([1.0, 0.0, -1.0])
a = np.array([1.0, -2.0*gain*np.cos(w0), (2.0*gain-1.0)])
return b, a
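# Editor's addition (illustrative sketch, not part of the original SciPy
# source): a numerical check that the notch design above has (near) zero gain
# at the notch frequency and unit gain at DC. Evaluating the length-3
# polynomials at z = exp(1j*pi*w0) gives H(e^{j*pi*w0}) because the extra
# z**2 factors introduced by np.polyval cancel in the ratio.
def _notch_demo(w0=0.3, Q=30.0):
    b, a = _design_notch_peak_filter(w0, Q, "notch")
    z_notch = np.exp(1j * np.pi * w0)
    h_notch = np.polyval(b, z_notch) / np.polyval(a, z_notch)
    h_dc = np.polyval(b, 1.0) / np.polyval(a, 1.0)
    assert abs(h_notch) < 1e-12
    assert abs(h_dc - 1.0) < 1e-12
    return h_notch, h_dc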
filter_dict = {'butter': [buttap, buttord],
'butterworth': [buttap, buttord],
'cauer': [ellipap, ellipord],
'elliptic': [ellipap, ellipord],
'ellip': [ellipap, ellipord],
'bessel': [besselap],
'bessel_phase': [besselap],
'bessel_delay': [besselap],
'bessel_mag': [besselap],
'cheby1': [cheb1ap, cheb1ord],
'chebyshev1': [cheb1ap, cheb1ord],
'chebyshevi': [cheb1ap, cheb1ord],
'cheby2': [cheb2ap, cheb2ord],
'chebyshev2': [cheb2ap, cheb2ord],
'chebyshevii': [cheb2ap, cheb2ord],
}
band_dict = {'band': 'bandpass',
'bandpass': 'bandpass',
'pass': 'bandpass',
'bp': 'bandpass',
'bs': 'bandstop',
'bandstop': 'bandstop',
'bands': 'bandstop',
'stop': 'bandstop',
'l': 'lowpass',
'low': 'lowpass',
'lowpass': 'lowpass',
'lp': 'lowpass',
'high': 'highpass',
'highpass': 'highpass',
'h': 'highpass',
'hp': 'highpass',
}
bessel_norms = {'bessel': 'phase',
'bessel_phase': 'phase',
'bessel_delay': 'delay',
'bessel_mag': 'mag'}
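# Editor's addition (illustrative sketch, not part of the original SciPy
# source): a quick check of how the alias tables above resolve; these are
# presumably what the higher-level helpers referenced in the docstrings
# (iirfilter / iirdesign) use to look up prototype and order-selection
# functions and band types.
def _alias_lookup_demo():
    assert filter_dict['butter'][0] is buttap
    assert filter_dict['cheby2'][1] is cheb2ord
    assert band_dict['bp'] == 'bandpass'
    assert bessel_norms['bessel_delay'] == 'delay'
    return True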
| bsd-3-clause |
QudevETH/PycQED_py3 | pycqed/tests/test_unit_conversions.py | 1 | 1978 | import unittest
import numpy as np
import matplotlib.pyplot as plt
from pycqed.analysis.tools.plotting import SI_prefix_and_scale_factor
from pycqed.analysis.tools.plotting import set_xlabel, set_ylabel
from pycqed.analysis.tools.plotting import SI_val_to_msg_str
class Test_SI_prefix_scale_factor(unittest.TestCase):
def test_non_SI(self):
unit = 'arb.unit.'
scale_factor, post_unit = SI_prefix_and_scale_factor(val=5, unit=unit)
self.assertEqual(scale_factor, 1)
self.assertEqual(unit, post_unit)
def test_SI_scale_factors(self):
unit = 'V'
scale_factor, post_unit = SI_prefix_and_scale_factor(val=5, unit=unit)
self.assertEqual(scale_factor, 1)
self.assertEqual(' '+unit, post_unit)
scale_factor, post_unit = SI_prefix_and_scale_factor(val=5000,
unit=unit)
self.assertEqual(scale_factor, 1/1000)
self.assertEqual('k'+unit, post_unit)
scale_factor, post_unit = SI_prefix_and_scale_factor(val=0.05,
unit=unit)
self.assertEqual(scale_factor, 1000)
self.assertEqual('m'+unit, post_unit)
class test_SI_unit_aware_labels(unittest.TestCase):
def test_label_scaling(self):
"""
        This test creates a dummy plot and checks that the axis unit labels
        are rescaled to the appropriate SI prefix
"""
f, ax = plt.subplots()
x = np.linspace(-6, 6, 101)
y = np.cos(x)
ax.plot(x*1000, y/1e5)
set_xlabel(ax, 'Distance', 'm')
set_ylabel(ax, 'Amplitude', 'V')
xlab = ax.get_xlabel()
ylab = ax.get_ylabel()
self.assertEqual(xlab, 'Distance (km)')
self.assertEqual(ylab, 'Amplitude (μV)')
def test_SI_val_to_msg_str(self):
val, unit = SI_val_to_msg_str(1030, 'm')
self.assertEqual(val, str(1.03))
self.assertEqual(unit, 'km')
| mit |
ishay2b/tensorflow | tensorflow/examples/learn/wide_n_deep_tutorial.py | 18 | 8111 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example code for TensorFlow Wide & Deep Tutorial using TF.Learn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import shutil
import sys
import tempfile
import pandas as pd
from six.moves import urllib
import tensorflow as tf
CSV_COLUMNS = [
"age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week", "native_country",
"income_bracket"
]
gender = tf.feature_column.categorical_column_with_vocabulary_list(
"gender", ["Female", "Male"])
education = tf.feature_column.categorical_column_with_vocabulary_list(
"education", [
"Bachelors", "HS-grad", "11th", "Masters", "9th",
"Some-college", "Assoc-acdm", "Assoc-voc", "7th-8th",
"Doctorate", "Prof-school", "5th-6th", "10th", "1st-4th",
"Preschool", "12th"
])
marital_status = tf.feature_column.categorical_column_with_vocabulary_list(
"marital_status", [
"Married-civ-spouse", "Divorced", "Married-spouse-absent",
"Never-married", "Separated", "Married-AF-spouse", "Widowed"
])
relationship = tf.feature_column.categorical_column_with_vocabulary_list(
"relationship", [
"Husband", "Not-in-family", "Wife", "Own-child", "Unmarried",
"Other-relative"
])
workclass = tf.feature_column.categorical_column_with_vocabulary_list(
"workclass", [
"Self-emp-not-inc", "Private", "State-gov", "Federal-gov",
"Local-gov", "?", "Self-emp-inc", "Without-pay", "Never-worked"
])
# To show an example of hashing:
occupation = tf.feature_column.categorical_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.feature_column.categorical_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# Continuous base columns.
age = tf.feature_column.numeric_column("age")
education_num = tf.feature_column.numeric_column("education_num")
capital_gain = tf.feature_column.numeric_column("capital_gain")
capital_loss = tf.feature_column.numeric_column("capital_loss")
hours_per_week = tf.feature_column.numeric_column("hours_per_week")
# Transformations.
age_buckets = tf.feature_column.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
# Wide columns and deep columns.
base_columns = [
gender, education, marital_status, relationship, workclass, occupation,
native_country, age_buckets,
]
crossed_columns = [
tf.feature_column.crossed_column(
["education", "occupation"], hash_bucket_size=1000),
tf.feature_column.crossed_column(
[age_buckets, "education", "occupation"], hash_bucket_size=1000),
tf.feature_column.crossed_column(
["native_country", "occupation"], hash_bucket_size=1000)
]
deep_columns = [
tf.feature_column.indicator_column(workclass),
tf.feature_column.indicator_column(education),
tf.feature_column.indicator_column(gender),
tf.feature_column.indicator_column(relationship),
# To show an example of embedding
tf.feature_column.embedding_column(native_country, dimension=8),
tf.feature_column.embedding_column(occupation, dimension=8),
age,
education_num,
capital_gain,
capital_loss,
hours_per_week,
]
def maybe_download(train_data, test_data):
"""Maybe downloads training data and returns train and test file names."""
if train_data:
train_file_name = train_data
else:
train_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve(
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data",
train_file.name) # pylint: disable=line-too-long
train_file_name = train_file.name
train_file.close()
print("Training data is downloaded to %s" % train_file_name)
if test_data:
test_file_name = test_data
else:
test_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve(
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test",
test_file.name) # pylint: disable=line-too-long
test_file_name = test_file.name
test_file.close()
print("Test data is downloaded to %s"% test_file_name)
return train_file_name, test_file_name
def build_estimator(model_dir, model_type):
"""Build an estimator."""
if model_type == "wide":
m = tf.estimator.LinearClassifier(
model_dir=model_dir, feature_columns=base_columns + crossed_columns)
elif model_type == "deep":
m = tf.estimator.DNNClassifier(
model_dir=model_dir,
feature_columns=deep_columns,
hidden_units=[100, 50])
else:
m = tf.estimator.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=crossed_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[100, 50])
return m
def input_fn(data_file, num_epochs, shuffle):
"""Input builder function."""
df_data = pd.read_csv(
tf.gfile.Open(data_file),
names=CSV_COLUMNS,
skipinitialspace=True,
engine="python",
skiprows=1)
# remove NaN elements
df_data = df_data.dropna(how="any", axis=0)
labels = df_data["income_bracket"].apply(lambda x: ">50K" in x).astype(int)
return tf.estimator.inputs.pandas_input_fn(
x=df_data,
y=labels,
batch_size=100,
num_epochs=num_epochs,
shuffle=shuffle,
num_threads=5)
def train_and_eval(model_dir, model_type, train_steps, train_data, test_data):
"""Train and evaluate the model."""
train_file_name, test_file_name = maybe_download(train_data, test_data)
# Specify file path below if want to find the output easily
model_dir = tempfile.mkdtemp() if not model_dir else model_dir
m = build_estimator(model_dir, model_type)
# set num_epochs to None to get infinite stream of data.
m.train(
input_fn=input_fn(train_file_name, num_epochs=None, shuffle=True),
steps=train_steps)
# set steps to None to run evaluation until all data consumed.
results = m.evaluate(
input_fn=input_fn(test_file_name, num_epochs=1, shuffle=False),
steps=None)
print("model directory = %s" % model_dir)
for key in sorted(results):
print("%s: %s" % (key, results[key]))
# Manual cleanup
shutil.rmtree(model_dir)
FLAGS = None
def main(_):
train_and_eval(FLAGS.model_dir, FLAGS.model_type, FLAGS.train_steps,
FLAGS.train_data, FLAGS.test_data)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--model_dir",
type=str,
default="",
help="Base directory for output models."
)
parser.add_argument(
"--model_type",
type=str,
default="wide_n_deep",
help="Valid model types: {'wide', 'deep', 'wide_n_deep'}."
)
parser.add_argument(
"--train_steps",
type=int,
default=2000,
help="Number of training steps."
)
parser.add_argument(
"--train_data",
type=str,
default="",
help="Path to the training data."
)
parser.add_argument(
"--test_data",
type=str,
default="",
help="Path to the test data."
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
mjgrav2001/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 244 | 2496 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/sklearn/mixture/tests/test_gmm.py | 48 | 17414 | import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
        # covariances before fitting. There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
# Test that multiple inits does not much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
# Test that the right number of parameters is estimated
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
    while mathematically equivalent, was observed to raise a ``LinAlgError``
    exception when computing a ``GMM`` with full covariance matrices and fixed
    mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
# we build a dataset with 2 2d component. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
| gpl-2.0 |
unnati-xyz/droidcon-twitter-analytics | geeksrus/utils/dbconn.py | 1 | 2020 | from pymongo import MongoClient
import pandas as pd
def connect_mongo(db, host='localhost', port=27017, username=None, password=None):
""" A util for making a connection to mongo """
if username and password:
mongo_uri = 'mongodb://%s:%s@%s:%s/%s' % (username, password, host, port, db)
conn = MongoClient(mongo_uri)
else:
conn = MongoClient(host, port)
return conn[db]
def read_mongo(db_conn, collection, query={}, no_id=True):
""" Read from Mongo and Store into DataFrame """
# Make a query to the specific DB and Collection
print(query)
cursor = db_conn[collection].find(query)
# Expand the cursor and construct the DataFrame
df = pd.DataFrame(list(cursor))
# Delete the _id
if no_id:
del df['_id']
return df
def read_mongo_projection(db_conn, collection, query={}, no_id=True):
""" Read from Mongo and Store into DataFrame """
# Make a query to the specific DB and Collection
cursor = db_conn[collection].find(projection=query)
# Expand the cursor and construct the DataFrame
df = pd.DataFrame(list(cursor))
# Delete the _id
if no_id:
del df['_id']
return df
def write_mongo(db_conn, collection, document):
result = db_conn[collection].insert(document)
def find_and_sort_desc(db_conn, collection, field, no_id = True):
# Make a query to the specific DB and Collection
cursor = db_conn[collection].find().sort(field, -1)
# Expand the cursor and construct the DataFrame
df = pd.DataFrame(list(cursor))
# Delete the _id
if no_id:
del df['_id']
return df
def find_with_project_and_sort(db_conn, collection, field,query={}, no_id = True):
# Make a query to the specific DB and Collection
cursor = db_conn[collection].find(projection=query).sort(field, -1)
# Expand the cursor and construct the DataFrame
df = pd.DataFrame(list(cursor))
# Delete the _id
if no_id:
del df['_id']
return df
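# Editor's addition (illustrative usage sketch, not part of the original
# module). The database and collection names below are hypothetical
# placeholders, and a running MongoDB instance is assumed.
def _example_usage():
    db = connect_mongo('twitter_analytics')
    tweets = read_mongo(db, 'tweets', query={'lang': 'en'})
    top_users = find_and_sort_desc(db, 'users', 'followers_count')
    return tweets.head(), top_users.head()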
| mit |
IndraVikas/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the Brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior as Gaussian
naive Bayes: the calibration curve has a sigmoid shape, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
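            # decision_function returns unbounded scores; rescale them to [0, 1]
            # so brier_score_loss and calibration_curve can treat them like
            # probabilities.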
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
CalebBell/thermo | tests/test_interaction_parameters.py | 1 | 3433 | # -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2017 Caleb Bell <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from math import exp, log
import pytest
import numpy as np
import pandas as pd
from fluids.constants import calorie, R
from thermo.interaction_parameters import IPDB
from fluids.numerics import assert_close, assert_close1d, assert_close2d
def run_validate_db():
from thermo.interaction_parameters import ip_files
for name in ip_files.keys():
IPDB.validate_table(name)
def test_basic_chemsep_PR():
kij = IPDB.get_ip_specific('ChemSep PR', ['124-38-9', '67-56-1'], 'kij')
assert_close(kij, 0.0583)
kij_auto = IPDB.get_ip_automatic(['124-38-9', '67-56-1'], 'PR kij', 'kij')
assert_close(kij, kij_auto)
kij_missing = IPDB.get_ip_specific('ChemSep PR', ['1249-38-9', '67-56-1'], 'kij')
assert kij_missing == 0
assert False == IPDB.has_ip_specific('ChemSep PR', ['1249-38-9', '67-56-1'], 'kij')
assert True == IPDB.has_ip_specific('ChemSep PR', ['124-38-9', '67-56-1'], 'kij')
assert IPDB.get_tables_with_type('PR kij') == ['ChemSep PR']
# interaction parameter matrix
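    # CAS numbers below: 74-82-8 methane, 74-84-0 ethane, 74-98-6 propane,
    # 106-97-8 n-butane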
kij_C1C4 = IPDB.get_ip_symmetric_matrix('ChemSep PR', ['74-82-8', '74-84-0', '74-98-6', '106-97-8'], 'kij')
kij_C1C4_known = [[0.0, -0.0059, 0.0119, 0.0185],
[-0.0059, 0.0, 0.0011, 0.0089],
[0.0119, 0.0011, 0.0, 0.0033],
[0.0185, 0.0089, 0.0033, 0.0]]
assert_close2d(kij_C1C4, kij_C1C4_known)
    # Test for asymmetric works the same since the model is asymmetric
kij_C1C4 = IPDB.get_ip_symmetric_matrix('ChemSep PR', ['74-82-8', '74-84-0', '74-98-6', '106-97-8'], 'kij')
assert_close2d(kij_C1C4, kij_C1C4_known)
def test_basic_chemsep_NRTL():
# ethanol water, converted to metric, simple T dependence
bijs = IPDB.get_ip_asymmetric_matrix('ChemSep NRTL', ['64-17-5', '7732-18-5'], 'bij')
alphas_known = [[0.0, 0.2937, 0.3009], [0.2937, 0.0, 0.2999], [0.3009, 0.2999, 0.0]]
    # Test it works both symmetric and asymmetric
alphas = IPDB.get_ip_asymmetric_matrix('ChemSep NRTL', ['64-17-5', '7732-18-5', '67-56-1'], 'alphaij')
assert_close2d(alphas, alphas_known)
alphas = IPDB.get_ip_symmetric_matrix('ChemSep NRTL', ['64-17-5', '7732-18-5', '67-56-1'], 'alphaij')
assert_close2d(alphas, alphas_known) | mit |
antoinecarme/pyaf | tests/model_control/test_ozone_custom_models_enabled.py | 1 | 1923 | import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
#get_ipython().magic('matplotlib inline')
def pickleModel(iModel):
import pickle
output = pickle.dumps(iModel)
lReloadedObject = pickle.loads(output)
output2 = pickle.dumps(lReloadedObject)
assert(iModel.to_json() == lReloadedObject.to_json())
return lReloadedObject;
def build_model(transformations, trends, periodics, autoregs):
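    # Train a PyAF forecast engine on the ozone dataset with only the requested
    # transformations/trends/periodics/autoregressions enabled, check the trained
    # engine survives pickling, and produce an H-step-ahead forecast.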
b1 = tsds.load_ozone_exogenous()
df = b1.mPastData
lEngine = autof.cForecastEngine()
lEngine
H = b1.mHorizon;
# lEngine.mOptions.enable_slow_mode();
# lEngine.mOptions.mDebugPerformance = True;
lEngine.mOptions.set_active_transformations(transformations);
lEngine.mOptions.set_active_trends(trends);
lEngine.mOptions.set_active_periodics(periodics);
lEngine.mOptions.set_active_autoregressions(autoregs);
lExogenousData = (b1.mExogenousDataFrame , b1.mExogenousVariables)
lEngine.train(df , b1.mTimeVar , b1.mSignalVar, H);
lEngine.getModelInfo();
print(lEngine.mSignalDecomposition.mTrPerfDetails.head());
lEngine2 = pickleModel(lEngine)
lEngine2.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
lEngine2.standardPlots("outputs/my_ozone");
dfapp_in = df.copy();
dfapp_in.tail()
#H = 12
dfapp_out = lEngine2.forecast(dfapp_in, H);
#dfapp_out.to_csv("outputs/ozone_apply_out.csv")
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[b1.mTimeVar , b1.mSignalVar, b1.mSignalVar + '_Forecast']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(H));
print("\n\n<ModelInfo>")
print(lEngine2.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
| bsd-3-clause |
hurricane42/data | pew-religions/Religion-Leah.py | 37 | 3271 | #!/usr/bin/env python
import numpy as np
import pandas as pd
religions = ['Buddhist', 'Catholic', 'Evangel Prot', 'Hindu', 'Hist Black Prot', 'Jehovahs Witness', 'Jewish', 'Mainline Prot', 'Mormon', 'Muslim', 'Orthodox Christian', 'Unaffiliated']
csv = open("current.csv", 'w')
csv.truncate()
def write_row(matrix):
arr = np.asarray(matrix[0])[0]
row = ','.join([str(a) for a in arr]) + '\n'
csv.write(row)
# Intitial distribution of religions in US
first = np.matrix([.007, .208, .254, .007, .065, .008, .019, .147, .016, .009, .005, .228])
# Normed to sum to 100%
current = first / np.sum(first)
t0 = current
write_row(current)
# Transition matrix
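# Row i gives the probabilities that an adherent of religions[i] ends up in each
# religion (columns, in the same order); the diagonal entries are retention rates.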
trans = np.matrix(((0.390296314, 0.027141947, 0.06791021, 0.001857564, 0, 0, 0.011166082, 0.059762879, 0, 0, 0, 0.396569533),
(0.005370791, 0.593173325, 0.103151608, 0.000649759, 0.010486747, 0.005563864, 0.002041424, 0.053825329, 0.004760476, 0.001130529, 0.000884429, 0.199488989),
(0.00371836, 0.023900817, 0.650773331, 0.000250102, 0.016774503, 0.003098214, 0.001865491, 0.122807467, 0.004203107, 0.000186572, 0.002123778, 0.151866648),
(0, 0, 0.0033732, 0.804072618, 0, 0.001511151, 0, 0.01234639, 0, 0.00209748, 0, 0.17659916),
(0.002051357, 0.016851659, 0.09549708, 0, 0.699214315, 0.010620473, 0.000338804, 0.024372871, 0.000637016, 0.009406884, 0.000116843, 0.129892558),
(0, 0.023278276, 0.109573979, 0, 0.077957568, 0.336280578, 0, 0.074844833, 0.007624035, 0, 0, 0.35110361),
(0.006783201, 0.004082693, 0.014329604, 0, 0, 0.000610585, 0.745731278, 0.009587587, 0, 0, 0.002512334, 0.184058682),
(0.005770357, 0.038017215, 0.187857555, 0.000467601, 0.008144075, 0.004763516, 0.003601208, 0.451798506, 0.005753587, 0.000965543, 0.00109818, 0.25750798),
(0.007263135, 0.01684885, 0.06319935, 0.000248467, 0.0059394, 0, 0.001649896, 0.03464334, 0.642777489, 0.002606278, 0, 0.208904711),
(0, 0.005890381, 0.023573308, 0, 0.011510643, 0, 0.005518343, 0.014032084, 0, 0.772783807, 0, 0.15424369),
(0.004580353, 0.042045841, 0.089264134 , 0, 0.00527346, 0, 0, 0.061471387, 0.005979218, 0.009113978, 0.526728084, 0.243246723),
(0.006438308, 0.044866331, 0.1928814, 0.002035375, 0.04295005, 0.010833621, 0.011541439, 0.09457963, 0.01365141, 0.005884336, 0.002892072, 0.525359211)))
# Fertility array
fert = np.matrix(((2.1, 2.3, 2.3, 2.1, 2.5, 2.1, 2, 1.9, 3.4, 2.8, 2.1, 1.7)))
# Create data frame for printing later
religionDataFrame = pd.DataFrame()
for x in range(0,100):
### beginning of conversion step
# apply transition matrix to current distribution
current = current * trans
### beginning of fertility step
# divide by two for couple number
current = current/2
# adjust by fertility
current = np.multiply(fert, current)
# normalize to 100%
current = current / np.sum(current)
write_row(current)
# add to data frame
religionDataFrame = religionDataFrame.append(pd.DataFrame(current), ignore_index=True)
csv.close()
religionDataFrame.columns = religions
religionDataFrame.to_csv("current_pandas_save.csv")
| mit |
oesteban/seaborn | seaborn/rcmod.py | 4 | 16004 | """Functions that alter the matplotlib rc dictionary on the fly."""
from distutils.version import LooseVersion
import functools
import numpy as np
import matplotlib as mpl
from . import palettes
mpl_ge_150 = LooseVersion(mpl.__version__) >= '1.5.0'
_style_keys = (
"axes.facecolor",
"axes.edgecolor",
"axes.grid",
"axes.axisbelow",
"axes.linewidth",
"axes.labelcolor",
"figure.facecolor",
"grid.color",
"grid.linestyle",
"text.color",
"xtick.color",
"ytick.color",
"xtick.direction",
"ytick.direction",
"xtick.major.size",
"ytick.major.size",
"xtick.minor.size",
"ytick.minor.size",
"legend.frameon",
"legend.numpoints",
"legend.scatterpoints",
"lines.solid_capstyle",
"image.cmap",
"font.family",
"font.sans-serif",
)
_context_keys = (
"figure.figsize",
"font.size",
"axes.labelsize",
"axes.titlesize",
"xtick.labelsize",
"ytick.labelsize",
"legend.fontsize",
"grid.linewidth",
"lines.linewidth",
"patch.linewidth",
"lines.markersize",
"lines.markeredgewidth",
"xtick.major.width",
"ytick.major.width",
"xtick.minor.width",
"ytick.minor.width",
"xtick.major.pad",
"ytick.major.pad"
)
def set(context="notebook", style="darkgrid", palette="deep",
font="sans-serif", font_scale=1, color_codes=False, rc=None):
"""Set aesthetic parameters in one step.
Each set of parameters can be set directly or temporarily, see the
referenced functions below for more information.
Parameters
----------
context : string or dict
Plotting context parameters, see :func:`plotting_context`
style : string or dict
Axes style parameters, see :func:`axes_style`
palette : string or sequence
Color palette, see :func:`color_palette`
font : string
Font family, see matplotlib font manager.
font_scale : float, optional
Separate scaling factor to independently scale the size of the
font elements.
color_codes : bool
If ``True`` and ``palette`` is a seaborn palette, remap the shorthand
color codes (e.g. "b", "g", "r", etc.) to the colors from this palette.
rc : dict or None
Dictionary of rc parameter mappings to override the above.
"""
set_context(context, font_scale)
set_style(style, rc={"font.family": font})
set_palette(palette, color_codes=color_codes)
if rc is not None:
mpl.rcParams.update(rc)
def reset_defaults():
"""Restore all RC params to default settings."""
mpl.rcParams.update(mpl.rcParamsDefault)
def reset_orig():
"""Restore all RC params to original settings (respects custom rc)."""
mpl.rcParams.update(mpl.rcParamsOrig)
def axes_style(style=None, rc=None):
"""Return a parameter dict for the aesthetic style of the plots.
This affects things like the color of the axes, whether a grid is
enabled by default, and other aesthetic elements.
This function returns an object that can be used in a ``with`` statement
to temporarily change the style parameters.
Parameters
----------
style : dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}
A dictionary of parameters or the name of a preconfigured set.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
style dictionaries. This only updates parameters that are
considered part of the style definition.
Examples
--------
>>> st = axes_style("whitegrid")
>>> set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
>>> import matplotlib.pyplot as plt
>>> with axes_style("white"):
... f, ax = plt.subplots()
... ax.plot(x, y) # doctest: +SKIP
See Also
--------
set_style : set the matplotlib parameters for a seaborn theme
    plotting_context : return a parameter dict to scale plot elements
color_palette : define the color palette for a plot
"""
if style is None:
style_dict = {k: mpl.rcParams[k] for k in _style_keys}
elif isinstance(style, dict):
style_dict = style
else:
styles = ["white", "dark", "whitegrid", "darkgrid", "ticks"]
if style not in styles:
raise ValueError("style must be one of %s" % ", ".join(styles))
# Define colors here
dark_gray = ".15"
light_gray = ".8"
# Common parameters
style_dict = {
"figure.facecolor": "white",
"text.color": dark_gray,
"axes.labelcolor": dark_gray,
"legend.frameon": False,
"legend.numpoints": 1,
"legend.scatterpoints": 1,
"xtick.direction": "out",
"ytick.direction": "out",
"xtick.color": dark_gray,
"ytick.color": dark_gray,
"axes.axisbelow": True,
"image.cmap": "Greys",
"font.family": ["sans-serif"],
"font.sans-serif": ["Arial", "Liberation Sans",
"Bitstream Vera Sans", "sans-serif"],
"grid.linestyle": "-",
"lines.solid_capstyle": "round",
}
# Set grid on or off
if "grid" in style:
style_dict.update({
"axes.grid": True,
})
else:
style_dict.update({
"axes.grid": False,
})
# Set the color of the background, spines, and grids
if style.startswith("dark"):
style_dict.update({
"axes.facecolor": "#EAEAF2",
"axes.edgecolor": "white",
"axes.linewidth": 0,
"grid.color": "white",
})
elif style == "whitegrid":
style_dict.update({
"axes.facecolor": "white",
"axes.edgecolor": light_gray,
"axes.linewidth": 1,
"grid.color": light_gray,
})
elif style in ["white", "ticks"]:
style_dict.update({
"axes.facecolor": "white",
"axes.edgecolor": dark_gray,
"axes.linewidth": 1.25,
"grid.color": light_gray,
})
# Show or hide the axes ticks
if style == "ticks":
style_dict.update({
"xtick.major.size": 6,
"ytick.major.size": 6,
"xtick.minor.size": 3,
"ytick.minor.size": 3,
})
else:
style_dict.update({
"xtick.major.size": 0,
"ytick.major.size": 0,
"xtick.minor.size": 0,
"ytick.minor.size": 0,
})
# Override these settings with the provided rc dictionary
if rc is not None:
rc = {k: v for k, v in rc.items() if k in _style_keys}
style_dict.update(rc)
# Wrap in an _AxesStyle object so this can be used in a with statement
style_object = _AxesStyle(style_dict)
return style_object
def set_style(style=None, rc=None):
"""Set the aesthetic style of the plots.
This affects things like the color of the axes, whether a grid is
enabled by default, and other aesthetic elements.
Parameters
----------
style : dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}
A dictionary of parameters or the name of a preconfigured set.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
style dictionaries. This only updates parameters that are
considered part of the style definition.
Examples
--------
>>> set_style("whitegrid")
>>> set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
See Also
--------
axes_style : return a dict of parameters or use in a ``with`` statement
to temporarily set the style.
set_context : set parameters to scale plot elements
set_palette : set the default color palette for figures
"""
style_object = axes_style(style, rc)
mpl.rcParams.update(style_object)
def plotting_context(context=None, font_scale=1, rc=None):
"""Return a parameter dict to scale elements of the figure.
This affects things like the size of the labels, lines, and other
elements of the plot, but not the overall style. The base context
is "notebook", and the other contexts are "paper", "talk", and "poster",
    which are versions of the notebook parameters scaled by .8, 1.3, and 1.6,
respectively.
This function returns an object that can be used in a ``with`` statement
to temporarily change the context parameters.
Parameters
----------
context : dict, None, or one of {paper, notebook, talk, poster}
A dictionary of parameters or the name of a preconfigured set.
font_scale : float, optional
Separate scaling factor to independently scale the size of the
font elements.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
context dictionaries. This only updates parameters that are
considered part of the context definition.
Examples
--------
>>> c = plotting_context("poster")
>>> c = plotting_context("notebook", font_scale=1.5)
>>> c = plotting_context("talk", rc={"lines.linewidth": 2})
>>> import matplotlib.pyplot as plt
>>> with plotting_context("paper"):
... f, ax = plt.subplots()
... ax.plot(x, y) # doctest: +SKIP
See Also
--------
set_context : set the matplotlib parameters to scale plot elements
axes_style : return a dict of parameters defining a figure style
color_palette : define the color palette for a plot
"""
if context is None:
context_dict = {k: mpl.rcParams[k] for k in _context_keys}
elif isinstance(context, dict):
context_dict = context
else:
contexts = ["paper", "notebook", "talk", "poster"]
if context not in contexts:
raise ValueError("context must be in %s" % ", ".join(contexts))
# Set up dictionary of default parameters
base_context = {
"figure.figsize": np.array([8, 5.5]),
"font.size": 12,
"axes.labelsize": 11,
"axes.titlesize": 12,
"xtick.labelsize": 10,
"ytick.labelsize": 10,
"legend.fontsize": 10,
"grid.linewidth": 1,
"lines.linewidth": 1.75,
"patch.linewidth": .3,
"lines.markersize": 7,
"lines.markeredgewidth": 0,
"xtick.major.width": 1,
"ytick.major.width": 1,
"xtick.minor.width": .5,
"ytick.minor.width": .5,
"xtick.major.pad": 7,
"ytick.major.pad": 7,
}
# Scale all the parameters by the same factor depending on the context
scaling = dict(paper=.8, notebook=1, talk=1.3, poster=1.6)[context]
context_dict = {k: v * scaling for k, v in base_context.items()}
# Now independently scale the fonts
font_keys = ["axes.labelsize", "axes.titlesize", "legend.fontsize",
"xtick.labelsize", "ytick.labelsize", "font.size"]
font_dict = {k: context_dict[k] * font_scale for k in font_keys}
context_dict.update(font_dict)
# Implement hack workaround for matplotlib bug
# See https://github.com/mwaskom/seaborn/issues/344
# There is a bug in matplotlib 1.4.2 that makes points invisible when
# they don't have an edgewidth. It will supposedly be fixed in 1.4.3.
if mpl.__version__ == "1.4.2":
context_dict["lines.markeredgewidth"] = 0.01
# Override these settings with the provided rc dictionary
if rc is not None:
rc = {k: v for k, v in rc.items() if k in _context_keys}
context_dict.update(rc)
# Wrap in a _PlottingContext object so this can be used in a with statement
context_object = _PlottingContext(context_dict)
return context_object
def set_context(context=None, font_scale=1, rc=None):
"""Set the plotting context parameters.
This affects things like the size of the labels, lines, and other
elements of the plot, but not the overall style. The base context
is "notebook", and the other contexts are "paper", "talk", and "poster",
    which are versions of the notebook parameters scaled by .8, 1.3, and 1.6,
respectively.
Parameters
----------
context : dict, None, or one of {paper, notebook, talk, poster}
A dictionary of parameters or the name of a preconfigured set.
font_scale : float, optional
Separate scaling factor to independently scale the size of the
font elements.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
context dictionaries. This only updates parameters that are
considered part of the context definition.
Examples
--------
>>> set_context("paper")
>>> set_context("talk", font_scale=1.4)
>>> set_context("talk", rc={"lines.linewidth": 2})
See Also
--------
plotting_context : return a dictionary of rc parameters, or use in
a ``with`` statement to temporarily set the context.
set_style : set the default parameters for figure style
set_palette : set the default color palette for figures
"""
context_object = plotting_context(context, font_scale, rc)
mpl.rcParams.update(context_object)
class _RCAesthetics(dict):
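    # A dict of rc parameters that also works as a context manager (previous
    # values are restored on exit) and as a decorator that wraps a function in
    # that temporary context.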
def __enter__(self):
rc = mpl.rcParams
self._orig = {k: rc[k] for k in self._keys}
self._set(self)
def __exit__(self, exc_type, exc_value, exc_tb):
self._set(self._orig)
def __call__(self, func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
with self:
return func(*args, **kwargs)
return wrapper
class _AxesStyle(_RCAesthetics):
"""Light wrapper on a dict to set style temporarily."""
_keys = _style_keys
_set = staticmethod(set_style)
class _PlottingContext(_RCAesthetics):
"""Light wrapper on a dict to set context temporarily."""
_keys = _context_keys
_set = staticmethod(set_context)
def set_palette(palette, n_colors=None, desat=None, color_codes=False):
"""Set the matplotlib color cycle using a seaborn palette.
Parameters
----------
palette : hls | husl | matplotlib colormap | seaborn color palette
Palette definition. Should be something that :func:`color_palette`
can process.
n_colors : int
Number of colors in the cycle. The default number of colors will depend
on the format of ``palette``, see the :func:`color_palette`
documentation for more information.
desat : float
Proportion to desaturate each color by.
color_codes : bool
If ``True`` and ``palette`` is a seaborn palette, remap the shorthand
color codes (e.g. "b", "g", "r", etc.) to the colors from this palette.
Examples
--------
>>> set_palette("Reds")
>>> set_palette("Set1", 8, .75)
See Also
--------
color_palette : build a color palette or set the color cycle temporarily
in a ``with`` statement.
set_context : set parameters to scale plot elements
set_style : set the default parameters for figure style
"""
colors = palettes.color_palette(palette, n_colors, desat)
if mpl_ge_150:
from cycler import cycler
cyl = cycler('color', colors)
mpl.rcParams['axes.prop_cycle'] = cyl
else:
mpl.rcParams["axes.color_cycle"] = list(colors)
mpl.rcParams["patch.facecolor"] = colors[0]
if color_codes:
palettes.set_color_codes(palette)
| bsd-3-clause |
tody411/NPR-SFS | npr_sfs/results/compare.py | 1 | 1859 | # -*- coding: utf-8 -*-
## @package npr_sfs.results.compare
#
# npr_sfs.results.compare utility package.
# @author tody
# @date 2015/09/01
import os
import matplotlib.pyplot as plt
from npr_sfs.results.results import batchResults, resultFile, resultDir
from npr_sfs.datasets.loader import loadData
from npr_sfs.io_util.image import loadRGBA
from npr_sfs.plot.window import showMaximize
batch_name="Compare"
_root_dir = os.path.dirname(__file__)
def methodNames():
dirs = os.listdir(_root_dir)
    method_dirs = [method_dir for method_dir in dirs if os.path.isdir(method_dir) and method_dir != batch_name]
return method_dirs
def methodDir(method_name):
return os.path.join(_root_dir, method_name)
def methodFile(method_name, data_name):
return os.path.join(methodDir(method_name), data_name + ".png")
def batch_func(data_name):
method_names = methodNames()
NO_32F = loadData(data_name, loader_func=loadRGBA)
fig = plt.figure(figsize=(10, 4))
fig.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.9, wspace=0.05, hspace=0.1)
font_size = 15
fig.suptitle("NPR-SFS", fontsize=font_size)
num_cols = len(method_names) + 1
fig.add_subplot(1, num_cols, 1)
plt.title("Ground truth", fontsize=font_size)
plt.imshow(NO_32F)
plt.axis('off')
col_id = 2
for method_name in method_names:
method_file = methodFile(method_name, data_name)
N_32F = loadRGBA(method_file)
fig.add_subplot(1, num_cols, col_id)
plt.title(method_name, fontsize=font_size)
plt.imshow(N_32F)
plt.axis('off')
col_id += 1
result_dir = resultDir(batch_name)
result_file = resultFile(result_dir, data_name)
plt.savefig(result_file)
if __name__ == '__main__':
    print(methodNames())
batchResults(batch_func, batch_name)
| mit |
dialounke/pylayers | pylayers/mobility/agent.py | 1 | 11549 | """
.. automodule:: pylayers.mobility.agent
:members:
Agent Class
==================
.. autoclass:: Agent
:members:
"""
from SimPy.SimulationRT import Simulation
#from simpy.simulation import *
from pylayers.mobility.transit.Person import Person
from pylayers.mobility.transit.World import world
from pylayers.mobility.transit.SteeringBehavior import Seek, Separation, Containment, InterpenetrationConstraint, queue_steering_mind
import numpy as np
import networkx as nx
import time
import ConfigParser
import pandas as pd
import pylayers.util.pyutil as pyu
from pylayers.network.network import Node, Network
from pylayers.network.communication import Gcom, TX, RX
from pylayers.location.localization import Localization, PLocalization
from pylayers.gis.layout import Layout
from pylayers.util.utilnet import *
#from pylayers.util.pymysqldb import Database
import pdb
""""
.. currentmodule:: pylayers.mobility.agent
.. autosummary::
:toctree: generated/
"""
class Agent(object):
""" Class Agent
Members
-------
args
ID
name
typ
net
epwr
gcom
sim
wstd
sens
dcond
meca : transit.Person
net : pylayers.network.Network
sim :
PN :
rxt
rxr
"""
def __init__(self, **args):
""" Mobile Agent Init
Parameters
----------
'ID': string
agent ID
'name': string
Agent name
'typ': string
            agent type: 'ag' for moving agent, 'ap' for static access point
'pos' : np.array([])
numpy array containing the initial position of the agent
'roomId': int
Room number where the agent is initialized (Layout.Gr)
'meca_updt': float
update time interval for the mechanical process
'loc': bool
enable/disable localization process of the agent
'loc_updt': float
update time interval for localization process
'L': pylayers.gis.Layout()
'net':pylayers.network.Network(),
'wstd': list of string
            list of radio access technologies used by the agent
'world': transit.world()
Soon deprecated
'save': list of string
list of save method ( soon deprecated)
'sim':Simpy.SimulationRT.Simulation(),
        'epwr': dictionary
            dictionary of emitted power of the transmitter {'wstd#': epwr value}
        'sens' : dictionary
            dictionary of sensitivity of the receiver {'wstd#': sens value}
        'dcond': dictionary
            Not used yet
        'gcom':pylayers.communication.Gcom()
            Communication graph
        'comm_mode': string
            Communication between nodes mode:
            'autonomous': all TOAs are refreshed regularly
            'synchro' : only visible TOAs are refreshed
"""
defaults = {'ID': '0',
'name': 'johndoe',
'typ': 'ag',
'color': 'k',
'pdshow': False,
'pos': np.array([]),
'roomId': -1,
'froom': [],
'wait': [],
'seed': 0,
'cdest': 'random',
'meca_updt': 0.1,
'loc': False,
'loc_updt': 0.5,
'loc_method': ['geo'],
'L': Layout(),
'network': True,
'net': Network(),
'wstd': ['rat1'],
'world': world(),
'save': [],
'sim': Simulation(),
'epwr': {},
'sens': {},
'dcond': {},
'gcom': Gcom(),
'comm_mode': 'autonomous'}
for key, value in defaults.items():
if key not in args:
args[key] = value
self.args = args
self.ID = args['ID']
self.name = args['name']
self.typ = args['typ']
# Create Network
self.net = args['net']
self.epwr = args['epwr']
self.gcom = args['gcom']
self.sim = args['sim']
self.wstd = args['wstd']
if args['epwr'] == {}:
self.epwr = {x: 0 for x in self.wstd}
else:
self.epwr = args['epwr']
if args['sens'] == {}:
self.sens = {x: -180 for x in self.wstd}
else:
self.sens = args['sens']
try:
self.dcond = args['dcond']
except:
pass
# check if node id already given
if self.ID in self.net.nodes():
raise NameError(
                'another agent has the ID: ' + self.ID + '. Please use another ID')
if self.typ == 'ag':
# mechanical init
self.meca = Person(ID=self.ID,
color=args['color'],
pdshow=args['pdshow'],
roomId=args['roomId'],
L=args['L'],
net=self.net,
interval=args['meca_updt'],
wld=args['world'],
sim=args['sim'],
seed=args['seed'],
moving=True,
froom=args['froom'],
wait=args['wait'],
cdest=args['cdest'],
save=args['save']
)
self.meca.behaviors = [Seek(), Containment(),
Separation(), InterpenetrationConstraint()]
self.meca.steering_mind = queue_steering_mind
# Network init
self.node = Node(ID=self.ID,name=self.name, p=conv_vecarr(self.meca.position),
t=self.sim.now(), wstd=args['wstd'],
epwr=self.epwr, sens=self.sens, typ=self.typ)
self.net.add_nodes_from(self.node.nodes(data=True))
self.sim.activate(self.meca, self.meca.move(), 0.0)
self.PN = self.net.node[self.ID]['PN']
# Communication init
if args['comm_mode'] == 'synchro' and args['network']:
# The TOA requests are made every refreshTOA time ( can be modified in agent.ini)
# This Mode will be deprecated in future version
self.rxr = RX(net=self.net,
ID=self.ID,
dcond=self.dcond,
gcom=self.gcom,
sim=self.sim)
self.rxt = RX(net=self.net,
ID=self.ID,
dcond=self.dcond,
gcom=self.gcom,
sim=self.sim)
self.sim.activate(self.rxr, self.rxr.refresh_RSS(), 0.0)
self.sim.activate(self.rxt, self.rxt.refresh_TOA(), 0.0)
elif args['comm_mode'] == 'autonomous' and args['network']:
# The requests are made by node only when they are in
# visibility of pairs.
# self.rxr only manage a refresh RSS process
self.rxr = RX(net=self.net, ID=self.ID,
gcom=self.gcom, sim=self.sim)
# self.tx manage all requests to other nodes
self.tx = TX(net=self.net, ID=self.ID,
gcom=self.gcom, sim=self.sim)
# self.tx replies to requests from self.tx
self.rx = RX(net=self.net, ID=self.ID,
gcom=self.gcom, sim=self.sim)
self.sim.activate(self.rxr, self.rxr.refresh_RSS(), 0.0)
self.sim.activate(self.tx, self.tx.request(), 0.0)
self.sim.activate(self.rx, self.rx.wait_request(), 0.0)
elif self.typ == 'ap':
if args['roomId'] == -1:
self.node = Node(ID=self.ID, p=self.args['pos'],
t=self.sim.now(), wstd=args['wstd'],
epwr=self.epwr, sens=self.sens, typ=self.typ)
else:
pp = np.array(args['L'].Gr.pos[self.args['roomId']])
self.node = Node(
ID=self.ID, p=pp, t=self.sim.now(), wstd=args['wstd'],
epwr=self.epwr, sens=self.sens, typ=self.typ)
self.net.add_nodes_from(self.node.nodes(data=True))
self.sim = args['sim']
self.PN = self.net.node[self.ID]['PN']
self.PN.node[self.ID]['pe'] = self.net.node[self.ID]['p']
if args['comm_mode'] == 'autonomous' and args['network']:
self.rx = RX(net=self.net, ID=self.ID,
gcom=self.gcom, sim=self.sim)
self.sim.activate(self.rx, self.rx.wait_request(), 0.0)
p = self.args['pos']
self.posdf = pd.DataFrame(
{'t': pd.Timestamp(0), 'x': p[0], 'y': p[1], 'z': p[2],
'vx': np.array([0.0]), 'vy': np.array([0.0]),
'ax': np.array([0.0]), 'ay': np.array([0.0]),
}, columns=['t', 'x', 'y', 'z', 'vx', 'vy', 'ax', 'ay'], index=np.array([0]))
else:
raise NameError(
            'wrong agent typ, it must be either agent (ag) or access point (ap)')
if self.typ == 'ap':
self.MoA = 1
else:
self.MoA = 0
if 'mysql' in args['save']:
config = ConfigParser.ConfigParser()
config.read(pyu.getlong('simulnet.ini', 'ini'))
sql_opt = dict(config.items('Mysql'))
db = Database(sql_opt['host'], sql_opt['user'],
sql_opt['passwd'], sql_opt['dbname'])
db.writenode(self.ID, self.name, self.MoA)
if 'txt' in args['save']:
pyu.writenode(self)
if self.typ != 'ap' and args['loc']:
self.loc = Localization(net=self.net, ID=self.ID,
method=args['loc_method'])
self.Ploc = PLocalization(loc=self.loc,
loc_updt_time=args['loc_updt'],
tx=self.tx,
sim=args['sim'])
self.sim.activate(self.Ploc, self.Ploc.run(), 1.5)
def __repr__(self):
s = 'General Agent info \n********************\n'
s = s + 'name : ' + self.name + '\n'
s = s + 'ID: ' + self.ID + '\n'
s = s + 'typ: ' + self.typ
s = s + '\n\n More Agent information about:'
        s = s + '\n+ Mechanical => self.meca'
        s = s + '\n+ Network => self.net'
        s = s + '\n+ Personal Network => self.PN'
s = s + '\n+ Localization => self.loc\n\n'
try:
s = s + self.PN.__repr__() + '\n\n'
except:
s = s + 'No network simulated'
if self.typ != 'ap':
s = s + self.meca.__repr__() + '\n\n'
try:
s = s + self.loc.__repr__() + '\n\n'
except:
s = s + 'no localization simulated'
return s
| mit |
cleverhans-lab/cleverhans | cleverhans_v3.1.0/scripts/plot_success_fail_curve.py | 1 | 1459 | #!/usr/bin/env python3
"""
Plots a success-fail curve ( https://openreview.net/forum?id=H1g0piA9tQ )
Usage:
plot_success_fail_curve.py model.joblib
plot_success_fail_curve.py model1.joblib model2.joblib
This script is mostly intended to rapidly visualize success-fail curves
during model development and testing.
To make nicely labeled plots formatted to fit the page / column of a
publication, you should probably write your own script that calls some
of the same plotting commands.
"""
from matplotlib import pyplot
import tensorflow as tf
from cleverhans.utils_tf import silence
silence()
# silence call must precede these imports. pylint doesn't like that
# pylint: disable=C0413
from cleverhans.compat import flags
from cleverhans.plot.success_fail import DEFAULT_FAIL_NAMES
from cleverhans.plot.success_fail import plot_report_from_path
FLAGS = flags.FLAGS
def main(argv=None):
"""Takes the path to a directory with reports and renders success fail plots."""
report_paths = argv[1:]
fail_names = FLAGS.fail_names.split(",")
for report_path in report_paths:
plot_report_from_path(report_path, label=report_path, fail_names=fail_names)
pyplot.legend()
pyplot.xlim(-0.01, 1.0)
pyplot.ylim(0.0, 1.0)
pyplot.show()
if __name__ == "__main__":
flags.DEFINE_string(
"fail_names",
",".join(DEFAULT_FAIL_NAMES),
"Names of adversarial datasets for failure rate",
)
tf.app.run()
| mit |
d-meiser/lindblad | examples/scanEIT.py | 1 | 3959 | #!/usr/bin/python
import sys
import subprocess
import numpy as np
try:
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
print "matplotlib not available"
have_matplotlib = False
try:
import joblib
from multiprocessing import cpu_count
cpuNum = cpu_count()
# use maximum number of cores available
if cpuNum > 1:
n_jobs = cpuNum
print "Using joblib with", n_jobs, "processes."
else:
n_jobs = 1
print "Using joblib with 1 process."
have_joblib = True
except ImportError:
print "joblib not available."
have_joblib = False
if (len(sys.argv) == 2):
# one argument has been provided for output plot name
outputFilename = str(sys.argv[1])
elif (len(sys.argv) == 1):
# no filename provided, use default
outputFilename = 'test.png'
else:
# improper usage, use default and inform
print("Usage: python scanEIT-SmallRange.py filename")
outputFilename = 'test.png'
def get_steady_state(arguments = None):
if arguments:
output = subprocess.check_output(["./SteadyStateEIT"] +
[str(a) for a in arguments])
else:
output = subprocess.check_output(["./SteadyStateEIT"])
density_matrix = np.matrix(output)
density_matrix = density_matrix.reshape(4, 8)
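    # Columns alternate real and imaginary parts; recombine them into a
    # 4x4 complex density matrix.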
density_matrix = density_matrix[:, 0:8:2] + 1.0j * density_matrix[:,1:8:2]
return density_matrix
def get_polarization(arguments = None):
density_matrix = get_steady_state(arguments)
return [density_matrix[0, 3] , density_matrix[2, 3]]
def absorption(polarizationArray):
# post-process density matrix elements
# to get absorption from atomic polarization
return polarizationArray[:,0].imag - polarizationArray[:,1].imag
def rotation(polarizationArray):
# post-process density matrix elements
# to get light polarization rotation from atomic polarization
return polarizationArray[:,0].real + polarizationArray[:,1].real
def compute_polarization(OmegaR, OmegaBs, Delta, gamma, Gamma, deltaB):
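    # Evaluate the steady-state polarization for every magnetic-field value in
    # OmegaBs, in parallel via joblib when it is available.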
if have_joblib:
polarizations = joblib.Parallel(n_jobs = n_jobs)(
joblib.delayed(get_polarization)(
[OmegaR, ob, Delta, gamma, Gamma, deltaB])
for ob in OmegaBs)
else:
polarizations = [get_polarization([OmegaR, ob, Delta, gamma, Gamma, deltaB]) for ob in OmegaBs]
polarizations = np.array(polarizations)
return polarizations
def main(argv):
Bunits = 700e3 * 2.0 * np.pi
OmegaR = 1.25e6 * 2.0 * np.pi
gamma = 1.0e3 * 2.0 * np.pi
Gamma = 6.0e6 * 2.0 * np.pi
Delta = 0.0 * Gamma;
OmegaB = np.arange(-0.1, 0.1, 0.0005) * Bunits
deltaB = Bunits * 0.01
NonZeroPolarization = compute_polarization(OmegaR, OmegaB, Delta, gamma, Gamma, deltaB)
absorptionNonZeroField = absorption(NonZeroPolarization)
rotationNonZeroField = rotation(NonZeroPolarization)
deltaB = Bunits * 0.0
ZeroPolarization = compute_polarization(OmegaR, OmegaB, Delta, gamma, Gamma, deltaB)
absorptionZeroField = absorption(ZeroPolarization)
rotationZeroField = rotation(ZeroPolarization)
if not have_matplotlib:
return
plt.subplot(2,1,1)
plt.plot(OmegaB / (Bunits), 1.0e3 * absorptionZeroField)
plt.plot(OmegaB / (Bunits), 1.0e3 *
absorptionNonZeroField,'--r')
plt.ylabel(r'${\rm Absorption\; [arb. units]}$')
plt.subplot(2,1,2)
plt.plot(OmegaB / (Bunits), 1.0e3 * rotationZeroField)
plt.plot(OmegaB / (Bunits), 1.0e3 *
rotationNonZeroField,'--r')
plt.xlabel(r'$B_z({\rm G})$')
    plt.ylabel(r'${\rm Faraday\ Rotation\; [arb. units]}$')
plt.gcf().set_size_inches(4, 6)
plt.gcf().subplots_adjust(bottom = 0.1, left = 0.2, top = 0.97, right = 0.95)
plt.savefig(outputFilename,format='png')
if __name__== "__main__":
main(sys.argv)
| gpl-3.0 |
NunoEdgarGub1/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running an SVC
(support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse-of-dimensionality setting
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
OshynSong/scikit-learn | examples/svm/plot_custom_kernel.py | 171 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
(2 0)
k(X, Y) = X ( ) Y.T
(0 1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
anna-effeindzourou/trunk | examples/anna_scripts/triax/triaxial_uniformrm.py | 1 | 11635 | # -*- coding: utf-8 -*-
from yade import ymport, utils,pack,export
import gts,os
from yade import geom
#import matplotlib
from yade import plot
#from pylab import *
#import os.path, locale
#### set False when running in batch mode
#defaultTable = True
defaultTable = False
####-------------------------------------
####-------------------------------------
utils.readParamsFromTable(
rm = 0.33,
noTableOk = True
)
from yade.params.table import *
print 'rm=',rm
O.tags['description']='triaxial_rm_'+str(rm)
#################################
##### FUNCTIONS ####
#################################
def hMax(n):
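    # Return the largest coordinate of any body along axis n, plus that body's radius.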
idHMax=0
hMax=-1000000.0
for i in O.bodies:
h=i.state.pos[n]
if (h>hMax):
hMax=h
idHMax=i.id
hMax=hMax+O.bodies[idHMax].shape.radius
return (hMax)
def hMin(n):
idHMin=0
hMin=100000.0
for i in O.bodies:
h=i.state.pos[n]
if (h<hMin):
hMin=h
idHMin=i.id
hMin=hMin-O.bodies[idHMin].shape.radius
return (hMin)
# Function to calculate rmin (minimum radius) and rmax (maximum radius)
def MinMax():
rmax=0
rmin=10
r=0
for i in O.bodies:
if(type(i.shape)==Sphere):
r=i.shape.radius
if(r>rmax):
rmax=r
if(r<rmin):
rmin=r
l=[rmin,rmax]
return (l)
def sup():
for i in O.bodies:
if (type(i.shape)==Sphere) and (i.state.pos[2]>0.098):
O.bodies.erase(i.id)
def scalar(u,v):
ps=u[0]*v[0]+u[1]*v[1]+u[2]*v[2]
return ps
def cross(u,v):
ps=Vector3(u[1]*v[2]-u[2]*v[1], u[2]*v[0]-u[0]*v[2] ,u[0]*v[1]-u[1]*v[0])
return ps
def limitfinder():
for b in O.bodies:
if(b.state.pos[2]>=L-2*radius):
if isinstance(b.shape,GridNode):
top_boundary.append(b.id)
b.shape.color=(1,0,0)
b.state.blockedDOFs='z'
if(b.state.pos[2]<0.1*radius ):
if isinstance(b.shape,GridNode):
bottom_boundary.append(b.id)
b.state.blockedDOFs='z'
b.shape.color=(1,0,0)
##############################
##### SCRIPT ####
##############################
try:
os.mkdir('data')
except:
pass
try:
os.mkdir('paraview')
except:
pass
isBatch = runningInBatch()
####################
### ENGINES ###
####################
O.engines=[
ForceResetter(),
InsertionSortCollider([
Bo1_Sphere_Aabb(),
Bo1_Wall_Aabb(),
Bo1_PFacet_Aabb(),
Bo1_Facet_Aabb(),
]),
InteractionLoop([
Ig2_GridNode_GridNode_GridNodeGeom6D(),
Ig2_GridConnection_GridConnection_GridCoGridCoGeom(),
Ig2_Sphere_PFacet_ScGridCoGeom(),
Ig2_Sphere_Sphere_ScGeom(),
Ig2_Facet_Sphere_ScGeom(),
Ig2_Wall_Sphere_ScGeom()
],
[Ip2_CohFrictMat_CohFrictMat_CohFrictPhys(setCohesionNow=True,setCohesionOnNewContacts=True),
Ip2_FrictMat_FrictMat_FrictPhys()],
[Law2_ScGeom6D_CohFrictPhys_CohesionMoment(),
Law2_ScGeom_FrictPhys_CundallStrack(),
Law2_ScGridCoGeom_FrictPhys_CundallStrack(),
Law2_GridCoGridCoGeom_FrictPhys_CundallStrack()
]
),
]
######################
### PROPERTIES ###
######################
radius=0.0025*rm
sigma=-3e6
#### Parameters of a rectangular grid ###
L=0.205 #length [m]
l=0.101/2. #half width (radius) [m]
nbL=36 #number of nodes for the length [#] must be even
nbl=44 #number of nodes for the perimeter [#] MUST be a multiple of 4 !!!
#nbL=1 #number of nodes for the length [#] must be even
#nbl=4 #number of nodes for the perimeter [#] MUST be a multiple of 4 !!!
r=radius
color=[155./255.,155./255.,100./255.]
oriBody = Quaternion(Vector3(0,0,1),(pi/2))
nodesIds=[]
nodesIds1=[]
cylIds=[]
pfIds=[]
top_boundary=[]
bottom_boundary=[]
####################
### MATERIAL ###
####################
poisson=0.28
E=2*7.9e10*(1+poisson) ##1e11
density=7.8e10
Et=0
Emem=E/1e3
frictionAngle=0.096
frictionAngleW=0.228
O.materials.append(CohFrictMat(young=Emem,poisson=poisson,density=density,frictionAngle=0,normalCohesion=1e19,shearCohesion=1e19,momentRotationLaw=False,alphaKr=0,label='NodeMat'))
O.materials.append(FrictMat(young=Emem,poisson=poisson,density=density,frictionAngle=0,label='memMat'))
O.materials.append(FrictMat(young=E,poisson=poisson,density=density,frictionAngle=frictionAngleW,label='Wallmat'))
O.materials.append(FrictMat(young=E,poisson=poisson,density=density,frictionAngle=frictionAngle,label='Smat'))
##############################
### SAMPLE GENERATION ###
##############################
kw={'color':[0.6,0.6,0.6],'wire':False,'dynamic':True,'material':3}
#pile=ymport.text('spheres.txt',**kw)
#pile2=O.bodies.append(pile)
#sup()
#print hMin(2), hMax(2)
#zmin=hMin(2)
#zmax=hMax(2)
##L=hMax(2)
##################################
##### MEMBRANE GENERATION ###
##################################
#mesh=2
#Create all nodes first :
for i in range(0,nbL+1):
for j in range(0,nbl):
z=i*L/float(nbL)
y=l*sin(2*pi*j/float(nbl))
x=l*cos(2*pi*j/float(nbl))
nodesIds.append( O.bodies.append(gridNode([x,y,z],r,wire=False,fixed=False,material='NodeMat',color=color)) )
##Create connection between the nodes
for i in range(0,nbL+1):
for j in range(0,nbl-1):
O.bodies.append( gridConnection(nodesIds[i*nbl+j],nodesIds[i*nbl+j+1],r,color=color,mask=5,material='memMat') )
for i in range(0,nbL,1):
for j in range(0,nbl):
O.bodies.append( gridConnection(nodesIds[i*nbl+j],nodesIds[(i+1)*nbl+j],r,color=color,mask=5,material='memMat') )
for i in range(-1,nbL):
j=nbl
O.bodies.append( gridConnection(nodesIds[i*nbl+j],nodesIds[(i+1)*nbl+j-1],r,color=color,mask=5,material='memMat') )
for i in range(0,nbL):
for j in range(0,nbl-1):
if (j%2==0):
O.bodies.append( gridConnection(nodesIds[i*nbl+j],nodesIds[(i+1)*nbl+j+1],r,color=color,mask=5,material='memMat') )
else:
O.bodies.append( gridConnection(nodesIds[(i+1)*nbl+j],nodesIds[i*nbl+j+1],r,color=color,mask=5,material='memMat') )
for i in range(0,nbL):
j=nbl
#O.bodies[nodesIds[(i-1)*nbl+j]].shape.color=Vector3(155./255.,155./255.,1.)
#O.bodies[nodesIds[(i)*nbl+j-1]].shape.color=Vector3(1,0,0)
O.bodies.append( gridConnection(nodesIds[(i-1)*nbl+j],nodesIds[(i+1)*nbl+j-1],r,color=color,mask=5,material='memMat') )
###Create PFacets
##wire=True
for i in range(0,nbL):
for j in range(0,nbl-1):
if (j%2==0):
pfIds.append(O.bodies.append(pfacet(nodesIds[i*nbl+j],nodesIds[(i+1)*nbl+j],nodesIds[(i+1)*nbl+j+1],color=color,mask=5,material='memMat')))
pfIds.append(O.bodies.append(pfacet(nodesIds[i*nbl+j],nodesIds[(i+1)*nbl+j+1],nodesIds[(i)*nbl+j+1],color=color,mask=5,material='memMat')))
else:
pfIds.append(O.bodies.append(pfacet(nodesIds[i*nbl+j],nodesIds[(i+1)*nbl+j],nodesIds[(i)*nbl+j+1],color=color,mask=5,material='memMat')))
pfIds.append(O.bodies.append(pfacet(nodesIds[i*nbl+j+1],nodesIds[(i+1)*nbl+j],nodesIds[(i+1)*nbl+j+1],color=color,mask=5,material='memMat')))
for i in range(0,nbL,1):
j=nbl
pfIds.append(O.bodies.append(pfacet( nodesIds[i*nbl+j],nodesIds[(i-1)*nbl+j],nodesIds[(i+1)*nbl+j-1],color=color,material='memMat' )))
pfIds.append(O.bodies.append(pfacet( nodesIds[(i)*nbl+j-1],nodesIds[(i+1)*nbl+j-1],nodesIds[(i-1)*nbl+j],color=color,material='memMat' )))
limitfinder()
#########################
##### WALL GENERATION ##
#########################
O.materials.append(FrictMat(young=E,poisson=poisson,density=density,frictionAngle=frictionAngleW,label='Wallmat'))
topPlate=utils.wall(position=hMax(2)+radius,sense=0, axis=2,color=Vector3(1,0,0),material='Wallmat')
O.bodies.append(topPlate)
bottomPlate=utils.wall(position=-hMin(2)-radius,sense=0, axis=2,color=Vector3(1,0,0),material='Wallmat')
O.bodies.append(bottomPlate)
###################
#### APPLY LOAD ##
###################
normalVEL=0
loading=True
S0=pi*l**2
normalSTRESS=sigma
shearing=False
sigmaN=0
#### APPLY CONFINING PRESSURE
def Apply_load():
global sigmaN, Fn, top, load,shearing,loading,u
Fn=abs(O.forces.f(topPlate.id)[2])
sigmaN=Fn/S0
if abs((normalSTRESS-sigmaN)/normalSTRESS)<0.001:
topPlate.state.vel[2]=0
def Apply_confiningpressure():
#print 'Apply_confiningpressure'
for i in pfIds:
e0 =O.bodies[i].shape.node3.state.pos - O.bodies[i].shape.node1.state.pos
e1 =O.bodies[i].shape.node2.state.pos - O.bodies[i].shape.node1.state.pos
e2 =O.bodies[i].shape.node2.state.pos - O.bodies[i].shape.node3.state.pos
P=(O.bodies[i].shape.node1.state.pos+O.bodies[i].shape.node2.state.pos+O.bodies[i].shape.node3.state.pos)/3
#print e0,e1,e2
#nodesIds.append( O.bodies.append(gridNode([P[0],P[1],P[2]],r,wire=False,fixed=True,material='NodeMat',color=color)) )
#print 'P=',P
v0 = e0
v1 = e1
v2 = P - O.bodies[i].shape.node1.state.pos
##// Compute dot products
dot00 = scalar(v0,v0)
dot01 = scalar(v0,v1)
dot02 = scalar(v0,v2)
dot11 = scalar(v1,v1)
dot12 = scalar(v1,v2)
##// Compute the barycentric coordinates of the projection P
invDenom = 1 / (dot00 * dot11 - dot01 * dot01)
p1 = (dot11 * dot02 - dot01 * dot12) * invDenom
p2 = (dot00 * dot12 - dot01 * dot02) * invDenom
p3 = 1-p1-p2
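        # Edge lengths and triangle area via Heron's formula.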
a = sqrt(scalar(e0,e0))
b = sqrt(scalar(e1,e1))
c = sqrt(scalar(e2,e2))
s=0.5*(a+b+c)
area= sqrt(s*(s-a)*(s-b)*(s-c))
Fapplied=area*sigma
normal = cross(e0,e1)
normal=normal/normal.norm()
F=Fapplied
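        # Distribute the confining-pressure force onto the three PFacet nodes
        # according to the barycentric weights of the facet centroid.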
p1normal=F*p1*normal
p2normal=F*p2*normal
p3normal=F*p3*normal
O.forces.addF(O.bodies[i].shape.node1.id,p1normal,permanent=False)
O.forces.addF(O.bodies[i].shape.node2.id,p2normal,permanent=False)
O.forces.addF(O.bodies[i].shape.node3.id,p3normal,permanent=False)
#Apply_confiningpressure()
sigma3=0
def check_confiningpressure():
global sigma3
sigma3=0
for i in pfIds:
e0 =O.bodies[i].shape.node3.state.pos - O.bodies[i].shape.node1.state.pos
e1 =O.bodies[i].shape.node2.state.pos - O.bodies[i].shape.node1.state.pos
e2 =O.bodies[i].shape.node2.state.pos - O.bodies[i].shape.node3.state.pos
a = sqrt(scalar(e0,e0))
b = sqrt(scalar(e1,e1))
c = sqrt(scalar(e2,e2))
s=0.5*(a+b+c)
area= sqrt(s*(s-a)*(s-b)*(s-c))
F=(O.forces.f(O.bodies[i].shape.node1.id) + O.forces.f(O.bodies[i].shape.node2.id)+O.forces.f(O.bodies[i].shape.node3.id)).norm()
sigma3=sigma3+F/area
#print sigma3
return sigma3
pos=topPlate.state.pos[2]
def dataCollector():
global pos,p,q,sigma1
#if(pos<0.16):
#O.wait()
#saveData()
#O.exitNoBacktrace()
S=pi*l**2
Fnt=O.forces.f(topPlate.id)[2]
Fnb=O.forces.f(bottomPlate.id)[2]
sigma1=Fnt/S
sigma3=check_confiningpressure()
pos=topPlate.state.pos[2]
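    # Deviatoric stress q and mean stress p, taking the confining pressure at its
    # nominal value of 3 MPa.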
q=(sigma1-3e6)
p=(sigma1+2*3e6)/2
plot.addData(t=O.time,pos=pos,Fnt=Fnt,Fnb=Fnb,sigma1=sigma1,sigma3=sigma3,unbF=unbalancedForce(),p=p,q=q)
def saveData():
plot.saveDataTxt('data/'+O.tags['description']+'.dat',vars=('t','pos','Fnt','Fnb','sigma1','sigma3','unbF'))
plot.plots={'p':('q')}
#### MOVE TOP AND BOTTOM WALL
#v=1.7e-03
v=1.7e-05
def moveWall(v):
topPlate.state.vel=(0,0,-v)
#bottomPlate.state.vel=(0,0,v)
#g=-9.81
g=0
#moveWall(v)
#limitfinder()
###########################
##### ENGINE DEFINITION ##
###########################
O.dt=0.5*PWaveTimeStep()
O.engines=O.engines+[
#PyRunner(iterPeriod=1,initRun=True,command='Apply_load()'),
PyRunner(iterPeriod=1,dead=False,command='Apply_confiningpressure()'),
#PyRunner(iterPeriod=1,initRun=True,command='Apply_load()'),
NewtonIntegrator(damping=0.7,gravity=(0,0,g),label='Newton'),
#PyRunner(initRun=True,iterPeriod=1,command='dataCollector()'),
#VTKRecorder(iterPeriod=500,initRun=True,fileName='paraview/'+O.tags['description']+'_',recorders=['spheres','velocity']),
]
if not isBatch:
# VISUALIZATION
from yade import qt
qt.Controller()
#qtv = qt.View()
#qtr = qt.Renderer()
plot.plot(noShow=False, subPlots=True)
#O.run(5000)
#moveWall(v)
else:
O.run(1,True)
moveWall(v)
O.wait()
saveData() | gpl-2.0 |
gotomypc/scikit-learn | sklearn/linear_model/tests/test_omp.py | 272 | 7752 | # Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
| bsd-3-clause |
dkriegner/xrayutilities | lib/xrayutilities/materials/material.py | 1 | 66506 | # This file is part of xrayutilities.
#
# xrayutilities is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2009 Eugen Wintersberger <[email protected]>
# Copyright (C) 2009-2020 Dominik Kriegner <[email protected]>
# Copyright (C) 2012 Tanja Etzelstorfer <[email protected]>
"""
Classes describing materials. Materials are divided with respect to their
crystalline state into either Amorphous or Crystal types. While the crystalline
state is well defined for most materials, a few are also included as amorphous,
which can be useful for calculating their optical properties.
"""
import abc
import copy
import numbers
import operator
import re
import warnings
from math import ceil, copysign, isclose
import numpy
import scipy.optimize
from .. import config, math, utilities
from ..exception import InputError
from ..math import VecCross, VecDot, VecNorm
from . import cif, elements
from .atom import Atom
from .spacegrouplattice import WyckoffBase
numpy.seterr(divide='ignore', invalid='ignore')
map_ijkl2ij = {"00": 0, "11": 1, "22": 2,
"12": 3, "20": 4, "01": 5,
"21": 6, "02": 7, "10": 8}
map_ij2ijkl = {"0": [0, 0], "1": [1, 1], "2": [2, 2],
"3": [1, 2], "4": [2, 0], "5": [0, 1],
"6": [2, 1], "7": [0, 2], "8": [1, 0]}
def index_map_ijkl2ij(i, j):
return map_ijkl2ij["%i%i" % (i, j)]
def index_map_ij2ijkl(ij):
return map_ij2ijkl["%i" % ij]
def Cij2Cijkl(cij):
"""
Converts the elastic constants matrix (tensor of rank 2) to
the full rank 4 cijkl tensor.
Parameters
----------
cij : array-like
(6, 6) cij matrix
Returns
-------
    cijkl : ndarray
(3, 3, 3, 3) cijkl tensor as numpy array
"""
# first have to build a 9x9 matrix from the 6x6 one
m = numpy.zeros((9, 9), dtype=numpy.double)
m[0:6, 0:6] = cij[:, :]
m[6:9, 0:6] = cij[3:6, :]
m[0:6, 6:9] = cij[:, 3:6]
m[6:9, 6:9] = cij[3:6, 3:6]
# now create the full tensor
cijkl = numpy.empty((3, 3, 3, 3), dtype=numpy.double)
for i in range(0, 3):
for j in range(0, 3):
for k in range(0, 3):
for n in range(0, 3):
mi = index_map_ijkl2ij(i, j)
mj = index_map_ijkl2ij(k, n)
cijkl[i, j, k, n] = m[mi, mj]
return cijkl
def Cijkl2Cij(cijkl):
"""
Converts the full rank 4 tensor of the elastic constants to
the (6, 6) matrix of elastic constants.
Parameters
----------
    cijkl : ndarray
(3, 3, 3, 3) cijkl tensor as numpy array
Returns
-------
cij : array-like
(6, 6) cij matrix
"""
cij = numpy.empty((6, 6), dtype=numpy.double)
for i in range(6):
for j in range(6):
ij = index_map_ij2ijkl(i)
kl = index_map_ij2ijkl(j)
cij[i, j] = cijkl[ij[0], ij[1], kl[0], kl[1]]
return cij
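# A brief consistency sketch (not from the original module): the two helpers
# above are exact inverses for any (6, 6) input, so a round trip reproduces
# the Voigt matrix. The test values are arbitrary.
# >>> cij = numpy.arange(36.).reshape(6, 6)
# >>> numpy.allclose(Cijkl2Cij(Cij2Cijkl(cij)), cij)
# True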
class Material(utilities.ABC):
"""
    Base class for all Materials. Common properties of amorphous and
    crystalline materials are described by this class, from which Amorphous and
    Crystal are derived.
"""
def __init__(self, name, cij=None):
if cij is None:
self.cij = numpy.zeros((6, 6), dtype=numpy.double)
self.cijkl = numpy.zeros((3, 3, 3, 3), dtype=numpy.double)
elif isinstance(cij, (tuple, list, numpy.ndarray)):
self.cij = numpy.asarray(cij, dtype=numpy.double)
self.cijkl = Cij2Cijkl(self.cij)
else:
raise TypeError("Elastic constants must be a list or numpy array!")
self.name = name
self.transform = None
self._density = None
def __getattr__(self, name):
if name.startswith("c"):
index = name[1:]
if len(index) > 2:
raise AttributeError("Cij indices must be between 1 and 6")
i = int(index[0])
j = int(index[1])
if i > 6 or i < 1 or j > 6 or j < 1:
raise AttributeError("Cij indices must be between 1 and 6")
if callable(self.transform):
cij = Cijkl2Cij(self.transform(Cij2Cijkl(self.cij)))
else:
cij = self.cij
return cij[i - 1, j - 1]
else:
object.__getattribute__(self, name)
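    # Note on the __getattr__ hook above: it exposes the Voigt constants as
    # virtual attributes, e.g. ``mat.c11`` returns cij[0, 0] and ``mat.c44``
    # returns cij[3, 3] for a Material instance ``mat`` (placeholder name),
    # with ``self.transform`` applied first if one is set.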
def _getmu(self):
return self.cij[3, 3]
def _getlam(self):
return self.cij[0, 1]
def _getnu(self):
return self.lam / 2. / (self.mu + self.lam)
def _getdensity(self):
return self._density
density = property(_getdensity)
mu = property(_getmu)
lam = property(_getlam)
nu = property(_getnu)
@abc.abstractmethod
def delta(self, en='config'):
"""
abstract method which every implementation of a Material has to
override
"""
pass
@abc.abstractmethod
def ibeta(self, en='config'):
"""
abstract method which every implementation of a Material has to
override
"""
pass
def chi0(self, en='config'):
"""
calculates the complex chi_0 values often needed in simulations.
They are closely related to delta and beta
(n = 1 + chi_r0/2 + i*chi_i0/2 vs. n = 1 - delta + i*beta)
"""
return (-2 * self.delta(en) + 2j * self.ibeta(en))
def idx_refraction(self, en="config"):
"""
function to calculate the complex index of refraction of a material
in the x-ray range
Parameters
----------
        en : float or str, optional
            energy of the x-rays in eV, if omitted the value from the
            xrayutilities configuration is used
Returns
-------
        complex
            the complex index of refraction n
"""
n = 1. - self.delta(en) + 1.j * self.ibeta(en)
return n
def critical_angle(self, en='config', deg=True):
"""
calculate critical angle for total external reflection
Parameters
----------
en : float or str, optional
energy of the x-rays in eV, if omitted the value from the
xrayutilities configuration is used
deg : bool, optional
return angle in degree if True otherwise radians (default:True)
Returns
-------
float
Angle of total external reflection
"""
rn = 1. - self.delta(en)
alphac = numpy.arccos(rn)
if deg:
alphac = numpy.degrees(alphac)
return alphac
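    # Sketch relating the exact expression above to the common small-angle
    # estimate alpha_c ~ sqrt(2 * delta); with delta of order 1e-5 in the hard
    # x-ray range both forms agree. The numeric value is illustrative only.
    # >>> numpy.degrees(numpy.arccos(1 - 7.6e-6)), numpy.degrees(numpy.sqrt(2 * 7.6e-6))
    # (0.223..., 0.223...)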
def absorption_length(self, en='config'):
"""
wavelength dependent x-ray absorption length defined as
mu = lambda/(2*pi*2*beta) with lambda and beta as the x-ray
wavelength and complex part of the refractive index respectively.
Parameters
----------
en : float or str, optional
energy of the x-rays in eV
Returns
-------
float
the absorption length in um
"""
if isinstance(en, str) and en == 'config':
en = utilities.energy(config.ENERGY)
return utilities.en2lam(en) / (2 * numpy.pi * self.ibeta(en) * 2) / 1e4
def __str__(self):
ostr = "%s: %s\n" % (self.__class__.__name__, self.name)
if numpy.any(self.cij):
ostr += "Elastic tensor (6x6):\n"
d = numpy.get_printoptions()
numpy.set_printoptions(precision=2, linewidth=78, suppress=False)
ostr += str(self.cij) + '\n'
numpy.set_printoptions(**d)
return ostr
class Amorphous(Material):
"""
amorphous materials are described by this class
"""
def __init__(self, name, density, atoms=None, cij=None):
"""
constructor of an amorphous material. The amorphous material is
described by its density and atom composition.
Parameters
----------
name : str
name of the material. To allow automatic parsing of the chemical
elements use the abbreviation of the chemical element from the
periodic table. To specify alloys, use e.g. 'Ir0.2Mn0.8' or 'H2O'.
density : float
mass density in kg/m^3
atoms : list, optional
list of atoms together with their fractional content. When the
            name is a simple chemical formula this can be None. To
specify more complicated materials use [('Ir', 0.2), ('Mn', 0.8),
...]. Instead of the elements as string you can also use an Atom
            object. If the contents do not add up to 1 they will be normalized
without notice.
cij : array-like, optional
elasticity matrix
"""
super().__init__(name, cij)
self._density = density
self.base = list()
if atoms is None:
comp = Amorphous.parseChemForm(name)
if config.VERBOSITY >= config.DEBUG:
print("XU.materials.Amorphous: using '%s' as chemical formula"
% ''.join(['%s%.2f ' % (e.name, c) for e, c in comp]))
for (e, c) in comp:
self.base.append((e, c))
else:
frsum = numpy.sum([at[1] for at in atoms])
for at, fr in atoms:
if not isinstance(at, Atom):
a = getattr(elements, at)
else:
a = at
self.base.append((a, fr/frsum))
@staticmethod
def parseChemForm(cstring):
"""
Parse a string containing a simple chemical formula and transform it to
a list of elements together with their relative atomic fraction. e.g.
'H2O' -> [(H, 2/3), (O, 1/3)], where H and O are the Element objects of
Hydrogen and Oxygen. Note that every chemical element needs to start
        with a capital letter! Complicated formulas containing brackets are not
supported!
Parameters
----------
cstring : str
            string containing the chemical formula
Returns
-------
list of tuples
chemical element and atomic fraction
"""
if re.findall(r'[\(\)]', cstring):
raise ValueError('unsupported chemical formula (%s) given.'
% cstring)
elems = re.findall('[A-Z][^A-Z]*', cstring)
r = re.compile(r"([a-zA-Z]+)([0-9\.]+)")
ret = []
csum = 0
for e in elems:
if r.match(e):
elstr, cont = r.match(e).groups()
cont = float(cont)
else:
elstr, cont = (e, 1.0)
ret.append((elstr, cont))
csum += cont
for i, r in enumerate(ret):
ret[i] = (getattr(elements, r[0]), r[1]/csum)
return ret
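    # Usage sketch for parseChemForm, following the docstring above (the
    # returned Element objects are shown via their ``name`` attribute):
    # >>> [(e.name, round(c, 3)) for e, c in Amorphous.parseChemForm('H2O')]
    # [('H', 0.667), ('O', 0.333)]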
def _get_f(self, q, en):
"""
optimized method to calculate the atomic scattering factor for all
atoms in the unit cell by calling the database only as much as needed.
Parameters
----------
q : float or array-like
momentum transfer for which the atomic scattering factor should be
calculated
en : float or str
x-ray energy (eV)
Returns
-------
list
atomic scattering factors for every atom in the unit cell
"""
f = {}
for at, occ in self.base:
if at.num not in f:
f[at.num] = at.f(q, en)
return [f[a.num] for a, o in self.base]
def delta(self, en='config'):
"""
function to calculate the real part of the deviation of the
refractive index from 1 (n=1-delta+i*beta)
Parameters
----------
en : float, array-like or str, optional
energy of the x-rays in eV
Returns
-------
float or array-like
"""
re = scipy.constants.physical_constants['classical electron radius'][0]
re *= 1e10
if isinstance(en, str) and en == 'config':
en = utilities.energy(config.ENERGY)
lam = utilities.en2lam(en)
delta = 0.
m = 0.
f = self._get_f(0., en)
for (at, occ), fa in zip(self.base, f):
delta += numpy.real(fa) * occ
m += at.weight * occ
delta *= re / (2 * numpy.pi) * lam ** 2 / (m / self.density) * 1e-30
return delta
def ibeta(self, en='config'):
"""
function to calculate the imaginary part of the deviation
of the refractive index from 1 (n=1-delta+i*beta)
Parameters
----------
en : float, array-like or str, optional
energy of the x-rays in eV
Returns
-------
float or array-like
"""
re = scipy.constants.physical_constants['classical electron radius'][0]
re *= 1e10
if isinstance(en, str) and en == 'config':
en = utilities.energy(config.ENERGY)
lam = utilities.en2lam(en)
beta = 0.
m = 0.
f = self._get_f(0., en)
for (at, occ), fa in zip(self.base, f):
beta += numpy.imag(fa) * occ
m += at.weight * occ
beta *= re / (2 * numpy.pi) * lam ** 2 / (m / self.density) * 1e-30
return beta
def chi0(self, en='config'):
"""
calculates the complex chi_0 values often needed in simulations.
They are closely related to delta and beta
(n = 1 + chi_r0/2 + i*chi_i0/2 vs. n = 1 - delta + i*beta)
"""
re = scipy.constants.physical_constants['classical electron radius'][0]
re *= 1e10
if isinstance(en, str) and en == 'config':
en = utilities.energy(config.ENERGY)
lam = utilities.en2lam(en)
beta = 0.
delta = 0.
m = 0.
f = self._get_f(0., en)
for (at, occ), f0 in zip(self.base, f):
beta += numpy.imag(f0) * occ
delta += numpy.real(f0) * occ
m += at.weight * occ
beta *= re / (2 * numpy.pi) * lam ** 2 / (m / self.density) * 1e-30
delta *= re / (2 * numpy.pi) * lam ** 2 / (m / self.density) * 1e-30
return (-2 * delta + 2j * beta)
def __str__(self):
ostr = super().__str__()
ostr += "density: %.2f\n" % self.density
if self.base:
ostr += "atoms: "
for at, o in self.base:
ostr += "(%s, %.3f) " % (at.name, o)
ostr += "\n"
return ostr
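# Minimal usage sketch for the Amorphous class above; the density is an
# illustrative round number in kg/m^3, not an authoritative value:
# >>> silica = Amorphous('SiO2', 2200)
# >>> [(at.name, round(frac, 3)) for at, frac in silica.base]
# [('Si', 0.333), ('O', 0.667)]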
class Crystal(Material):
"""
Crystalline materials are described by this class
"""
def __init__(self, name, lat, cij=None, thetaDebye=None):
super().__init__(name, cij)
self.lattice = lat
if isinstance(thetaDebye, numbers.Number):
self.thetaDebye = float(thetaDebye)
else:
self.thetaDebye = thetaDebye
@classmethod
def fromCIF(cls, ciffilestr, **kwargs):
"""
Create a Crystal from a CIF file. The default data-set from the cif
file will be used to create the Crystal.
Parameters
----------
ciffilestr : str, bytes
filename of the CIF file or string representation of the CIF file
kwargs : dict
keyword arguments are passed to the init-method of CIFFile
Returns
-------
Crystal
"""
cf = cif.CIFFile(ciffilestr, **kwargs)
lat = cf.SGLattice()
return cls(cf.data[cf._default_dataset].name, lat)
def loadLatticefromCIF(self, ciffilestr):
"""
load the unit cell data (lattice) from the CIF file. Other material
properties stay unchanged.
Parameters
----------
ciffilestr : str, bytes
filename of the CIF file or string representation of the CIF file
"""
cf = cif.CIFFile(ciffilestr)
self.lattice = cf.SGLattice()
def toCIF(self, ciffilename):
"""
Export the Crystal to a CIF file.
Parameters
----------
ciffilename : str
filename of the CIF file
"""
cif.cifexport(ciffilename, self)
@property
def a(self):
return self.lattice.a
@property
def b(self):
return self.lattice.b
@property
def c(self):
return self.lattice.c
@property
def alpha(self):
return self.lattice.alpha
@property
def beta(self):
return self.lattice.beta
@property
def gamma(self):
return self.lattice.gamma
@property
def a1(self):
return self.lattice._ai[0, :]
@property
def a2(self):
return self.lattice._ai[1, :]
@property
def a3(self):
return self.lattice._ai[2, :]
@property
def B(self):
return self.lattice._qtransform.matrix
def __eq__(self, other):
"""
compare if another Crystal instance is equal to the current one.
Currently this considers only the lattice to be equal. Additional
        parameters like thetaDebye and the elastic parameters are ignored.
Parameters
----------
other: Crystal
another instance of Crystal to compare
"""
return self.lattice == other.lattice
def Q(self, *hkl):
"""
Return the Q-space position for a certain material.
Parameters
----------
hkl : list or array-like
Miller indices (or Q(h, k, l) is also possible)
"""
return self.lattice.GetQ(*hkl)
def HKL(self, *q):
"""
Return the HKL-coordinates for a certain Q-space position.
Parameters
----------
q : list or array-like
            Q-position. It is also possible to use HKL(qx, qy, qz).
"""
return self.lattice.GetHKL(*q)
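    # Usage sketch for the Q()/HKL() pair on any Crystal instance ``mat``
    # (placeholder name): the two methods are inverse operations.
    # >>> q = mat.Q(1, 1, 1)   # momentum transfer of the (111) reflection in 1/A
    # >>> mat.HKL(q)           # recovers approximately (1, 1, 1)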
def chemical_composition(self, natoms=None, with_spaces=False, ndigits=2):
"""
determine chemical composition from occupancy of atomic positions.
Parameters
----------
natoms : int, optional
number of atoms to normalize the formula, if None some automatic
normalization is attempted using the greatest common divisor of the
number of atoms per unit cell. If the number of atoms of any
element is fractional natoms=1 is used.
with_spaces : bool, optional
add spaces between the different entries in the output string for
            CIF compatibility
ndigits : int, optional
            number of digits to which floating point numbers are rounded
Returns
-------
str
representation of the chemical composition
"""
elem = {}
for a in self.lattice.base():
e = a[0].name
occ = a[2]
if e in elem:
elem[e] += occ
else:
elem[e] = occ
natom = sum([elem[e] for e in elem])
isint = True
for e in elem:
if not float(elem[e]).is_integer():
isint = False
# determine number of atoms
if not natoms:
if isint:
gcd = math.gcd([int(elem[e]) for e in elem])
natoms = natom/gcd
else:
natoms = 1
        # generate output string
cstr = ''
fmtstr = '%d' if isint else '%%.%df' % ndigits
for e in elem:
n = elem[e] / float(natom) * natoms
cstr += e
if n != 1:
cstr += fmtstr % n
cstr += ' ' if with_spaces else ''
return cstr.strip()
def environment(self, *pos, **kwargs):
"""
Returns a list of neighboring atoms for a given position within the
unit cell. If the material does not contain any atoms a dummy atom will
be placed on the unit cell corners.
Parameters
----------
pos : list or array-like
fractional coordinate in the unit cell
maxdist : float
maximum distance wanted in the list of neighbors (default: 7)
Returns
-------
list of tuples
(distance, atomType, multiplicity) giving distance sorted list of
atoms
"""
valid_kwargs = {'maxdist': 'maximum distance needed in the output'}
utilities.check_kwargs(kwargs, valid_kwargs, 'Crystal.environment')
maxdist = kwargs.get('maxdist', 7)
if len(pos) < 3:
pos = pos[0]
if len(pos) < 3:
raise InputError("need 3 coordinates of the "
"reference position")
refpos = self.lattice._ai.T @ pos
lst = []
# determine lattice base
if self.lattice.nsites > 0:
base = list(self.lattice.base())
else:
base = [(elements.Dummy, (0, 0, 0), 1, 0)]
# find maximally needed super cell
na = int(ceil(maxdist / math.VecNorm(self.a1)))
nb = int(ceil(maxdist / math.VecNorm(self.a2)))
nc = int(ceil(maxdist / math.VecNorm(self.a3)))
nab = int(ceil(maxdist / math.VecNorm(self.a1 + self.a2)))
nac = int(ceil(maxdist / math.VecNorm(self.a1 + self.a3)))
nbc = int(ceil(maxdist / math.VecNorm(self.a2 + self.a3)))
nabc = int(ceil(maxdist / math.VecNorm(self.a1 + self.a2 + self.a3)))
Na = max(na, nab, nac, nabc)
Nb = max(nb, nab, nbc, nabc)
Nc = max(nc, nac, nbc, nabc)
# determine distance of all atoms w.r.t. the refpos
ucidx = numpy.mgrid[-Na:Na+1, -Nb:Nb+1, -Nc:Nc+1].reshape(3, -1)
for a, p, o, b in base:
ucpos = self.lattice._ai.T @ p
pos = ucpos + numpy.einsum('ji, ...i', self.lattice._ai.T, ucidx.T)
distance = math.VecNorm(pos - refpos)
lst += [(d, a, o) for d in distance]
# sort and merge return list
lst.sort(key=operator.itemgetter(0, 1))
rl = []
if len(lst) < 1 or lst[0][0] > maxdist:
return rl
mult = lst[0][2]
for i in range(1, len(lst)):
if (isclose(lst[i - 1][0] - lst[i][0], 0, abs_tol=1e-8) and
lst[i - 1][1] == lst[i][1]):
mult += lst[i - 1][2] # add occupancy
else:
rl.append((lst[i - 1][0], lst[i - 1][1], mult))
mult = lst[i][2]
if lst[i][0] > maxdist:
break
return rl
def planeDistance(self, *hkl):
"""
determines the lattice plane spacing for the planes specified by (hkl)
Parameters
----------
h, k, l : list, tuple or floats
Miller indices of the lattice planes given either as list, tuple or
            separate arguments
Returns
-------
float
the lattice plane spacing
Examples
--------
>>> xu.materials.Si.planeDistance(0, 0, 4)
1.3577600000000001
or
>>> xu.materials.Si.planeDistance((1, 1, 1))
3.1356124059796255
"""
if len(hkl) < 3:
hkl = hkl[0]
if len(hkl) < 3:
raise InputError("need 3 indices for the lattice point")
return 2 * numpy.pi / math.VecNorm(self.Q(hkl))
def _getdensity(self):
"""
        calculates the mass density of a material from the mass of the atoms
in the unit cell.
Returns
-------
float
mass density in kg/m^3
"""
m = 0.
for at, pos, occ, b in self.lattice.base():
m += at.weight * occ
return m / self.lattice.UnitCellVolume() * 1e30
density = property(_getdensity)
def _get_f(self, q, en):
"""
optimized method to calculate the atomic scattering factor for all
atoms in the unit cell by calling the database only as much as needed.
Parameters
----------
q : float or array-like
momentum transfer for which the atomic scattering factor should be
calculated
en : float or str
x-ray energy (eV)
Returns
-------
list
atomic scattering factors for every atom in the unit cell
"""
f = {}
if self.lattice.nsites > 0:
for at, pos, occ, b in self.lattice.base():
if at.num not in f:
f[at.num] = at.f(q, en)
return [f[a.num] for a, p, o, b in self.lattice.base()]
else:
return None
def _get_lamen(self, en):
if isinstance(en, str) and en == 'config':
en = utilities.energy(config.ENERGY)
lam = utilities.en2lam(en)
return lam, en
def delta(self, en='config'):
"""
function to calculate the real part of the deviation of the
refractive index from 1 (n=1-delta+i*beta)
Parameters
----------
en : float or str, optional
x-ray energy eV, if omitted the value from the xrayutilities
configuration is used
Returns
-------
float
"""
re = scipy.constants.physical_constants['classical electron radius'][0]
re *= 1e10
lam, en = self._get_lamen(en)
delta = 0.
f = self._get_f(0, en)
for (at, pos, occ, b), fa in zip(self.lattice.base(), f):
delta += numpy.real(fa) * occ
delta *= re / (2 * numpy.pi) * lam ** 2 / \
self.lattice.UnitCellVolume()
return delta
def ibeta(self, en='config'):
"""
function to calculate the imaginary part of the deviation
of the refractive index from 1 (n=1-delta+i*beta)
Parameters
----------
en : float or str, optional
x-ray energy eV, if omitted the value from the xrayutilities
configuration is used
Returns
-------
float
"""
re = scipy.constants.physical_constants['classical electron radius'][0]
re *= 1e10
lam, en = self._get_lamen(en)
beta = 0.
f = self._get_f(0, en)
for (at, pos, occ, b), fa in zip(self.lattice.base(), f):
beta += numpy.imag(fa) * occ
beta *= re / (2 * numpy.pi) * lam ** 2 / self.lattice.UnitCellVolume()
return beta
def chi0(self, en='config'):
"""
calculates the complex chi_0 values often needed in simulations.
They are closely related to delta and beta
(n = 1 + chi_r0/2 + i*chi_i0/2 vs. n = 1 - delta + i*beta)
"""
re = scipy.constants.physical_constants['classical electron radius'][0]
re *= 1e10
lam, en = self._get_lamen(en)
beta = 0.
delta = 0.
if self.lattice.nsites > 0:
f = self._get_f(0, en)
for (at, pos, occ, b), f0 in zip(self.lattice.base(), f):
beta += numpy.imag(f0) * occ
delta += numpy.real(f0) * occ
v = self.lattice.UnitCellVolume()
beta *= re / (2 * numpy.pi) * lam ** 2 / v
delta *= re / (2 * numpy.pi) * lam ** 2 / v
return (-2 * delta + 2j * beta)
def _debyewallerfactor(self, temp, qnorm):
"""
Calculate the Debye Waller temperature factor according to the Debye
temperature
Parameters
----------
temp : float
actual temperature (K)
qnorm : float or array-like
norm of the q-vector(s) for which the factor should be calculated
Returns
-------
float or array-like
the Debye Waller factor(s) with the same shape as qnorm
"""
if temp != 0 and self.thetaDebye:
# W(q) = 3/2* hbar^2*q^2/(m*kB*tD) * (D1(tD/T)/(tD/T) + 1/4)
# DWF = exp(-W(q)) consistent with Vaclav H. and several books
hbar = scipy.constants.hbar
kb = scipy.constants.Boltzmann
x = self.thetaDebye / float(temp)
m = 0.
im = 0
for a, p, o, b in self.lattice.base():
m += a.weight
im += 1
m = m / float(im)
exponentf = 3 / 2. * hbar ** 2 * 1.0e20 / \
(m * kb * self.thetaDebye) * (math.Debye1(x) / x + 0.25)
if config.VERBOSITY >= config.DEBUG:
print("XU.materials.Crystal: DWF = exp(-W*q**2) W= %g"
% exponentf)
dwf = numpy.exp(-exponentf * qnorm ** 2)
else:
dwf = 1.0
return dwf
def chih(self, q, en='config', temp=0, polarization='S'):
"""
calculates the complex polarizability of a material for a certain
momentum transfer and energy
Parameters
----------
q : list, tuple or array-like
momentum transfer vector in (1/A)
en : float or str, optional
x-ray energy eV, if omitted the value from the xrayutilities
configuration is used
temp : float, optional
temperature used for Debye-Waller-factor calculation
polarization : {'S', 'P'}, optional
sigma or pi polarization
Returns
-------
tuple
(abs(chih_real), abs(chih_imag)) complex polarizability
"""
if isinstance(q, (list, tuple)):
q = numpy.array(q, dtype=numpy.double)
elif isinstance(q, numpy.ndarray):
pass
else:
raise TypeError("q must be a list or numpy array!")
qnorm = math.VecNorm(q)
if isinstance(en, str) and en == 'config':
en = utilities.energy(config.ENERGY)
if polarization not in ('S', 'P'):
raise ValueError("polarization must be 'S':sigma or 'P': pi!")
if self.lattice.nsites == 0:
return (0, 0)
dwf = self._debyewallerfactor(temp, qnorm)
sr = 0. + 0.j
si = 0. + 0.j
# a: atom, p: position, o: occupancy, b: temperature-factor
f = self._get_f(qnorm, en)
for (a, p, o, b), F in zip(self.lattice.base(), f):
r = self.lattice.GetPoint(p)
if temp == 0:
dwf = numpy.exp(-b * qnorm ** 2 / (4 * numpy.pi) ** 2)
fr = numpy.real(F) * o
fi = numpy.imag(F) * o
sr += fr * numpy.exp(-1.j * math.VecDot(q, r)) * dwf
si += fi * numpy.exp(-1.j * math.VecDot(q, r)) * dwf
# classical electron radius
c = scipy.constants
r_e = 1 / (4 * numpy.pi * c.epsilon_0) * c.e ** 2 / \
(c.electron_mass * c.speed_of_light ** 2) * 1e10
lam = utilities.en2lam(en)
fact = -lam ** 2 * r_e / (numpy.pi * self.lattice.UnitCellVolume())
rchi = numpy.abs(fact * sr)
ichi = numpy.abs(fact * si)
if polarization == 'P':
theta = numpy.arcsin(qnorm * utilities.en2lam(en) / (4*numpy.pi))
rchi *= numpy.cos(2 * theta)
ichi *= numpy.cos(2 * theta)
return rchi, ichi
def dTheta(self, Q, en='config'):
"""
function to calculate the refractive peak shift
Parameters
----------
Q : list, tuple or array-like
momentum transfer vector (1/A)
en : float or str, optional
x-ray energy eV, if omitted the value from the xrayutilities
configuration is used
Returns
-------
float
peak shift in degree
"""
if isinstance(en, str) and en == 'config':
en = utilities.energy(config.ENERGY)
lam = utilities.en2lam(en)
dth = numpy.degrees(
2 * self.delta(en) / numpy.sin(2 * numpy.arcsin(
lam * VecNorm(Q) / (4 * numpy.pi))))
return dth
def __str__(self):
ostr = super().__str__()
ostr += "Lattice:\n"
ostr += str(self.lattice)
return ostr
def StructureFactor(self, q, en='config', temp=0):
"""
calculates the structure factor of a material
for a certain momentum transfer and energy
at a certain temperature of the material
Parameters
----------
q : list, tuple or array-like
vectorial momentum transfer
en : float or str, optional
x-ray energy eV, if omitted the value from the xrayutilities
configuration is used
temp : float
temperature used for Debye-Waller-factor calculation
Returns
-------
complex
the complex structure factor
"""
if isinstance(q, (list, tuple)):
q = numpy.array(q, dtype=numpy.double)
elif isinstance(q, numpy.ndarray):
pass
else:
raise TypeError("q must be a list or numpy array!")
if isinstance(en, str) and en == 'config':
en = utilities.energy(config.ENERGY)
if self.lattice.nsites == 0:
return 1.
qnorm = math.VecNorm(q)
dwf = self._debyewallerfactor(temp, qnorm)
s = 0. + 0.j
f = self._get_f(qnorm, en)
# a: atom, p: position, o: occupancy, b: temperature-factor
for (a, p, o, b), fq in zip(self.lattice.base(), f):
r = self.lattice.GetPoint(p)
if temp == 0:
dwf = numpy.exp(-b * qnorm ** 2 /
(4 * numpy.pi) ** 2)
s += fq * o * numpy.exp(-1.j * math.VecDot(q, r)) * dwf
return s
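    # Usage sketch, assuming a predefined crystal such as the Si instance
    # referenced in the planeDistance docstring above; the kinematic intensity
    # of a reflection is proportional to |F|^2:
    # >>> F = Si.StructureFactor(Si.Q(0, 0, 4))
    # >>> abs(F) ** 2          # kinematic intensity in arbitrary units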
def StructureFactorForEnergy(self, q0, en, temp=0):
"""
calculates the structure factor of a material
for a certain momentum transfer and a bunch of energies
Parameters
----------
q0 : list, tuple or array-like
vectorial momentum transfer
en : list, tuple or array-like
energy values in eV
temp : float
temperature used for Debye-Waller-factor calculation
Returns
-------
array-like
complex valued structure factor array
"""
if isinstance(q0, (list, tuple)):
q = numpy.array(q0, dtype=numpy.double)
elif isinstance(q0, numpy.ndarray):
q = q0
else:
raise TypeError("q must be a list or numpy array!")
qnorm = math.VecNorm(q)
if isinstance(en, (list, tuple)):
en = numpy.array(en, dtype=numpy.double)
elif isinstance(en, numpy.ndarray):
pass
else:
raise TypeError("Energy data must be provided as a list "
"or numpy array!")
if self.lattice.nsites == 0:
return numpy.ones(len(en))
dwf = self._debyewallerfactor(temp, qnorm)
s = 0. + 0.j
f = self._get_f(qnorm, en)
# a: atom, p: position, o: occupancy, b: temperature-factor
for (a, p, o, b), fq in zip(self.lattice.base(), f):
if temp == 0:
dwf = numpy.exp(-b * qnorm ** 2 / (4 * numpy.pi) ** 2)
r = self.lattice.GetPoint(p)
s += fq * o * dwf * numpy.exp(-1.j * math.VecDot(q, r))
return s
def StructureFactorForQ(self, q, en0='config', temp=0):
"""
calculates the structure factor of a material
for a bunch of momentum transfers and a certain energy
Parameters
----------
q : list of vectors or array-like
            vectorial momentum transfers; list of vectors (list, tuple or
array) of length 3
e.g.: (Si.Q(0, 0, 4), Si.Q(0, 0, 4.1),...) or
numpy.array([Si.Q(0, 0, 4), Si.Q(0, 0, 4.1)])
en0 : float or str, optional
x-ray energy eV, if omitted the value from the xrayutilities
configuration is used
temp : float
temperature used for Debye-Waller-factor calculation
Returns
-------
array-like
complex valued structure factor array
"""
if isinstance(q, (list, tuple, numpy.ndarray)):
q = numpy.asarray(q, dtype=numpy.double)
else:
raise TypeError("q must be a list or numpy array!")
if len(q.shape) != 2:
raise ValueError("q does not have the correct shape (shape = %s)"
% str(q.shape))
qnorm = numpy.linalg.norm(q, axis=1)
if isinstance(en0, str) and en0 == 'config':
en0 = utilities.energy(config.ENERGY)
if self.lattice.nsites == 0:
return numpy.ones(len(q))
dwf = self._debyewallerfactor(temp, qnorm)
s = 0. + 0.j
f = self._get_f(qnorm, en0)
# a: atom, p: position, o: occupancy, b: temperature-factor
for (a, p, o, b), fq in zip(self.lattice.base(), f):
if temp == 0:
dwf = numpy.exp(-b * qnorm ** 2 / (4 * numpy.pi) ** 2)
r = self.lattice.GetPoint(p)
s += fq * o * numpy.exp(-1.j * numpy.dot(q, r)) * dwf
return s
def ApplyStrain(self, strain):
"""
Applies a certain strain on the lattice of the material. The result is
a change in the base vectors of the real space as well as reciprocal
space lattice. The full strain matrix (3x3) needs to be given.
Note:
NO elastic response of the material will be considered!
"""
# let strain act on the unit cell vectors
self.lattice.ApplyStrain(strain)
def GetMismatch(self, mat):
"""
Calculate the mismatch strain between the material and a second
material
"""
raise NotImplementedError("XU.material.GetMismatch: "
"not implemented yet")
def distances(self):
"""
function to obtain distances of atoms in the crystal up to the unit
cell size (largest value of a, b, c is the cut-off)
        returns a list of tuples with distance d and number of occurrences n
[(d1, n1), (d2, n2),...]
Note:
if the base of the material is empty the list will be empty
"""
if self.lattice.nsites == 0:
return []
cutoff = numpy.max((self.lattice.a, self.lattice.b, self.lattice.c))
tmp_data = []
for at1 in self.lattice.base():
for at2 in self.lattice.base():
dis = math.VecNorm(self.lattice.GetPoint(at1[1] - at2[1]))
dis2 = math.VecNorm(self.lattice.GetPoint(
at1[1] - at2[1] + numpy.array((1, 0, 0))))
dis3 = math.VecNorm(self.lattice.GetPoint(
at1[1] - at2[1] + numpy.array((0, 1, 0))))
dis4 = math.VecNorm(self.lattice.GetPoint(
at1[1] - at2[1] + numpy.array((0, 0, 1))))
dis5 = math.VecNorm(self.lattice.GetPoint(
at1[1] - at2[1] + numpy.array((-1, 0, 0))))
dis6 = math.VecNorm(self.lattice.GetPoint(
at1[1] - at2[1] + numpy.array((0, -1, 0))))
dis7 = math.VecNorm(self.lattice.GetPoint(
at1[1] - at2[1] + numpy.array((0, 0, -1))))
distances = sorted([dis, dis2, dis3, dis4, dis5, dis6, dis7])
for dis in distances:
if dis < cutoff:
tmp_data.append(dis)
# sort the list and compress equal entries
tmp_data.sort()
self._distances = [0]
self._dis_hist = [0]
for dis in tmp_data:
if numpy.round(dis - self._distances[-1], config.DIGITS) == 0:
self._dis_hist[-1] += 1
else:
self._distances.append(dis)
self._dis_hist.append(1)
# create return value
ret = []
for i in range(len(self._distances)):
ret.append((self._distances[i], self._dis_hist[i]))
return ret
def show_unitcell(self, fig=None, subplot=111, scale=0.6, complexity=11,
linewidth=1.5, mode='matplotlib'):
"""
        visualization of the unit cell using either matplotlib's basic 3D
functionality (expect rendering inaccuracies!) or the mayavi mlab
package (accurate rendering -> recommended!)
Note:
For more flexible visualization consider using the CIF-export
feature and use a proper crystal structure viewer.
Parameters
----------
fig : matplotlib Figure, Mayavi Scene, or None, optional
subplot : int or list, optional
subplot to use for the visualization when using matplotlib. This
            argument is forwarded to the first argument of matplotlib's
`add_subplot` function
scale : float, optional
scale the size of the atoms by this additional factor. By default
the size of the atoms corresponds to 60% of their atomic radius.
complexity : int, optional
number of steps to approximate the atoms as spheres. Higher values
make spheres more accurate, but cause slower plotting.
linewidth : float, optional
line thickness of the unit cell outline
mode : str, optional
defines the plot backend used, can be 'matplotlib' (default)
or 'mayavi'.
Returns
-------
figure object of either matplotlib or Mayavi
"""
if mode == 'matplotlib':
plot, plt = utilities.import_matplotlib_pyplot('XU.materials')
try:
import mpl_toolkits.mplot3d
except ImportError:
plot = False
else:
plot, mlab = utilities.import_mayavi_mlab('XU.materials')
try:
import mayavi
from matplotlib.colors import to_rgb
except ImportError:
plot = False
if not plot:
print('matplotlib and/or mayavi.mlab needed for show_unitcell()')
return
def plot_sphere(fig, vecpos, r, alpha, complexity, color):
"""
Visualize a sphere using either matplotlib or Mayavi
"""
if mode == 'matplotlib':
ax = fig.gca()
phi, theta = numpy.mgrid[0:numpy.pi:1j*complexity,
0:2*numpy.pi:1j*complexity]
x = r*numpy.sin(phi)*numpy.cos(theta) + vecpos[0]
y = r*numpy.sin(phi)*numpy.sin(theta) + vecpos[1]
z = r*numpy.cos(phi) + vecpos[2]
ax.plot_surface(x, y, z, rstride=1, cstride=1,
color=color, alpha=alpha,
linewidth=0)
else:
mlab.points3d(vecpos[0], vecpos[1], vecpos[2], r,
opacity=alpha, transparent=False,
color=to_rgb(color), resolution=complexity,
scale_factor=2, figure=fig)
def plot_line(fig, start, end, color, linewidth):
"""
Draw a line between two 3D points, either using matplotlib or
Mayavi.
"""
if mode == 'matplotlib':
ax = fig.gca()
ax.plot((start[0], end[0]),
(start[1], end[1]),
(start[2], end[2]),
color=color, lw=linewidth)
else:
mlab.plot3d((start[0], end[0]),
(start[1], end[1]),
(start[2], end[2]),
color=to_rgb(color), tube_radius=linewidth/20,
figure=fig)
if mode == 'matplotlib':
if fig is None:
fig = plt.figure()
elif not isinstance(fig, plt.Figure):
raise TypeError("'fig' argument must be a matplotlib figure!")
ax = fig.add_subplot(subplot, projection='3d')
else:
if fig is None:
fig = mlab.figure(bgcolor=(1, 1, 1))
elif not isinstance(fig, mayavi.core.scene.Scene):
raise TypeError("'fig' argument must be a Mayavi Scene!")
for a, pos, occ, b in self.lattice.base():
r = a.radius * scale
for i in range(-1, 2):
for j in range(-1, 2):
for k in range(-1, 2):
atpos = (pos + [i, j, k])
if all(a > -config.EPSILON and a < 1+config.EPSILON
for a in atpos):
vecpos = atpos[0]*self.a1 + atpos[1]*self.a2 +\
atpos[2]*self.a3
plot_sphere(fig, vecpos, r, occ, complexity,
a.color)
# plot unit cell outlines
plot_line(fig, (0, 0, 0), self.a1, 'k', linewidth)
plot_line(fig, (0, 0, 0), self.a2, 'k', linewidth)
plot_line(fig, (0, 0, 0), self.a3, 'k', linewidth)
plot_line(fig, self.a1, self.a1+self.a2, 'k', linewidth)
plot_line(fig, self.a1, self.a1+self.a3, 'k', linewidth)
plot_line(fig, self.a2, self.a1+self.a2, 'k', linewidth)
plot_line(fig, self.a2, self.a2+self.a3, 'k', linewidth)
plot_line(fig, self.a3, self.a1+self.a3, 'k', linewidth)
plot_line(fig, self.a3, self.a2+self.a3, 'k', linewidth)
plot_line(fig, self.a1+self.a2, self.a1+self.a2+self.a3, 'k',
linewidth)
plot_line(fig, self.a1+self.a3, self.a1+self.a2+self.a3, 'k',
linewidth)
plot_line(fig, self.a2+self.a3, self.a1+self.a2+self.a3, 'k',
linewidth)
        if mode == 'matplotlib':
if config.VERBOSITY >= config.INFO_LOW:
warnings.warn("show_unitcell: 3D projection might appear "
"distorted (limited 3D capabilities of "
"matplotlib!). Use mayavi mode or CIF "
"export and other viewers for better "
"visualization.")
plt.tight_layout()
return fig
def CubicElasticTensor(c11, c12, c44):
"""
Assemble the 6x6 matrix of elastic constants for a cubic material from the
three independent components of a cubic crystal
Parameters
----------
c11, c12, c44 : float
independent components of the elastic tensor of cubic materials
Returns
-------
cij : ndarray
6x6 matrix with elastic constants
"""
m = numpy.zeros((6, 6), dtype=numpy.double)
m[0, 0] = c11
m[1, 1] = c11
m[2, 2] = c11
m[3, 3] = c44
m[4, 4] = c44
m[5, 5] = c44
m[0, 1] = m[0, 2] = c12
m[1, 0] = m[1, 2] = c12
m[2, 0] = m[2, 1] = c12
return m
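# Usage sketch for CubicElasticTensor; the numbers are approximate literature
# values for Si in GPa and serve only as an illustration:
# >>> cij_Si = CubicElasticTensor(165.8, 63.9, 79.6)
# >>> cij_Si[0, 0], cij_Si[0, 1], cij_Si[3, 3]
# (165.8, 63.9, 79.6)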
def HexagonalElasticTensor(c11, c12, c13, c33, c44):
"""
Assemble the 6x6 matrix of elastic constants for a hexagonal material from
the five independent components of a hexagonal crystal
Parameters
----------
c11, c12, c13, c33, c44 : float
independent components of the elastic tensor of a hexagonal material
Returns
-------
cij : ndarray
6x6 matrix with elastic constants
"""
m = numpy.zeros((6, 6), dtype=numpy.double)
m[0, 0] = m[1, 1] = c11
m[2, 2] = c33
m[3, 3] = m[4, 4] = c44
m[5, 5] = 0.5 * (c11 - c12)
m[0, 1] = m[1, 0] = c12
m[0, 2] = m[1, 2] = m[2, 0] = m[2, 1] = c13
return m
def WZTensorFromCub(c11ZB, c12ZB, c44ZB):
"""
Determines the hexagonal elastic tensor from the values of the cubic
elastic tensor under the assumptions presented in Phys. Rev. B 6, 4546
(1972), which are valid for the WZ <-> ZB polymorphs.
Parameters
----------
    c11ZB, c12ZB, c44ZB : float
independent components of the elastic tensor of cubic materials
Returns
-------
cij : ndarray
6x6 matrix with elastic constants
Implementation according to a patch submitted by Julian Stangl
"""
# matrix conversions: cubic (111) to hexagonal (001) direction
P = (1 / 6.) * numpy.array([[3, 3, 6],
[2, 4, 8],
[1, 5, -2],
[2, 4, -4],
[2, -2, 2],
[1, -1, 4]])
Q = (1 / (3 * numpy.sqrt(2))) * numpy.array([1, -1, -2])
cZBvec = numpy.array([c11ZB, c12ZB, c44ZB])
cWZvec_BAR = numpy.dot(P, cZBvec)
delta = numpy.dot(Q, cZBvec)
D = numpy.array([delta**2 / cWZvec_BAR[2], 0, -delta**2 / cWZvec_BAR[2],
0, delta**2 / cWZvec_BAR[0], delta**2 / cWZvec_BAR[2]])
cWZvec = cWZvec_BAR - D.T
return HexagonalElasticTensor(cWZvec[0], cWZvec[2], cWZvec[3],
cWZvec[1], cWZvec[4])
class Alloy(Crystal):
"""
alloys two materials from the same crystal system. If the materials have
the same space group the Wyckoff positions within the unit cell will also
reflect the alloying.
"""
def __init__(self, matA, matB, x):
self.check_compatibility(matA, matB)
lat = copy.deepcopy(matA.lattice)
super().__init__("None", lat, matA.cij)
self.matA = matA
self.matB = matB
self._setxb(x)
@staticmethod
def check_compatibility(matA, matB):
csA = matA.lattice.crystal_system.split(':')[0]
csB = matB.lattice.crystal_system.split(':')[0]
if csA != csB:
raise InputError("Crystal systems of the two materials are "
"incompatible!")
@staticmethod
def lattice_const_AB(latA, latB, x, name=''):
"""
        method to calculate the interpolated lattice parameters and unit
cell angles of the Alloy. By default linear interpolation between the
value of material A and B is performed.
Parameters
----------
latA, latB : float or vector
property (lattice parameter/angle) of material A and B. A property
can be a scalar or vector.
x : float
fraction of material B in the alloy.
name : str, optional
label of the property which is interpolated. Can be 'a', 'b', 'c',
'alpha', 'beta', or 'gamma'.
"""
return (latB - latA) * x + latA
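    # Note: the default linear interpolation above is Vegard's law. Sketch with
    # illustrative (not authoritative) lattice parameters:
    # >>> Alloy.lattice_const_AB(5.6533, 5.6611, 0.3)
    # 5.6556...  (approximately)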
def _getxb(self):
return self._xb
def _setxb(self, x):
self._xb = x
self.name = ("%s(%2.2f)%s(%2.2f)"
% (self.matA.name, 1-x, self.matB.name, x))
# modify the free parameters of the lattice
for k in self.lattice.free_parameters:
setattr(self.lattice, k,
self.lattice_const_AB(getattr(self.matA, k),
getattr(self.matB, k), x, name=k))
# set elastic constants
self.cij = (self.matB.cij - self.matA.cij) * x + self.matA.cij
self.cijkl = (self.matB.cijkl - self.matA.cijkl) * x + self.matA.cijkl
# alloying in unit cell
if self.matA.lattice.space_group == self.matB.lattice.space_group:
self.lattice._wbase = WyckoffBase()
for a, wp, o, b in self.matA.lattice._wbase:
self.lattice._wbase.append(a, wp, occ=o*(1-x), b=b)
for a, wp, o, b in self.matB.lattice._wbase:
if (a, wp, o, b) in self.lattice._wbase:
idx = self.lattice._wbase.index((a, wp, o, b))
occ = self.lattice._wbase[idx][2]
self.lattice._wbase[idx] = (a, wp, occ+o*x, b)
else:
self.lattice._wbase.append(a, wp, occ=o*x, b=b)
x = property(_getxb, _setxb)
def _checkfinitenumber(self, arg, name=""):
if isinstance(arg, numbers.Number) and numpy.isfinite(arg):
return float(arg)
else:
raise TypeError("argument (%s) must be a scalar!" % name)
def _checkarray(self, arg, name=""):
if isinstance(arg, (list, tuple, numpy.ndarray)):
return numpy.asarray(arg, dtype=numpy.double)
else:
raise TypeError("argument (%s) must be of type "
"list, tuple or numpy.ndarray" % name)
def _definehelpers(self, hkl, cijA, cijB):
"""
define helper functions for solving the content from reciprocal space
positions
"""
def a1(x):
return self.lattice_const_AB(self.matA.a1, self.matB.a1,
x, name='a')
def a2(x):
return self.lattice_const_AB(self.matA.a2, self.matB.a2,
x, name='b')
def a3(x):
return self.lattice_const_AB(self.matA.a3, self.matB.a3,
x, name='c')
def V(x):
return numpy.dot(a3(x), numpy.cross(a1(x), a2(x)))
def b1(x):
return 2 * numpy.pi / V(x) * numpy.cross(a2(x), a3(x))
def b2(x):
return 2 * numpy.pi / V(x) * numpy.cross(a3(x), a1(x))
def b3(x):
return 2 * numpy.pi / V(x) * numpy.cross(a1(x), a2(x))
def qhklx(x):
return hkl[0] * b1(x) + hkl[1] * b2(x) + hkl[2] * b3(x)
def frac(x):
return ((cijB[0, 2] + cijB[1, 2] - (cijA[0, 2] + cijA[1, 2])) * x +
(cijA[0, 2] + cijA[1, 2])) / \
((cijB[2, 2] - cijA[2, 2]) * x + cijA[2, 2])
return a1, a2, a3, V, b1, b2, b3, qhklx, frac
def RelaxationTriangle(self, hkl, sub, exp):
"""
        function which returns the relaxation triangle for an
Alloy of given composition. Reciprocal space coordinates are
calculated using the user-supplied experimental class
Parameters
----------
hkl : list or array-like
Miller Indices
sub : Crystal, or float
substrate material or lattice constant
exp : Experiment
object from which the Transformation object and ndir are needed
Returns
-------
qy, qz : float
reciprocal space coordinates of the corners of the relaxation
triangle
"""
hkl = self._checkarray(hkl, "hkl")
trans = exp._transform
ndir = exp.ndir / VecNorm(exp.ndir)
if isinstance(sub, Crystal):
asub = sub.lattice.a
elif isinstance(sub, float):
asub = sub
else:
raise TypeError("Second argument (sub) must be of type float or "
"an instance of xrayutilities.materials.Crystal")
# test if inplane direction of hkl is the same as the one for the
# experiment otherwise warn the user
hklinplane = VecCross(VecCross(exp.ndir, hkl), exp.ndir)
if not numpy.isclose(VecNorm(VecCross(hklinplane, exp.idir)), 0):
warnings.warn("Alloy: given hkl differs from the geometry of the "
"Experiment instance in the azimuthal direction")
# transform elastic constants to correct coordinate frame
cijA = Cijkl2Cij(trans(self.matA.cijkl, rank=4))
cijB = Cijkl2Cij(trans(self.matB.cijkl, rank=4))
a1, a2, a3, V, b1, b2, b3, qhklx, frac = self._definehelpers(hkl,
cijA,
cijB)
qr_i = trans(qhklx(self.x))[1]
qr_p = trans(qhklx(self.x))[2]
qs_i = copysign(2*numpy.pi/asub * VecNorm(VecCross(ndir, hkl)), qr_i)
qs_p = 2*numpy.pi/asub * abs(VecDot(ndir, hkl))
# calculate pseudomorphic points for A and B
def abulk(x):
return math.VecNorm(a1(x))
def aperp(x):
return abulk(self.x) * (1 + frac(x) * (1 - asub / abulk(self.x)))
qp_i = copysign(2*numpy.pi/asub * VecNorm(VecCross(ndir, hkl)), qr_i)
qp_p = 2*numpy.pi/aperp(self.x) * abs(VecDot(ndir, hkl))
        # assemble return values
qy = numpy.array([qr_i, qp_i, qs_i, qr_i], dtype=numpy.double)
qz = numpy.array([qr_p, qp_p, qs_p, qr_p], dtype=numpy.double)
return qy, qz
class CubicAlloy(Alloy):
def __init__(self, matA, matB, x):
# here one could check if material is really cubic!!
Alloy.__init__(self, matA, matB, x)
def ContentBsym(self, q_perp, hkl, inpr, asub, relax):
"""
function that determines the content of B
in the alloy from the reciprocal space position
        of a symmetric peak. As an additional input the substrate's
        lattice parameter and the degree of relaxation must be given
Parameters
----------
q_perp : float
perpendicular peak position of the reflection hkl of the alloy in
reciprocal space
hkl : list
Miller indices of the measured symmetric reflection (also defines
            the surface normal)
inpr : list
Miller indices of a Bragg peak defining the inplane reference
direction
asub : float
substrate lattice parameter
relax : float
degree of relaxation (needed to obtain the content from symmetric
reciprocal space position)
Returns
-------
content : float
the content of B in the alloy determined from the input variables
"""
# check input parameters
q_perp = self._checkfinitenumber(q_perp, "q_perp")
hkl = self._checkarray(hkl, "hkl")
inpr = self._checkarray(inpr, "inpr")
asub = self._checkfinitenumber(asub, "asub")
relax = self._checkfinitenumber(relax, "relax")
# calculate lattice constants from reciprocal space positions
n = self.Q(hkl) / VecNorm(self.Q(hkl))
# the following line is not generally true! only cubic materials
aperp = 2 * numpy.pi / q_perp * abs(VecDot(n, hkl))
# transform the elastic tensors to a coordinate frame attached to the
# surface normal
inp1 = VecCross(n, inpr) / VecNorm(VecCross(n, inpr))
inp2 = VecCross(n, inp1)
trans = math.CoordinateTransform(inp1, inp2, n)
if config.VERBOSITY >= config.DEBUG:
print("XU.materials.Alloy.ContentB: inp1/inp2: ", inp1, inp2)
cijA = Cijkl2Cij(trans(self.matA.cijkl, rank=4))
cijB = Cijkl2Cij(trans(self.matB.cijkl, rank=4))
a1, a2, a3, V, b1, b2, b3, qhklx, frac = self._definehelpers(hkl,
cijA,
cijB)
# the following line is not generally true! only cubic materials
def abulk_perp(x):
return abs(2 * numpy.pi / numpy.inner(qhklx(x), n) *
numpy.inner(n, hkl))
# can we use abulk_perp here? for cubic materials this should work?!
def ainp(x):
return asub + relax * (abulk_perp(x) - asub)
if config.VERBOSITY >= config.DEBUG:
print("XU.materials.Alloy.ContentB: abulk_perp: %8.5g"
% (abulk_perp(0.)))
def equation(x):
return ((aperp - abulk_perp(x)) +
(ainp(x) - abulk_perp(x)) * frac(x))
x = scipy.optimize.brentq(equation, -0.1, 1.1)
return x
def ContentBasym(self, q_inp, q_perp, hkl, sur):
"""
function that determines the content of B
in the alloy from the reciprocal space position
of an asymmetric peak.
Parameters
----------
q_inp : float
inplane peak position of reflection hkl of the alloy in reciprocal
space
q_perp : float
perpendicular peak position of the reflection hkl of the alloy in
reciprocal space
hkl : list
Miller indices of the measured asymmetric reflection
sur : list
Miller indices of the surface (determines the perpendicular
direction)
Returns
-------
content : float
content of B in the alloy determined from the input variables
list
[a_inplane a_perp, a_bulk_perp(x), eps_inplane, eps_perp];
lattice parameters calculated from the reciprocal space positions
as well as the strain (eps) of the layer
"""
# check input parameters
q_inp = self._checkfinitenumber(q_inp, "q_inp")
q_perp = self._checkfinitenumber(q_perp, "q_perp")
hkl = self._checkarray(hkl, "hkl")
sur = self._checkarray(sur, "sur")
# check if reflection is asymmetric
if math.VecNorm(math.VecCross(self.Q(hkl), self.Q(sur))) < 1.e-8:
raise InputError("Miller indices of a symmetric reflection were"
"given where an asymmetric reflection is needed")
# calculate lattice constants from reciprocal space positions
n = self.Q(sur) / VecNorm(self.Q(sur))
q_hkl = self.Q(hkl)
# the following two lines are not generally true! only cubic materials
ainp = 2 * numpy.pi / abs(q_inp) * VecNorm(VecCross(n, hkl))
aperp = 2 * numpy.pi / abs(q_perp) * abs(VecDot(n, hkl))
# transform the elastic tensors to a coordinate frame attached to the
# surface normal
inp1 = VecCross(n, q_hkl) / VecNorm(VecCross(n, q_hkl))
inp2 = VecCross(n, inp1)
trans = math.CoordinateTransform(inp1, inp2, n)
cijA = Cijkl2Cij(trans(self.matA.cijkl, rank=4))
cijB = Cijkl2Cij(trans(self.matB.cijkl, rank=4))
a1, a2, a3, V, b1, b2, b3, qhklx, frac = self._definehelpers(hkl,
cijA,
cijB)
# the following two lines are not generally true! only cubic materials
def abulk_inp(x):
return abs(2 * numpy.pi / numpy.inner(qhklx(x), inp2) *
VecNorm(VecCross(n, hkl)))
def abulk_perp(x):
return abs(2 * numpy.pi / numpy.inner(qhklx(x), n) *
numpy.inner(n, hkl))
if config.VERBOSITY >= config.DEBUG:
print("XU.materials.Alloy.ContentB: abulk_inp/perp: %8.5g %8.5g"
% (abulk_inp(0.), abulk_perp(0.)))
def equation(x):
return ((aperp - abulk_perp(x)) +
(ainp - abulk_inp(x)) * frac(x))
x = scipy.optimize.brentq(equation, -0.1, 1.1)
eps_inplane = (ainp - abulk_perp(x)) / abulk_perp(x)
eps_perp = (aperp - abulk_perp(x)) / abulk_perp(x)
return x, [ainp, aperp, abulk_perp(x), eps_inplane, eps_perp]
def PseudomorphicMaterial(sub, layer, relaxation=0, trans=None):
"""
    This function returns a material whose lattice is pseudomorphic on a
particular substrate material. The two materials must have similar unit
cell definitions for the algorithm to work correctly, i.e. it does not work
    for combinations of materials with different lattice symmetry. It is also
crucial that the layer object includes values for the elastic tensor.
Parameters
----------
sub : Crystal
substrate material
layer : Crystal
bulk material of the layer, including its elasticity tensor
relaxation : float, optional
degree of relaxation 0: pseudomorphic, 1: relaxed (default: 0)
    trans : Transform, optional
Transformation which transforms lattice directions into a surface
orientated coordinate frame (x, y inplane, z out of plane). If None a
(001) surface geometry of a cubic material is assumed.
Returns
-------
An instance of Crystal holding the new pseudomorphically
strained material.
Raises
------
InputError
If the layer material has no elastic parameters
"""
def get_inplane(lat):
"""determine inplane lattice parameter"""
return (math.VecNorm(lat.GetPoint(trans.inverse((1, 0, 0)))) +
math.VecNorm(lat.GetPoint(trans.inverse((0, 1, 0))))) / 2.
if not trans:
trans = math.Transform(numpy.identity(3))
if numpy.all(layer.cijkl == 0):
raise InputError("'layer' argument needs elastic parameters")
# calculate the strain
asub = get_inplane(sub.lattice)
abulk = get_inplane(layer.lattice)
apar = asub + (abulk - asub) * relaxation
epar = (apar - abulk) / abulk
cT = trans(layer.cijkl, rank=4)
eperp = -epar * (cT[1, 1, 2, 2] + cT[2, 2, 0, 0]) / (cT[2, 2, 2, 2])
eps = trans.inverse(numpy.diag((epar, epar, eperp)), rank=2)
if config.VERBOSITY >= config.INFO_ALL:
print("XU.materials.PseudomorphicMaterial: applying strain (inplane, "
"perpendicular): %.4g %.4g" % (epar, eperp))
# create the pseudomorphic material
pmlatt = copy.deepcopy(layer.lattice)
pmat = Crystal(layer.name, pmlatt, layer.cij)
pmat.ApplyStrain(eps)
return pmat
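# Usage sketch for PseudomorphicMaterial; ``substrate`` and ``film`` are
# placeholders for two compatible Crystal instances, with ``film`` providing a
# non-zero elastic tensor. For the default cubic (001) geometry the in-plane
# lattice parameter of the result follows the substrate when relaxation=0:
# >>> strained = PseudomorphicMaterial(substrate, film, relaxation=0)
# >>> strained.a   # close to substrate.a for fully pseudomorphic growth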
| gpl-2.0 |
hsiaoyi0504/scikit-learn | sklearn/linear_model/logistic.py | 105 | 56686 | """
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
squared_norm)
from ..utils.optimize import newton_cg
from ..utils.validation import (as_float_array, DataConversionWarning,
check_X_y)
from ..utils.fixes import expit
from ..externals.joblib import Parallel, delayed
from ..cross_validation import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
return w, c, y * z
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
_, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
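# Sketch (not part of the scikit-learn source): the analytic gradient from
# _logistic_loss_and_grad can be verified against _logistic_loss numerically,
# e.g. with scipy's check_grad on a small random problem; all values below are
# arbitrary illustrations.
# >>> rng = np.random.RandomState(0)
# >>> Xs, ys, ws = rng.randn(20, 5), np.sign(rng.randn(20)), rng.randn(5)
# >>> optimize.check_grad(_logistic_loss,
# ...                     lambda w, *args: _logistic_loss_and_grad(w, *args)[1],
# ...                     ws, Xs, ys, 1.)   # expected to be tiny (~1e-6)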
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
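# Sketch of how the Hessian-vector product callable returned by
# `_logistic_grad_hess` is consumed, e.g. by the newton-cg solver (toy data,
# kept as a comment):
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     X = rng.randn(30, 4)
#     y = np.sign(rng.randn(30))
#     grad, Hs = _logistic_grad_hess(np.zeros(4), X, y, alpha=1.0)
#     Hv = Hs(rng.randn(4))   # Hessian times an arbitrary vector, same shape
#     # newton_cg uses such products to solve the Newton system without ever
#     # forming the full Hessian matrix explicitly.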
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
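# Illustrative call of `_multinomial_loss_grad` on a tiny synthetic multiclass
# problem (kept as a comment); the labels are binarized the same way
# `logistic_regression_path` does it with LabelBinarizer:
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     X = rng.randn(15, 4)
#     y = rng.randint(3, size=15)
#     Y = LabelBinarizer().fit_transform(y)      # one column per class in y
#     w = np.zeros(Y.shape[1] * (4 + 1))         # per-class weights + intercepts
#     loss, grad, p = _multinomial_loss_grad(w, X, Y, 1.0, np.ones(15))
#     # rows of `p` sum to one; `grad` holds one block of parameters per class.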
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
def _check_solver_option(solver, multi_class, penalty, dual):
if solver not in ['liblinear', 'newton-cg', 'lbfgs']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg and lbfgs solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=True,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
        True will be useful in cases when logistic_regression_path
is called repeatedly with the same data, as y is modified
along the path.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
Notes
-----
    You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual)
# Preprocessing.
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
_, n_features = X.shape
check_consistent_length(X, y)
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "auto", then
    # the class_weights are assigned after masking the labels with an OvR.
sample_weight = np.ones(X.shape[0])
le = LabelEncoder()
if isinstance(class_weight, dict):
if solver == "liblinear":
if classes.size == 2:
# Reconstruct the weights with keys 1 and -1
temp = {1: class_weight[pos_class],
-1: class_weight[classes[0]]}
class_weight = temp.copy()
else:
raise ValueError("In LogisticRegressionCV the liblinear "
"solver cannot handle multiclass with "
"class_weight of type dict. Use the lbfgs, "
"newton-cg solvers or set "
"class_weight='auto'")
else:
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
    # For doing an OvR, we need to mask the labels first. For the
    # multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = [-1, 1]
mask = (y == pos_class)
y[mask] = 1
y[~mask] = -1
        # To take care of object dtypes, i.e. 1 and -1 are in the form of
# strings.
y = as_float_array(y, copy=False)
else:
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
if Y_bin.shape[1] == 1:
Y_bin = np.hstack([1 - Y_bin, Y_bin])
w0 = np.zeros((Y_bin.shape[1], n_features + int(fit_intercept)),
order='F')
mask_classes = classes
if class_weight == "auto":
class_weight_ = compute_class_weight(class_weight, mask_classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_vectors = classes.size
if n_vectors == 2:
n_vectors = 1
if (coef.shape[0] != n_vectors or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
w0 = w0.ravel()
target = Y_bin
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
else:
target = y
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
coefs = list()
for C in Cs:
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0 = newton_cg(hess, func, grad, w0, args=args, maxiter=max_iter,
tol=tol)
elif solver == 'liblinear':
coef_, intercept_, _, = _fit_liblinear(
X, y, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol, random_state)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0)
return coefs, np.array(Cs)
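# A minimal sketch of calling `logistic_regression_path` directly (synthetic
# data, kept as a comment): fit a binary problem for a few values of C and
# inspect the returned coefficients.
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     X = rng.randn(50, 5)
#     y = (X[:, 0] + 0.1 * rng.randn(50) > 0).astype(int)
#     coefs, Cs = logistic_regression_path(X, y, Cs=[0.1, 1.0, 10.0],
#                                          fit_intercept=True, solver='lbfgs')
#     # len(coefs) == 3; each entry has n_features + 1 values, intercept last.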
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, copy=True, intercept_scaling=1.,
multi_class='ovr'):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
        regularization strength. If Cs is an int, then a grid of Cs
values are chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable
For a list of scoring functions that can be used, look at
:mod:`sklearn.metrics`. The default scoring option used is
accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
        True will be useful in cases when ``_log_reg_scoring_path`` is called
repeatedly with the same data, as y is modified along the path.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test[mask] = 1
y_test[~mask] = -1
# To deal with object dtypes, we need to convert into an array of floats.
y_test = as_float_array(y_test, copy=False)
coefs, Cs = logistic_regression_path(X_train, y_train, Cs=Cs,
fit_intercept=fit_intercept,
solver=solver,
max_iter=max_iter,
class_weight=class_weight,
copy=copy, pos_class=pos_class,
multi_class=multi_class,
tol=tol, verbose=verbose,
dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores)
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr' and uses the
cross-entropy loss, if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs' and
'newton-cg' solvers.)
This class implements regularized logistic regression using the
`liblinear` library, newton-cg and lbfgs solvers. It can handle both
dense and sparse input. Use C-ordered arrays or CSR matrices containing
64-bit floats for optimal performance; any other input format will be
converted (and copied).
The newton-cg and lbfgs solvers support only L2 regularization with primal
formulation. The liblinear solver supports both L1 and L2 regularization,
with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
max_iter : int
Useful only for the newton-cg and lbfgs solvers. Maximum number of
iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : int
Maximum of the actual number of iterations across all classes.
Valid only for the liblinear solver.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order="C")
self.classes_ = np.unique(y)
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if self.solver == 'liblinear':
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state)
return self
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
for ind, class_ in enumerate(classes_):
coef_, _ = logistic_regression_path(
X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight)
self.coef_.append(coef_[0])
self.coef_ = np.squeeze(self.coef_)
        # For the binary case, this gets squeezed to a 1-D array.
if self.coef_.ndim == 1:
self.coef_ = self.coef_[np.newaxis, :]
self.coef_ = np.asarray(self.coef_)
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
return self._predict_proba_lr(X)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
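# Typical estimator-level usage (kept as a comment because this file is the
# implementation module itself): fit on a small dataset and inspect the
# predicted class probabilities.
#
#     from sklearn.datasets import load_iris
#     from sklearn.linear_model import LogisticRegression
#     iris = load_iris()
#     clf = LogisticRegression(C=1.0, solver='lbfgs', multi_class='multinomial')
#     clf.fit(iris.data, iris.target)
#     proba = clf.predict_proba(iris.data[:5])   # shape (5, 3), rows sum to 1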
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg or
LBFGS optimizer. The newton-cg and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
    we warm start along the path, i.e. we guess the initial coefficients of the
    present fit to be the coefficients obtained after convergence in the
    previous fit, so it is expected to be faster for high-dimensional dense data.
For a multiclass problem, the hyperparameters for each class are computed
    using the best scores obtained by doing a one-vs-rest in parallel across all
folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.cross_validation` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
    scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
refit : bool
        If set to True, the scores are averaged across all folds, and the
        coefs and the C that correspond to the best score are taken, and a
        final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
intercept_scaling : float, default 1.
Useful only if solver is liblinear.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
`coef_` is readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
        It is available only when the parameter fit_intercept is set to True
        and is of shape (1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr'):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, dtype=None)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=True)
folds = list(cv)
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not(isinstance(self.class_weight, dict) or
self.class_weight in ['balanced', 'auto']):
raise ValueError("class_weight provided should be a "
"dict or 'balanced'")
path_func = delayed(_log_reg_scoring_path)
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=self.class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
            # Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
else:
coefs_paths, Cs, scores = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty,
class_weight=self.class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1))
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
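# Illustrative use of the cross-validated estimator (kept as a comment): the
# best C per class ends up in `C_` and the per-fold score grids in `scores_`.
#
#     from sklearn.datasets import load_iris
#     from sklearn.linear_model import LogisticRegressionCV
#     iris = load_iris()
#     clf = LogisticRegressionCV(Cs=10, cv=3, solver='lbfgs',
#                                multi_class='ovr').fit(iris.data, iris.target)
#     print(clf.C_)                  # one value of C per class
#     print(clf.scores_[0].shape)    # (n_folds, len(Cs_)) for class label 0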
| bsd-3-clause |
fredhusser/scikit-learn | benchmarks/bench_sparsify.py | 323 | 3372 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features // 2:]] = 0 # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
| bsd-3-clause |
nbarba/py3DRec | image_sequence.py | 1 | 2644 | import numpy as np;
import pandas as pd
from PIL import Image, ImageDraw, ImageFont
class ImageSequence:
'''
Class to hold the necessary information about the image sequence
'''
def __init__(self, filename):
self.load_features(filename)
    @property
    def feat_2d(self):
        return self._feat_2d
    @property
    def length(self):
        return self._length
    @property
    def number_of_features(self):
        return self._number_of_features
    @property
    def width(self):
        return self._width
    @property
    def height(self):
        return self._height
def load_features(self, filename):
'''
Method that loads a txt file containing 2D coordinates of image features. The format of each line should be:
[x y feature_number image_number image_filename]
'''
features_df = pd.read_csv(filename, delimiter=r"\s+", index_col=False)
# get length of sequence and number of features
self._length = int(features_df['image_id'].max())
self._number_of_features = int(features_df['feature_id'].max())
# get the 2d features
        self._feat_2d = np.zeros(shape=[self._length, 4, self._number_of_features])
for i in range(1, self._length + 1):
self.feat_2d[i - 1, :, :] = np.transpose(features_df.loc[features_df['image_id'] == i].values)[0:4]
# keep the image filenames
self._image_filenames = features_df.image_filename.unique()
# get the image sequence width and height
image = Image.open(self._image_filenames[0])
self._width = 1024 # image.width
self._height = 768 # image.height
def get_normalized_coordinates(self):
'''
Method to normalize the coordinates to the range [-1,1]
'''
        mm = (self._width + self._height) / 2
rows = (self.feat_2d[:, 0] - np.ones(self.number_of_features) * self._width) / mm
cols = (self.feat_2d[:, 1] - np.ones(self.number_of_features) * self._height) / mm
return np.dstack((rows, cols)).swapaxes(1, 2)
def show(self):
'''
Method to display the sequence, with the 2D features superimposed
'''
font = ImageFont.truetype('/Library/fonts/arial.ttf', 30)
for i in range(0, self.length):
filename = self._image_filenames[i]
image = Image.open(filename)
draw = ImageDraw.Draw(image)
for j in range(0, self.number_of_features):
                x = self.feat_2d[i, :, j][0]
y = image.height - self.feat_2d[i, :, j][1]
draw.text((x, y), "+" + str(j), font=font, fill=(0, 255, 0))
image.show()
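# A usage sketch (kept as a comment; the feature file name is hypothetical, but
# its expected column layout is the one described in `load_features`):
#
#     seq = ImageSequence('features.txt')
#     norm = seq.get_normalized_coordinates()
#     # `norm` maps each (x, y) to ((x - width) / m, (y - height) / m) with
#     # m = (width + height) / 2, as computed above.
#     seq.show()   # overlays every tracked feature index on its frame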
| mit |
Titan-C/scikit-learn | sklearn/manifold/setup.py | 43 | 1283 | import os
from os.path import join
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("manifold", parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension("_utils",
sources=["_utils.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
cblas_libs, blas_info = get_blas_info()
eca = blas_info.pop('extra_compile_args', [])
eca.append("-O4")
config.add_extension("_barnes_hut_tsne",
libraries=cblas_libs,
sources=["_barnes_hut_tsne.pyx"],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=eca, **blas_info)
config.add_subpackage('tests')
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
sanketloke/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py | 102 | 5177 | """
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates the behaviour of the
accuracy of the nearest neighbor queries of Locality Sensitive Hashing
Forest as the number of candidates and the number of estimators (trees)
vary.
In the first plot, accuracy is measured with the number of candidates. Here,
the term "number of candidates" refers to maximum bound for the number of
distinct points retrieved from each tree to calculate the distances. Nearest
neighbors are selected from this pool of candidates. Number of estimators is
maintained at three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50. Number of trees
is varied and the accuracy is plotted against those values. To measure the
accuracy, the true nearest neighbors are required; therefore
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=10,
random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
# Set `n_candidate` values
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]),
dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iteration to estimate
# expected accuracy and standard deviation displayed as error bars in
# the plots
for j, value in enumerate(n_estimators_for_candidate_value):
for i, n_candidates in enumerate(n_candidates_values):
accuracy_c = []
for seed in range(n_iter):
lshf = LSHForest(n_estimators=value,
n_candidates=n_candidates, n_neighbors=1,
random_state=seed)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query,
return_distance=False)
accuracy_c.append(np.sum(np.equal(neighbors_approx,
neighbors_exact)) /
n_queries)
stds_accuracies[j, i] = np.std(accuracy_c)
accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
for i, n_estimators in enumerate(n_estimators_values):
lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
label = 'n_estimators = %d ' % n_estimators
plt.plot(n_candidates_values, accuracies_c[i, :],
'o-', c=colors[i], label=label)
plt.errorbar(n_candidates_values, accuracies_c[i, :],
stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', prop=dict(size='small'))
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
| bsd-3-clause |
kernc/scikit-learn | examples/calibration/plot_calibration.py | 33 | 4794 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others being under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing step. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import train_test_split
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
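# `order` sorts the test samples by their uncalibrated predicted probability,
# so all curves below are drawn against the same sample ordering.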
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
| bsd-3-clause |
ratschlab/oqtans_tools | EasySVM/0.3.3/build/lib.linux-x86_64-2.7/esvm/plots.py | 4 | 8318 | """
This module contains code for commonly used plots
"""
#############################################################################################
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, see http://www.gnu.org/licenses #
# or write to the Free Software Foundation, Inc., 51 Franklin Street, #
# Fifth Floor, Boston, MA 02110-1301 USA #
# #
#############################################################################################
import sys
import random
import numpy
import warnings
import shutil
from shogun.Features import BinaryLabels
from shogun.Evaluation import *
def plotroc(output, LTE, draw_random=False, figure_fname="", roc_label='ROC'):
"""Plot the receiver operating characteristic curve"""
import pylab
import matplotlib
pylab.figure(1,dpi=300,figsize=(8,8))
fontdict=dict(family="serif", weight="bold",size=7,y=1.05) ; # family="cursive"
pm=ROCEvaluation()
pm.evaluate(BinaryLabels(numpy.array(output)), BinaryLabels(numpy.array(LTE)))
points=pm.get_ROC()
points=numpy.array(points).T # for pylab.plot
pylab.plot(points[0], points[1], 'b-', label=roc_label)
if draw_random:
pylab.plot([0, 1], [0, 1], 'r-', label='random guessing')
pylab.axis([0, 1, 0, 1])
ticks=numpy.arange(0., 1., .1, dtype=numpy.float64)
pylab.xticks(ticks,size=10)
pylab.yticks(ticks,size=10)
pylab.xlabel('1 - specificity (false positive rate)',size=10)
pylab.ylabel('sensitivity (true positive rate)',size=10)
pylab.legend(loc='lower right') #, prop = matplotlib.font_manager.FontProperties('small'))
    if figure_fname:
warnings.filterwarnings('ignore','Could not match*')
tempfname = figure_fname + '.png'
pylab.savefig(tempfname)
shutil.move(tempfname,figure_fname)
auROC=pm.get_auROC()
return auROC ;
def plotprc(output, LTE, figure_fname="", prc_label='PRC'):
"""Plot the precision recall curve"""
import pylab
import matplotlib
pylab.figure(2,dpi=300,figsize=(8,8))
pm=PRCEvaluation()
pm.evaluate(BinaryLabels(numpy.array(output)), BinaryLabels(numpy.array(LTE)))
points=pm.get_PRC()
points=numpy.array(points).T # for pylab.plot
pylab.plot(points[0], points[1], 'b-', label=prc_label)
pylab.axis([0, 1, 0, 1])
ticks=numpy.arange(0., 1., .1, dtype=numpy.float64)
pylab.xticks(ticks,size=10)
pylab.yticks(ticks,size=10)
pylab.xlabel('sensitivity (true positive rate)',size=10)
pylab.ylabel('precision (1 - false discovery rate)',size=10)
pylab.legend(loc='lower right')
    if figure_fname:
warnings.filterwarnings('ignore','Could not match*')
tempfname = figure_fname + '.png'
pylab.savefig(tempfname)
shutil.move(tempfname,figure_fname)
auPRC=pm.get_auPRC()
return auPRC ;
def plotcloud(cloud, figure_fname="", label='cloud'):
"""Plot the cloud of points (the first two dimensions only)"""
import pylab
import matplotlib
pylab.figure(1,dpi=300,figsize=(8,8))
pos = []
neg = []
for i in xrange(len(cloud)):
if cloud[i][0]==1:
pos.append(cloud[i][1:])
elif cloud[i][0]==-1:
neg.append(cloud[i][1:])
fontdict=dict(family="serif", weight="bold",size=10,y=1.05) ; # family="cursive"
pylab.title(label, fontdict)
points=numpy.array(pos).T # for pylab.plot
pylab.plot(points[0], points[1], 'b+', label='positive')
points=numpy.array(neg).T # for pylab.plot
pylab.plot(points[0], points[1], 'rx', label='negative')
#pylab.axis([0, 1, 0, 1])
#ticks=numpy.arange(0., 1., .1, dtype=numpy.float64)
#pylab.xticks(ticks,size=10)
#pylab.yticks(ticks,size=10)
pylab.xlabel('dimension 1',size=10)
pylab.ylabel('dimension 2',size=10)
pylab.legend(loc='lower right')
    if figure_fname:
warnings.filterwarnings('ignore','Could not match*')
tempfname = figure_fname + '.png'
pylab.savefig(tempfname)
shutil.move(tempfname,figure_fname)
def plot_poims(poimfilename, poim, max_poim, diff_poim, poim_totalmass, poimdegree, max_len):
"""Plot a summary of the information in poims"""
import pylab
import matplotlib
pylab.figure(3, dpi=300, figsize=(8,8))
# summary figures
fontdict=dict(family="serif", weight="bold",size=7,y=1.05) ; # family="cursive"
pylab.subplot(3,2,1)
pylab.title('Total POIM Mass', fontdict)
pylab.plot(poim_totalmass) ;
pylab.ylabel('weight mass', size=5)
pylab.subplot(3,2,3)
pylab.title('POIMs', fontdict)
pylab.pcolor(max_poim, shading='flat') ;
pylab.subplot(3,2,5)
pylab.title('Differential POIMs', fontdict)
pylab.pcolor(diff_poim, shading='flat') ;
for plot in [3, 5]:
pylab.subplot(3,2,plot)
ticks=numpy.arange(1., poimdegree+1, 1, dtype=numpy.float64)
ticks_str = []
for i in xrange(0, poimdegree):
ticks_str.append("%i" % (i+1))
ticks[i] = i + 0.5
pylab.yticks(ticks, ticks_str)
pylab.ylabel('degree', size=5)
# per k-mer figures
fontdict=dict(family="serif", weight="bold",size=7,y=1.04) ; # family="cursive"
# 1-mers
pylab.subplot(3,2,2)
pylab.title('1-mer Positional Importance', fontdict)
pylab.pcolor(poim[0], shading='flat') ;
ticks_str = ['A', 'C', 'G', 'T']
ticks = [0.5, 1.5, 2.5, 3.5]
pylab.yticks(ticks, ticks_str, size=5)
pylab.axis([0, max_len, 0, 4])
# 2-mers
pylab.subplot(3,2,4)
pylab.title('2-mer Positional Importance', fontdict)
pylab.pcolor(poim[1], shading='flat') ;
i=0 ;
ticks=[] ;
ticks_str=[] ;
for l1 in ['A', 'C', 'G', 'T']:
for l2 in ['A', 'C', 'G', 'T']:
ticks_str.append(l1+l2)
ticks.append(0.5+i) ;
i+=1 ;
pylab.yticks(ticks, ticks_str, fontsize=5)
pylab.axis([0, max_len, 0, 16])
# 3-mers
pylab.subplot(3,2,6)
pylab.title('3-mer Positional Importance', fontdict)
pylab.pcolor(poim[2], shading='flat') ;
i=0 ;
ticks=[] ;
ticks_str=[] ;
for l1 in ['A', 'C', 'G', 'T']:
for l2 in ['A', 'C', 'G', 'T']:
for l3 in ['A', 'C', 'G', 'T']:
if numpy.mod(i,4)==0:
ticks_str.append(l1+l2+l3)
ticks.append(0.5+i) ;
i+=1 ;
pylab.yticks(ticks, ticks_str, fontsize=5)
pylab.axis([0, max_len, 0, 64])
# x-axis on last two figures
for plot in [5, 6]:
pylab.subplot(3,2,plot)
pylab.xlabel('sequence position', size=5)
# finishing up
for plot in xrange(0,6):
pylab.subplot(3,2,plot+1)
pylab.xticks(fontsize=5)
for plot in [1,3,5]:
pylab.subplot(3,2,plot)
pylab.yticks(fontsize=5)
pylab.subplots_adjust(hspace=0.35) ;
# write to file
warnings.filterwarnings('ignore','Could not match*')
pylab.savefig('/tmp/temppylabfig.png')
shutil.move('/tmp/temppylabfig.png',poimfilename)
| mit |
HoliestCow/ece692_deeplearning | project5/data/make_shuffling_integrations.py | 1 | 7172 |
import numpy as np
import matplotlib.pyplot as plt
import os.path
from rebin import rebin
import glob
from random import shuffle
from joblib import Parallel, delayed
# import time
import h5py
def label_datasets():
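    """Parse answers.csv and return a dict mapping each run name to its
    source label and time, i.e. {run_name: {'source': <label string>, 'time': <float>}}."""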
targetfile = '/home/holiestcow/Documents/zephyr/datasets/muse/trainingData/answers.csv'
head, tail = os.path.split(targetfile)
# filename = []
source_labels = {}
id2string = {0: 'Background',
1: 'HEU',
2: 'WGPu',
3: 'I131',
4: 'Co60',
5: 'Tc99',
6: 'HEUandTc99'}
f = open(targetfile, 'r')
a = f.readlines()
for i in range(len(a)):
line = a[i].strip()
if line[0] == 'R':
continue
parsed = line.split(',')
filename = parsed[0]
source = parsed[1]
source_time = parsed[2]
source_labels[filename] = {'source': id2string[int(source)],
'time': float(source_time)}
f.close()
return source_labels
def parse_datafiles(targetfile, binno, outdir):
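    """Integrate a list-mode event file (rows of "delta_time,energy") into
    successive energy spectra of ``binno`` bins (1024 in the calls below) over
    0-3000 (keV assumed), one spectrum per 1E6 accumulated time units
    (presumably microseconds, i.e. one second), and save them to ``outdir`` as
    a .npy array whose first column is the spectrum index."""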
item = targetfile
# for item in filelist:
f = open(item, 'r')
a = f.readlines()
    binnumber = binno  # use the requested number of energy bins (the callers pass 1024)
counter = 0
spectra = np.zeros((0, binnumber))
timetracker = 0
energy_deposited = []
for i in range(len(a)):
b = a[i].strip()
b_parsed = b.split(',')
event_time = int(b_parsed[0])
energy_deposited += [float(b_parsed[1])]
timetracker += event_time
# print(timetracker)
if timetracker >= 1E6:
timetracker = 0
source_id = 0
counts, energy_edges = np.histogram(energy_deposited, bins=binnumber, range=(0.0, 3000.0))
spectra = np.vstack((spectra, counts))
counter += 1
# print(max(energy_deposited))
energy_deposited = []
# if counter >= 100:
# break
# print(np.sum(spectra[0, :]))
time = np.linspace(0, counter, counter)
time = time.reshape((time.shape[0], 1))
# print(time.shape, spectra.shape)
tosave = np.hstack((time, spectra))
f.close()
head, tail = os.path.split(item)
print(tail, spectra.shape)
# f = open(os.path.join('./integrations', tail), 'w')
# np.savetxt(f, tosave, delimiter=',')
# f.close()
np.save(os.path.join(outdir, tail[:-4] + '.npy'), tosave)
return
def main():
# only need to do this once.
binnumber = 1024
ncores = 4
nsamples = 50000
# nsamples = 0
filename = 'naive_dataset'
id2string = {0: 'Background',
1: 'HEU',
2: 'WGPu',
3: 'I131',
4: 'Co60',
5: 'Tc99',
6: 'HEUandTc99'}
string2id = {'Background': 0,
'HEU': 1,
'WGPu': 2,
'I131': 3,
'Co60': 4,
'Tc99': 5,
'HEUandTc99': 6}
# sequence_length = 30 # 30 seconds used to guess the next one
filelist = glob.glob('/home/holiestcow/Documents/zephyr/datasets/muse/trainingData/1*.csv')
# shuffle(filelist)
# Parallel(n_jobs=ncores)(delayed(parse_datafiles)(item, binnumber, './integrations') for item in filelist)
# test_filelist = glob.glob('/home/holiestcow/Documents/zephyr/datasets/muse/testData/2*.csv')
# HACK: RIGHT HERE
test_filelist = glob.glob('./test_integrations/2*.npy')
# Parallel(n_jobs=ncores)(delayed(parse_datafiles)(item, binnumber, './test_integrations') for item in test_filelist)
labels = label_datasets()
# NOTE: Slice for background segments
f = h5py.File(filename + '.h5', 'w')
train = f.create_group('training')
test = f.create_group('testing')
validation = f.create_group('validation')
number_of_testing_files = 4800
number_of_training_files = len(labels.keys()) - number_of_testing_files # The last 10000 are for testing
test2train_ratio = number_of_testing_files / number_of_training_files
tostore_spectra = np.zeros((nsamples, 1024))
tostore_labels = np.zeros((nsamples, 1))
filelist = list(labels.keys())
for i in range(nsamples):
# create training dataset
random_file = filelist[np.random.randint(number_of_training_files)]
if i % 100 == 0:
print('training sample: {}'.format(i))
x = np.load('./integrations/' + random_file + '.npy')
# time = x[:, 0]
start = np.random.randint(x.shape[0])
source = 'Background'
if labels[random_file]['source'] != 'Background' and start >= 30:
start = int(labels[random_file]['time'])
source = labels[random_file]['source']
spectra = x[start, 1:]
tostore_spectra[i, :] = spectra
tostore_labels[i] = int(string2id[source])
# g = train.create_group('sample_' + str(i))
# g.create_dataset('spectra', data=spectra, compression='gzip')
# g.create_dataset('spectra', data=spectra)
# g.create_dataset('label', data=int(string2id[source]))
train.create_dataset('spectra', data=tostore_spectra, compression='gzip')
train.create_dataset('labels', data=tostore_labels, compression='gzip')
tostore_spectra = np.zeros((int(nsamples * test2train_ratio), 1024))
tostore_labels = np.zeros((int(nsamples * test2train_ratio), 1))
for i in range(int(nsamples * test2train_ratio)):
# create training dataset
random_file = filelist[number_of_training_files + np.random.randint(number_of_testing_files)]
if i % 100 == 0:
print('testing sample: {}'.format(i))
x = np.load('./integrations/' + random_file + '.npy')
# time = x[:, 0]
start = np.random.randint(x.shape[0])
source = 'Background'
if labels[random_file]['source'] != 'Background' and start >= 30:
start = int(labels[random_file]['time'])
source = labels[random_file]['source']
spectra = x[start, 1:]
tostore_spectra[i, :] = spectra
tostore_labels[i] = int(string2id[source])
# g = test.create_group('sample_' + str(i))
# g.create_dataset('spectra', data=spectra, compression='gzip')
# g.create_dataset('label', data=int(string2id[source]))
test.create_dataset('spectra', data=tostore_spectra, compression='gzip')
test.create_dataset('labels', data=tostore_labels, compression='gzip')
# this is for the validation set, where i have to analyze
# each file individual
for i in range(len(test_filelist)):
if i % 100 == 0:
print('validation sample {}'.format(i))
filename = test_filelist[i]
head, tail = os.path.split(filename)
dataname = tail[:-4]
x = np.load(os.path.join('./test_integrations', dataname + '.npy'))
t = x[:, 0]
spectra = x[:, 1:]
file_sample = validation.create_group(dataname)
file_sample.create_dataset('time', data=t, compression='gzip')
file_sample.create_dataset('spectra', data=spectra, compression='gzip')
f.close()
return
main()
| mit |
slinderman/pyhawkes | data/chalearn/make_figure.py | 1 | 1779 |
import pickle
import os
import gzip
import numpy as np
import matplotlib.pyplot as plt
from hips.plotting.layout import create_figure
from hips.plotting.colormaps import harvard_colors
def make_figure_a(S, F, C):
"""
Plot fluorescence traces, filtered fluorescence, and spike times
    for a subset of neurons (the indices listed in ks)
"""
col = harvard_colors()
dt = 0.02
T_start = 0
T_stop = 1 * 50 * 60
t = dt * np.arange(T_start, T_stop)
ks = [0,1]
nk = len(ks)
fig = create_figure((3,3))
for ind,k in enumerate(ks):
ax = fig.add_subplot(nk,1,ind+1)
ax.plot(t, F[T_start:T_stop, k], color=col[1], label="$F$") # Plot the raw flourescence in blue
ax.plot(t, C[T_start:T_stop, k], color=col[0], lw=1.5, label="$\widehat{F}$") # Plot the filtered flourescence in red
spks = np.where(S[T_start:T_stop, k])[0]
ax.plot(t[spks], C[spks,k], 'ko', label="S") # Plot the spike times in black
# Make a legend
if ind == 0:
# Put a legend above
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=3, mode="expand", borderaxespad=0.,
prop={'size':9})
# Add labels
ax.set_ylabel("$F_%d(t)$" % (k+1))
if ind == nk-1:
ax.set_xlabel("Time $t$ [sec]")
# Format the ticks
ax.set_ylim([-0.1,1.0])
plt.locator_params(nbins=5, axis="y")
plt.subplots_adjust(left=0.2, bottom=0.2)
fig.savefig("figure3a.pdf")
plt.show()
data_path = os.path.join("data", "chalearn", "small", "network1_oopsi.pkl.gz")
with gzip.open(data_path, 'r') as f:
P, F, Cf, network, pos = pickle.load(f)
S_full = (P > 0.1).astype(np.int)
make_figure_a(S_full, F, Cf) | mit |
zhonghualiu/FaST-LMM | fastlmm/pyplink/setup.py | 1 | 2856 | """
file to set up python package, see http://docs.python.org/2/distutils/setupscript.html for details.
"""
import platform
import os
import sys
import shutil
from distutils.core import setup
from distutils.extension import Extension
from distutils.command.clean import clean as Clean
try:
from Cython.Distutils import build_ext
except Exception:
print "cython needed for installation, please install cython first"
sys.exit()
try:
import numpy
except Exception:
print "numpy needed for installation, please install numpy first"
sys.exit()
def readme():
with open('README.txt') as f:
return f.read()
class CleanCommand(Clean):
description = "Remove build directories, and compiled files (including .pyc)"
def run(self):
Clean.run(self)
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('fastlmm'):
for filename in filenames:
if (filename.endswith('.so') or filename.endswith('.pyd')
#or filename.endswith('.dll')
#or filename.endswith('.pyc')
):
os.unlink(os.path.join(dirpath, filename))
# set up macro
if platform.system() == "Darwin":
    macros = [("__APPLE__", "1")]
elif "win" in platform.system().lower():
    macros = [("_WIN32", "1")]
else:
    macros = []  # e.g. Linux: no platform-specific defines needed
ext = [Extension("fastlmm.util.stats.quadform.qfc_src.wrap_qfc", ["fastlmm/util/stats/quadform/qfc_src/wrap_qfc.pyx", "fastlmm/util/stats/quadform/qfc_src/QFC.cpp"], language="c++", define_macros=macros)]
setup(
name='fastlmm',
version='0.1',
description='Fast GWAS',
long_description=readme(),
keywords='gwas bioinformatics LMMs MLMs',
url='',
author='MSR',
author_email='...',
license='non-commercial (MSR-LA)',
packages=[
"fastlmm/association/tests",
"fastlmm/association",
"fastlmm/external/sklearn/externals",
"fastlmm/external/sklearn/metrics",
"fastlmm/external/sklearn",
"fastlmm/external/util",
"fastlmm/external",
"fastlmm/feature_selection",
"fastlmm/inference/bingpc",
"fastlmm/inference",
"fastlmm/pyplink/altset_list",
"fastlmm/pyplink/snpreader",
"fastlmm/pyplink/snpset",
"fastlmm/pyplink",
"fastlmm/util/runner",
"fastlmm/util/stats/quadform",
"fastlmm/util/stats",
"fastlmm/util",
"fastlmm"
],
install_requires=['cython', 'numpy', 'scipy', 'pandas', 'scikit-learn', 'matplotlib'],
#zip_safe=False,
# extensions
cmdclass = {'build_ext': build_ext, 'clean': CleanCommand},
ext_modules = ext,
include_dirs = [numpy.get_include()],
)
| apache-2.0 |
equialgo/scikit-learn | examples/svm/plot_svm_regression.py | 120 | 1520 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
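# Optional aside, not in the original example: each fitted SVR also exposes
# score(X, y), the coefficient of determination R^2 on the given data,
# e.g. svr_rbf.score(X, y).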
###############################################################################
# look at the results
lw = 2
plt.scatter(X, y, color='darkorange', label='data')
# plt.hold('on')  # deprecated and removed in newer Matplotlib; repeated plot() calls already draw on the same axes
plt.plot(X, y_rbf, color='navy', lw=lw, label='RBF model')
plt.plot(X, y_lin, color='c', lw=lw, label='Linear model')
plt.plot(X, y_poly, color='cornflowerblue', lw=lw, label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
ryklith/pyltesim | plotting/sinr_analysis_plot_ICC2013.py | 1 | 1189 | #!/usr/bin/env python
''' Plot a cdf from a csv file
File: plot_CDF_from_file.py
'''
__author__ = "Hauke Holtkamp"
__credits__ = "Hauke Holtkamp"
__license__ = "unknown"
__version__ = "unknown"
__maintainer__ = "Hauke Holtkamp"
__email__ = "[email protected]"
__status__ = "Development"
def plot_cdf_from_file(filename):
"""Open file, store cdf to .pdf and .png"""
import numpy as np
import matplotlib.pyplot as plt
data = np.genfromtxt(filename, delimiter=',')
# convert zeros to nans and clear empty rows
data[np.where(data==0)] = np.nan
data = data[~np.isnan(data).all(1)]
if not data.size:
print 'No data in ' + str(filename)
# SINR data is best presented in dB
from utils import utils
data = utils.WTodB(data)
import cdf_plot
label = [ "Iteration %d" %i for i in np.arange(data.shape[0])+1]
cdf_plot.cdf_plot(data, '-', label=label)
# plt.xlabel(xlabel)
# plt.ylabel(ylabel)
# plt.title(title)
plt.savefig(filename+'.pdf', format='pdf')
plt.savefig(filename+'.png', format='png')
if __name__ == '__main__':
import sys
filename = sys.argv[1]
plot_cdf_from_file(filename)
| gpl-2.0 |
stuart-knock/bokeh | bokeh/charts/builder/donut_builder.py | 31 | 8206 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Donut class, which lets you build your Donut charts by just passing
the arguments to the Chart class and calling the proper functions.
It also adds a new chained stacked method.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division
from math import pi
import pandas as pd
from ..utils import cycle_colors, polar_to_cartesian
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, GlyphRenderer, Range1d
from ...models.glyphs import AnnularWedge, Text, Wedge
from ...properties import Any, Bool, Either, List
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Donut(values, cat=None, width=800, height=800, xgrid=False, ygrid=False, **kws):
""" Creates a Donut chart using :class:`DonutBuilder <bokeh.charts.builder.donut_builder.DonutBuilder>`
to render the geometry from values and cat.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
        cat (list or bool, optional): list of strings representing the categories.
Defaults to None.
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.charts import Donut, output_file, show
# dict, OrderedDict, lists, arrays and DataFrames are valid inputs
xyvalues = [[2., 5., 3.], [4., 1., 4.], [6., 4., 3.]]
donut = Donut(xyvalues, ['cpu1', 'cpu2', 'cpu3'])
output_file('donut.html')
show(donut)
"""
return create_and_build(
DonutBuilder, values, cat=cat, width=width, height=height,
xgrid=xgrid, ygrid=ygrid, **kws
)
class DonutBuilder(Builder):
"""This is the Donut class and it is in charge of plotting
    Donut charts in an easy and intuitive way.
Essentially, it provides a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the donut slices and angles.
And finally add the needed glyphs (Wedges and AnnularWedges) taking
the references from the source.
"""
cat = Either(Bool, List(Any), help="""
    List of strings representing the categories. (Defaults to None.)
""")
def _process_data(self):
"""Take the chart data from self._values.
It calculates the chart properties accordingly (start/end angles).
        Then builds a dict containing references to all the calculated
points to be used by the Wedge glyph inside the ``_yield_renderers`` method.
"""
dd = dict(zip(self._values.keys(), self._values.values()))
self._df = df = pd.DataFrame(dd)
self._groups = df.index = self.cat
df.columns = self._values.keys()
# Get the sum per category
aggregated = df.T.sum()
# Get the total (sum of all categories)
self._total_units = total = aggregated.sum()
radians = lambda x: 2*pi*(x/total)
angles = aggregated.map(radians).cumsum()
end_angles = angles.tolist()
start_angles = [0] + end_angles[:-1]
colors = cycle_colors(self.cat, self.palette)
self.set_and_get("", "colors", colors)
self.set_and_get("", "end", end_angles)
self.set_and_get("", "start", start_angles)
def _set_sources(self):
"""Push the Donut data into the ColumnDataSource and calculate
the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = Range1d(start=-2, end=2)
self.y_range = Range1d(start=-2, end=2)
def draw_central_wedge(self):
"""Draw the central part of the donut wedge from donut.source and
its calculated start and end angles.
"""
glyph = Wedge(
x=0, y=0, radius=1, start_angle="start", end_angle="end",
line_color="white", line_width=2, fill_color="colors"
)
yield GlyphRenderer(data_source=self._source, glyph=glyph)
def draw_central_descriptions(self):
"""Draw the descriptions to be placed on the central part of the
donut wedge
"""
text = ["%s" % cat for cat in self.cat]
x, y = polar_to_cartesian(0.7, self._data["start"], self._data["end"])
text_source = ColumnDataSource(dict(text=text, x=x, y=y))
glyph = Text(
x="x", y="y", text="text",
text_align="center", text_baseline="middle"
)
yield GlyphRenderer(data_source=text_source, glyph=glyph)
def draw_external_ring(self, colors=None):
"""Draw the external part of the donut wedge from donut.source
and its related descriptions
"""
if colors is None:
colors = cycle_colors(self.cat, self.palette)
first = True
for i, (cat, start_angle, end_angle) in enumerate(zip(
self.cat, self._data['start'], self._data['end'])):
details = self._df.ix[i]
radians = lambda x: 2*pi*(x/self._total_units)
angles = details.map(radians).cumsum() + start_angle
end = angles.tolist() + [end_angle]
start = [start_angle] + end[:-1]
base_color = colors[i]
#fill = [ base_color.lighten(i*0.05) for i in range(len(details) + 1) ]
fill = [base_color for i in range(len(details) + 1)]
text = [rowlabel for rowlabel in details.index]
x, y = polar_to_cartesian(1.25, start, end)
source = ColumnDataSource(dict(start=start, end=end, fill=fill))
glyph = AnnularWedge(
x=0, y=0, inner_radius=1, outer_radius=1.5,
start_angle="start", end_angle="end",
line_color="white", line_width=2,
fill_color="fill"
)
yield GlyphRenderer(data_source=source, glyph=glyph)
text_angle = [(start[i]+end[i])/2 for i in range(len(start))]
text_angle = [angle + pi if pi/2 < angle < 3*pi/2 else angle
for angle in text_angle]
if first and text:
text.insert(0, '')
offset = pi / 48
text_angle.insert(0, text_angle[0] - offset)
start.insert(0, start[0] - offset)
end.insert(0, end[0] - offset)
x, y = polar_to_cartesian(1.25, start, end)
first = False
data = dict(text=text, x=x, y=y, angle=text_angle)
text_source = ColumnDataSource(data)
glyph = Text(
x="x", y="y", text="text", angle="angle",
text_align="center", text_baseline="middle"
)
yield GlyphRenderer(data_source=text_source, glyph=glyph)
def _yield_renderers(self):
"""Use the AnnularWedge and Wedge glyphs to display the wedges.
        Takes reference points from the data loaded into the ColumnDataSource.
"""
# build the central round area of the donut
renderers = []
renderers += self.draw_central_wedge()
# write central descriptions
renderers += self.draw_central_descriptions()
# build external donut ring
renderers += self.draw_external_ring()
return renderers
| bsd-3-clause |
CosmicFish/CosmicFish | camb/eftcamb/tests_EFT/python/CAMB_plots_lib/CAMB_comp_plots.py | 1 | 31632 | import numpy as np
import matplotlib.pyplot as plt
import math
from CMB_plots import CMB_plots
class CAMB_results_compare_plot:
"""
    class that contains the necessary tools to plot the comparison of two CAMB runs
"""
# general plot settings:
color1 = 'red' # default color of the line of the first model
color2 = 'blue' # default color of the line of the second model
color_compa = 'green' # default color of the line of the difference
x_size_inc = 8.30 # x size of the final plots, in inches
y_size_inc = 11.7 # y size of the final plots, in inches
def __init__(self, root1, root2, outpath,
tensor=False, lensing=False, transfer=False,
name1='', name2=''):
"""
Class constructor:
root1 = name of the first CAMB run
root2 = name of the second CAMB run
outpath = path to the output folder
        tensor   = (optional) specifies whether the results contain the tensor Cls
        lensing  = (optional) specifies whether the results contain lensing
        transfer = (optional) specifies whether the results contain transfer functions
name1 = (optional) specifies the name of the first model. Used for the legend.
name2 = (optional) specifies the name of the second model. Used for the legend.
"""
# store the constructor options:
self.root1 = root1
self.root2 = root2
self.outpath = outpath
self.tensor = tensor
self.lensing = lensing
self.transfer = transfer
# extract the name of the models from the roots:
self.name1 = root1.split("/")[len(root1.split("/"))-1]
self.name2 = root2.split("/")[len(root2.split("/"))-1]
# set the human readable name
if name1=='':
self.name_h1 = ''.join(i for i in self.name1.replace('_',' ') if not i.isdigit())
else:
self.name_h1 = name1
if name2=='':
self.name_h2 = ''.join(i for i in self.name2.replace('_',' ') if not i.isdigit())
else:
self.name_h2 = name2
# load the data:
self.scalCls_1 = np.loadtxt(root1+'_scalCls.dat')
self.scalCovCls_1 = np.loadtxt(root1+'_scalCovCls.dat')
self.scalCls_2 = np.loadtxt(root2+'_scalCls.dat')
self.scalCovCls_2 = np.loadtxt(root2+'_scalCovCls.dat')
if self.lensing:
self.lensedCls_1 = np.loadtxt(root1+'_lensedCls.dat')
self.lenspotentialCls_1 = np.loadtxt(root1+'_lenspotentialCls.dat')
self.lensedCls_2 = np.loadtxt(root2+'_lensedCls.dat')
self.lenspotentialCls_2 = np.loadtxt(root2+'_lenspotentialCls.dat')
if self.transfer:
self.matterpower_1 = np.loadtxt(root1+'_matterpower.dat')
self.transfer_func_1 = np.loadtxt(root1+'_transfer_out.dat')
self.matterpower_2 = np.loadtxt(root2+'_matterpower.dat')
self.transfer_func_2 = np.loadtxt(root2+'_transfer_out.dat')
if self.tensor:
self.tensCls_1 = np.loadtxt(root1+'_tensCls.dat')
self.totCls_1 = np.loadtxt(root1+'_totCls.dat')
self.tensCls_2 = np.loadtxt(root2+'_tensCls.dat')
self.totCls_2 = np.loadtxt(root2+'_totCls.dat')
if self.lensing and self.tensor:
self.lensedtotCls_1 = np.loadtxt(root1+'_lensedtotCls.dat')
self.lensedtotCls_2 = np.loadtxt(root2+'_lensedtotCls.dat')
def plot_compare_scalCls(self):
"""
        Plots and saves the comparison of all the scalar Cls in a single image
"""
# number of Cls in the two files:
num1 = self.scalCls_1.shape[1]-1
num2 = self.scalCls_2.shape[1]-1
# protection against different runs
if num1!=num2:
print 'wrong number of Cls'
return
if len(self.scalCls_1[:,0])!=len(self.scalCls_2[:,0]):
print 'different lmax'
return
if len(self.matterpower_1[:,0])!=len(self.matterpower_2[:,0]):
self.transfer = False
# add the matter power spectrum if required:
if self.transfer: num1 += 1
# values of l
xvalues = self.scalCls_1[:,0]
# set up the plots:
plots_1 = CMB_plots()
plots_2 = CMB_plots()
plots_compa = CMB_plots()
plots_1.color = self.color1
plots_2.color = self.color2
plots_compa.color = self.color_compa
plots_compa.comparison = True
plots_compa.axes_label_position = 'right'
fig = plt.gcf()
# do the plots:
for ind in xrange(1,num1+1):
# distribute the plots in the figure:
temp = plt.subplot2grid((num1,2), (ind-1, 0))
temp_comp = plt.subplot2grid((num1,2), (ind-1, 1))
if ind == 1: # TT power spectrum:
yvalues_1 = self.scalCls_1[:,ind]
yvalues_2 = self.scalCls_2[:,ind]
# protection against values equal to zero:
yvalues_temp = np.array(map(abs, yvalues_1))
min2val = np.min(yvalues_temp[np.nonzero(yvalues_temp)])
np.place(yvalues_1, yvalues_1 == 0.0, min2val)
            # computation of the percentage comparison:
yvalues_comp = (yvalues_1 - yvalues_2)/abs(yvalues_1)*100
# protection against values too small:
np.place(yvalues_comp, abs(yvalues_comp)<10.0**(-16), [10.0**(-16)])
# make the plots:
plots_1.TT_plot(temp, xvalues, yvalues_1)
plots_2.TT_plot(temp, xvalues, yvalues_2)
plots_compa.TT_plot(temp_comp, xvalues, yvalues_comp)
temp_comp.set_yscale('Log')
temp.set_title('TT power spectrum')
elif ind == 2: # EE power spectrum:
yvalues_1 = self.scalCls_1[:,ind]
yvalues_2 = self.scalCls_2[:,ind]
# protection against values equal to zero:
yvalues_temp = np.array(map(abs, yvalues_1))
min2val = np.min(yvalues_temp[np.nonzero(yvalues_temp)])
np.place(yvalues_1, yvalues_1 == 0.0, min2val)
            # computation of the percentage comparison:
yvalues_comp = (yvalues_1 - yvalues_2)/abs(yvalues_1)*100
# protection against values too small:
np.place(yvalues_comp, abs(yvalues_comp)<10.0**(-16), [10.0**(-16)])
# make the plots:
plots_1.EE_plot(temp, xvalues, yvalues_1)
plots_2.EE_plot(temp, xvalues, yvalues_2)
plots_compa.EE_plot(temp_comp, xvalues, yvalues_comp)
temp.set_title('EE power spectrum')
elif ind == 3: # TE power spectrum:
yvalues_1 = self.scalCls_1[:,ind]
yvalues_2 = self.scalCls_2[:,ind]
# protection against values equal to zero:
yvalues_temp = np.array(map(abs, yvalues_1))
min2val = np.min(yvalues_temp[np.nonzero(yvalues_temp)])
np.place(yvalues_1, yvalues_1 == 0.0, min2val)
            # computation of the percentage comparison:
yvalues_comp = (yvalues_1 - yvalues_2)/abs(yvalues_1)*100
# protection against values too small:
np.place(yvalues_comp, abs(yvalues_comp)<10.0**(-16), [10.0**(-16)])
# make the plots:
plots_1.TE_plot(temp, xvalues, yvalues_1)
plots_2.TE_plot(temp, xvalues, yvalues_2)
plots_compa.TE_plot(temp_comp, xvalues, yvalues_comp)
temp.set_title('TE power spectrum')
elif ind == 4 and self.lensing: # CMB lensing power spectrum:
yvalues_1 = self.scalCls_1[:,ind]
yvalues_2 = self.scalCls_2[:,ind]
# protection against values equal to zero:
yvalues_temp = np.array(map(abs, yvalues_1))
min2val = np.min(yvalues_temp[np.nonzero(yvalues_temp)])
np.place(yvalues_1, yvalues_1 == 0.0, min2val)
            # computation of the percentage comparison:
yvalues_comp = (yvalues_1 - yvalues_2)/abs(yvalues_1)*100
# protection against values too small:
np.place(yvalues_comp, abs(yvalues_comp)<10.0**(-16), [10.0**(-16)])
# make the plots:
plots_1.Phi_plot(temp, xvalues, yvalues_1)
plots_2.Phi_plot(temp, xvalues, yvalues_2)
plots_compa.Phi_plot(temp_comp, xvalues, yvalues_comp)
temp.set_title('$\phi$ power spectrum')
elif ind == 5 and self.lensing: # CMB lensing - Temperature power spectrum:
yvalues_1 = self.scalCls_1[:,ind]
yvalues_2 = self.scalCls_2[:,ind]
# protection against values equal to zero:
yvalues_temp = np.array(map(abs, yvalues_1))
min2val = np.min(yvalues_temp[np.nonzero(yvalues_temp)])
np.place(yvalues_1, yvalues_1 == 0.0, min2val)
            # computation of the percentage comparison:
yvalues_comp = (yvalues_1 - yvalues_2)/abs(yvalues_1)*100
# protection against values too small:
np.place(yvalues_comp, abs(yvalues_comp)<10.0**(-16), [10.0**(-16)])
# make the plots:
plots_1.PhiT_plot(temp, xvalues, yvalues_1)
plots_2.PhiT_plot(temp, xvalues, yvalues_2)
plots_compa.PhiT_plot(temp_comp, xvalues, yvalues_comp)
temp.set_title('$\phi$T power spectrum')
elif ind == num1 and self.transfer: # matter power spectrum:
xvalues = self.matterpower_2[:,0]
yvalues_1 = self.matterpower_1[:,1]
yvalues_2 = self.matterpower_2[:,1]
# protection against values equal to zero:
yvalues_temp = np.array(map(abs, yvalues_1))
min2val = np.min(yvalues_temp[np.nonzero(yvalues_temp)])
np.place(yvalues_1, yvalues_1 == 0.0, min2val)
            # computation of the percentage comparison:
yvalues_comp = (yvalues_1 - yvalues_2)/abs(yvalues_1)*100
# protection against values too small:
np.place(yvalues_comp, abs(yvalues_comp)<10.0**(-16), [10.0**(-16)])
# make the plots:
plots_1.Matter_plot(temp, xvalues, yvalues_1)
plots_2.Matter_plot(temp, xvalues, yvalues_2)
plots_compa.Matter_plot(temp_comp, xvalues, yvalues_comp)
temp.set_title('Matter power spectrum')
else: # generic Cl comparison:
yvalues_1 = self.scalCls_1[:,ind]
yvalues_2 = self.scalCls_2[:,ind]
# protection against values equal to zero:
yvalues_temp = np.array(map(abs, yvalues_1))
min2val = np.min(yvalues_temp[np.nonzero(yvalues_temp)])
np.place(yvalues_1, yvalues_1 == 0.0, min2val)
# computation of the percentual comparison:
yvalues_comp = (yvalues_1 - yvalues_2)/abs(yvalues_1)*100
# protection against values too small:
np.place(yvalues_comp, abs(yvalues_comp)<10.0**(-16), [10.0**(-16)])
# make the plots:
plots_1.Generic_Cl(temp, xvalues, yvalues_1)
plots_2.Generic_Cl(temp, xvalues, yvalues_2)
plots_compa.Generic_Cl(temp_comp, xvalues, yvalues_comp)
# set the size of the image
fig.set_size_inches( self.x_size_inc, self.y_size_inc)
# set a tight layout
fig.tight_layout(pad=0.3, h_pad=0.3, w_pad=0.3)
# set the global title
plt.suptitle(self.name_h1+' VS '+self.name_h2+
' comparison of scalar Cls', fontsize=16)
# set the global legend
fig.legend( handles = [plots_1.TT_p, plots_2.TT_p, plots_compa.CV_p],
labels = [self.name_h1, self.name_h2, 'Cosmic variance'],
loc='lower center', ncol=3 ,fancybox=True)
# adjust the size of the plot
fig.subplots_adjust(top=0.92, bottom=0.08)
# save the result and close
plt.savefig(self.outpath+self.name1+'_'+self.name2+'_scalCls_comp.pdf')
plt.clf()
plt.close("all")
def plot_compare_scalCovCls(self):
"""
        Plots and saves the comparison of all the scalar Cov Cls in a single image
"""
# number of Cls:
num1 = self.scalCovCls_1.shape[1]-1
num2 = self.scalCovCls_2.shape[1]-1
# protection against different runs
if num1!=num2:
print 'wrong number of Cls'
return
if len(self.scalCovCls_1[:,0])!=len(self.scalCovCls_2[:,0]):
print 'different lmax'
return
# size of the Cl Cov matrix:
num1 = int(math.sqrt(num1))
num_tot = sum(xrange(1,num1+1))
# set up the plots:
plots_1 = CMB_plots()
plots_2 = CMB_plots()
plots_compa = CMB_plots()
plots_1.color = self.color1
plots_2.color = self.color2
plots_compa.color = self.color_compa
plots_compa.comparison = True
plots_compa.axes_label_position = 'right'
fig = plt.gcf()
# setup a dictionary with the names of the Cls
dict = { 1: 'T', 2: 'E', 3: '$\phi$'}
for i in xrange(4, num1+1):
dict[i] = 'W'+str(i)
# values of the multipoles:
xvalues = self.scalCovCls_1[:,0]
# other stuff:
ind_tot = 0
# do the plots:
for ind in xrange(1,num1+1):
for ind2 in xrange(1, ind+1):
ind_tot += 1
# place the plots:
temp = plt.subplot2grid((num_tot,2), (ind_tot-1, 0))
temp_comp = plt.subplot2grid((num_tot,2), (ind_tot-1, 1))
# values of the Cls:
col = ind + num1*(ind2-1)
yvalues_1 = self.scalCovCls_1[:,col]
yvalues_2 = self.scalCovCls_2[:,col]
# protection against values equal to zero:
yvalues_temp = np.array(map(abs, yvalues_1))
min2val = np.min(yvalues_temp[np.nonzero(yvalues_temp)])
np.place(yvalues_1, yvalues_1 == 0.0, min2val)
                # computation of the percentage comparison:
yvalues_comp = (yvalues_1 - yvalues_2)/abs(yvalues_1)*100
# protection against values too small:
np.place(yvalues_comp, abs(yvalues_comp)<10.0**(-16), [10.0**(-16)])
# make the plots:
plots_1.Generic_Cl(temp, xvalues, yvalues_1)
plots_2.Generic_Cl(temp, xvalues, yvalues_2)
plots_compa.Generic_Cl(temp_comp, xvalues, yvalues_comp)
temp.set_title(dict[ind2]+dict[ind]+' power spectrum')
# set the size of the image
fig.set_size_inches( self.x_size_inc, self.y_size_inc/5.0*num_tot)
# set a tight layout
fig.tight_layout(pad=0.3, h_pad=0.3, w_pad=0.3)
# set the global legend
fig.legend( handles = [plots_1.Generic_Cl_plot, plots_2.Generic_Cl_plot, plots_compa.CV_p],
labels = [self.name_h1, self.name_h2, 'Cosmic variance'],
loc='lower center', ncol=3 ,fancybox=True)
# set the global title
plt.suptitle(self.name_h1+' VS '+self.name_h2+
' comparison of scalar Cov Cls', fontsize=16)
# adjust the size of the plot
fig.subplots_adjust(top=0.92, bottom=0.08)
# fig.subplots_adjust(top=0.96, bottom=0.01)
# save the result and close
plt.savefig(self.outpath+self.name1+'_'+self.name2+'_scalCovCls_comp.pdf')
plt.clf()
plt.close("all")
def plot_compare_lensedCls(self):
"""
        Plots and saves the comparison of all the lensed Cls in a single image
"""
# protection from direct calls if lensing is not included
if not self.lensing: return
# number of Cls:
num1 = self.lensedCls_1.shape[1]-1
num2 = self.lensedCls_2.shape[1]-1
# protection against different runs
if num1!=num2:
print 'wrong number of Cls'
return
if len(self.lensedCls_1[:,0])!=len(self.lensedCls_2[:,0]):
print 'different lmax'
return
xvalues = self.lensedCls_1[:,0]
# set up the plots:
plots_1 = CMB_plots()
plots_2 = CMB_plots()
plots_compa = CMB_plots()
plots_1.color = self.color1
plots_2.color = self.color2
plots_compa.color = self.color_compa
plots_compa.comparison = True
plots_compa.axes_label_position = 'right'
fig = plt.gcf()
# do the plots:
for ind in xrange(1,num1+1):
temp = plt.subplot2grid((num1,2), (ind-1, 0))
temp_comp = plt.subplot2grid((num1,2), (ind-1, 1))
yvalues_1 = self.lensedCls_1[:,ind]
yvalues_2 = self.lensedCls_2[:,ind]
min2val = np.min(yvalues_1[np.nonzero(yvalues_1)])
np.place(yvalues_1, yvalues_1 == 0.0, min2val)
yvalues_comp = (yvalues_1 - yvalues_2)/abs(yvalues_1)*100
# Protection against all zero: put to machine precision the values that are zero
np.place(yvalues_comp, abs(yvalues_comp)<10.0**(-16), [10.0**(-16)])
if ind == 1:
plots_1.TT_plot(temp, xvalues, yvalues_1)
plots_2.TT_plot(temp, xvalues, yvalues_2)
plots_compa.TT_plot(temp_comp, xvalues, yvalues_comp)
temp_comp.set_yscale('Log')
temp.set_title('TT power spectrum')
elif ind == 2:
plots_1.EE_plot(temp, xvalues, yvalues_1)
plots_2.EE_plot(temp, xvalues, yvalues_2)
plots_compa.EE_plot(temp_comp, xvalues, yvalues_comp)
temp.set_title('EE power spectrum')
elif ind == 3:
plots_1.BB_plot(temp, xvalues, yvalues_1)
plots_2.BB_plot(temp, xvalues, yvalues_2)
plots_compa.BB_plot(temp_comp, xvalues, yvalues_comp)
temp.set_title('BB power spectrum')
elif ind == 4:
plots_1.TE_plot(temp, xvalues, yvalues_1)
plots_2.TE_plot(temp, xvalues, yvalues_2)
plots_compa.TE_plot(temp_comp, xvalues, yvalues_comp)
temp.set_title('TE power spectrum')
else:
plots_1.TT_plot(temp, xvalues, yvalues_1)
plots_2.TT_plot(temp, xvalues, yvalues_2)
plots_compa.TT_plot(temp_comp, xvalues, yvalues_comp)
# set the size of the image
fig.set_size_inches( self.x_size_inc, self.y_size_inc)
# set a tight layout
fig.tight_layout(pad=0.3, h_pad=0.3, w_pad=0.3)
# set the global title
plt.suptitle(self.name_h1+' VS '+self.name_h2+
' comparison of lensed Cls', fontsize=16)
# set the global legend
fig.legend( handles = [plots_1.TT_p, plots_2.TT_p, plots_compa.CV_p],
labels = [self.name_h1, self.name_h2, 'Cosmic variance'],
loc='lower center', ncol=3 ,fancybox=True)
# adjust the size of the plot
fig.subplots_adjust(top=0.92, bottom=0.08)
# save the result and close
plt.savefig(self.outpath+self.name1+'_'+self.name2+'_lensedCls_comp.pdf')
plt.clf()
plt.close("all")
def plot_compare_tensCls(self):
"""
        Plots and saves the comparison of all the tensor Cls in a single image
"""
# protection from direct calls if tensors are not included
if not self.tensor: return
# number of Cls:
num1 = self.tensCls_1.shape[1]-1
num2 = self.tensCls_2.shape[1]-1
# protection against different runs
if num1!=num2:
print 'wrong number of Cls'
return
if len(self.tensCls_1[:,0])!=len(self.tensCls_2[:,0]):
print 'different lmax'
return
xvalues = self.tensCls_1[:,0]
# set up the plots:
plots_1 = CMB_plots()
plots_2 = CMB_plots()
plots_compa = CMB_plots()
plots_1.color = self.color1
plots_2.color = self.color2
plots_compa.color = self.color_compa
plots_compa.comparison = True
plots_compa.axes_label_position = 'right'
fig = plt.gcf()
# do the plots:
for ind in xrange(1,num1+1):
temp = plt.subplot2grid((num1,2), (ind-1, 0))
temp_comp = plt.subplot2grid((num1,2), (ind-1, 1))
yvalues_1 = self.tensCls_1[:,ind]
yvalues_2 = self.tensCls_2[:,ind]
min2val = np.min(yvalues_1[np.nonzero(yvalues_1)])
np.place(yvalues_1, yvalues_1 == 0.0, min2val)
yvalues_comp = (yvalues_1 - yvalues_2)/abs(yvalues_1)*100
# Protection against all zero: put to machine precision the values that are zero
np.place(yvalues_comp, abs(yvalues_comp)<10.0**(-16), [10.0**(-16)])
if ind == 1:
plots_1.TT_plot(temp, xvalues, yvalues_1)
plots_2.TT_plot(temp, xvalues, yvalues_2)
plots_compa.TT_plot(temp_comp, xvalues, yvalues_comp)
temp.set_yscale('Log')
temp.set_title('TT power spectrum')
elif ind == 2:
plots_1.EE_plot(temp, xvalues, yvalues_1)
plots_2.EE_plot(temp, xvalues, yvalues_2)
plots_compa.EE_plot(temp_comp, xvalues, yvalues_comp)
temp.set_title('EE power spectrum')
elif ind == 3:
plots_1.BB_plot(temp, xvalues, yvalues_1)
plots_2.BB_plot(temp, xvalues, yvalues_2)
plots_compa.BB_plot(temp_comp, xvalues, yvalues_comp)
temp.set_title('BB power spectrum')
elif ind == 4:
plots_1.TE_plot(temp, xvalues, yvalues_1)
plots_2.TE_plot(temp, xvalues, yvalues_2)
plots_compa.TE_plot(temp_comp, xvalues, yvalues_comp)
temp.set_title('TE power spectrum')
else:
plots_1.TT_plot(temp, xvalues, yvalues_1)
plots_2.TT_plot(temp, xvalues, yvalues_2)
plots_compa.TT_plot(temp_comp, xvalues, yvalues_comp)
# set the size of the image
fig.set_size_inches( self.x_size_inc, self.y_size_inc)
# set a tight layout
fig.tight_layout(pad=0.3, h_pad=0.3, w_pad=0.3)
# set the global title
plt.suptitle(self.name_h1+' VS '+self.name_h2+
' comparison of tensor Cls', fontsize=16)
# set the global legend
fig.legend( handles = [plots_1.TT_p, plots_2.TT_p, plots_compa.CV_p],
labels = [self.name_h1, self.name_h2, 'Cosmic variance'],
loc='lower center', ncol=3 ,fancybox=True)
# adjust the size of the plot
fig.subplots_adjust(top=0.92, bottom=0.08)
# save the result and close
plt.savefig(self.outpath+self.name1+'_'+self.name2+'_tensCls_comp.pdf')
plt.clf()
plt.close("all")
def plot_compare_totalCls(self):
"""
        Plots and saves the comparison of all the total (scalar + tensor) Cls in a single image.
        If lensing is included, lensed Cls are used.
"""
# protection from direct calls if tensors are not included
if not self.tensor: return
# decide what data to use:
if self.lensing:
data1 = self.lensedtotCls_1
data2 = self.lensedtotCls_2
else:
data1 = self.totCls_1
data2 = self.totCls_2
# number of Cls:
num1 = data1.shape[1]-1
num2 = data2.shape[1]-1
# protection against different runs
if num1!=num2:
print 'wrong number of Cls'
return
if len(data1[:,0])!=len(data2[:,0]):
print 'different lmax'
return
xvalues = data1[:,0]
# set up the plots:
plots_1 = CMB_plots()
plots_2 = CMB_plots()
plots_compa = CMB_plots()
plots_1.color = self.color1
plots_2.color = self.color2
plots_compa.color = self.color_compa
plots_compa.comparison = True
plots_compa.axes_label_position = 'right'
fig = plt.gcf()
# do the plots:
for ind in xrange(1,num1+1):
temp = plt.subplot2grid((num1,2), (ind-1, 0))
temp_comp = plt.subplot2grid((num1,2), (ind-1, 1))
yvalues_1 = data1[:,ind]
yvalues_2 = data2[:,ind]
min2val = np.min(yvalues_1[np.nonzero(yvalues_1)])
np.place(yvalues_1, yvalues_1 == 0.0, min2val)
yvalues_comp = (yvalues_1 - yvalues_2)/abs(yvalues_1)*100
# Protection against all zero: put to machine precision the values that are zero
np.place(yvalues_comp, abs(yvalues_comp)<10.0**(-16), [10.0**(-16)])
if ind == 1:
plots_1.TT_plot(temp, xvalues, yvalues_1)
plots_2.TT_plot(temp, xvalues, yvalues_2)
plots_compa.TT_plot(temp_comp, xvalues, yvalues_comp)
temp.set_yscale('Log')
temp.set_title('TT power spectrum')
elif ind == 2:
plots_1.EE_plot(temp, xvalues, yvalues_1)
plots_2.EE_plot(temp, xvalues, yvalues_2)
plots_compa.EE_plot(temp_comp, xvalues, yvalues_comp)
temp.set_title('EE power spectrum')
elif ind == 3:
plots_1.BB_plot(temp, xvalues, yvalues_1)
plots_2.BB_plot(temp, xvalues, yvalues_2)
plots_compa.BB_plot(temp_comp, xvalues, yvalues_comp)
temp.set_title('BB power spectrum')
elif ind == 4:
plots_1.TE_plot(temp, xvalues, yvalues_1)
plots_2.TE_plot(temp, xvalues, yvalues_2)
plots_compa.TE_plot(temp_comp, xvalues, yvalues_comp)
temp.set_title('TE power spectrum')
else:
plots_1.TT_plot(temp, xvalues, yvalues_1)
plots_2.TT_plot(temp, xvalues, yvalues_2)
plots_compa.TT_plot(temp_comp, xvalues, yvalues_comp)
# set the size of the image
fig.set_size_inches( self.x_size_inc, self.y_size_inc)
# set a tight layout
fig.tight_layout(pad=0.3, h_pad=0.3, w_pad=0.3)
# set the global title
if self.lensing:
plt.suptitle(self.name_h1+' VS '+self.name_h2+' comparison of total lensed Cls', fontsize=16)
else:
plt.suptitle(self.name_h1+' VS '+self.name_h2+' comparison of total Cls', fontsize=16)
# set the global legend
fig.legend( handles = [plots_1.TT_p, plots_2.TT_p, plots_compa.CV_p],
labels = [self.name_h1, self.name_h2, 'Cosmic variance'],
loc='lower center', ncol=3 ,fancybox=True)
# adjust the size of the plot
fig.subplots_adjust(top=0.92, bottom=0.08)
# save the result and close
plt.savefig(self.outpath+self.name1+'_'+self.name2+'_totCls_comp.pdf')
plt.clf()
plt.close("all")
def plot_compare_Transfer(self):
"""
        Plots and saves the comparison of all the transfer functions in a single image
"""
# protection from direct calls if transfer functions are not included
if not self.transfer: return
data1 = self.transfer_func_1
data2 = self.transfer_func_2
# number of transfer functions:
num1 = data1.shape[1]-1
num2 = data2.shape[1]-1
# protection against different runs
if num1!=num2:
print 'wrong number of transfer functions'
return
if len(data1[:,0])!=len(data2[:,0]):
print 'Different values of k'
return
xvalues = data1[:,0]
# set up the plots:
plots_1 = CMB_plots()
plots_2 = CMB_plots()
plots_compa = CMB_plots()
plots_1.color = self.color1
plots_2.color = self.color2
plots_compa.color = self.color_compa
plots_compa.comparison = True
plots_compa.axes_label_position = 'right'
fig = plt.gcf()
labels = [ 'CDM', 'baryons', 'photons', 'massless neutrinos', 'massive neutrinos',
'CDM+baryons+massive neutrinos', 'CDM+baryons', 'CDM+baryons+massive neutrinos+ de',
'The Weyl potential', 'vel_Newt_cdm', 'vel_Newt_b', 'relative baryon-CDM velocity'
]
if not len(labels) == num1:
print 'Not enough transfer functions'
return
# do the plots:
for ind in xrange(1,num1+1):
temp = plt.subplot2grid((num1,2), (ind-1, 0))
temp_comp = plt.subplot2grid((num1,2), (ind-1, 1))
yvalues_1 = data1[:,ind]
yvalues_2 = data2[:,ind]
min2val = np.min(yvalues_1[np.nonzero(yvalues_1)])
np.place(yvalues_1, yvalues_1 == 0.0, min2val)
yvalues_comp = (yvalues_1 - yvalues_2)/abs(yvalues_1)*100
# Protection against all zero: put to machine precision the values that are zero
np.place(yvalues_comp, abs(yvalues_comp)<10.0**(-16), [10.0**(-16)])
plots_1.Transfer_plot(temp, xvalues, yvalues_1)
plots_2.Transfer_plot(temp, xvalues, yvalues_2)
plots_compa.Transfer_plot(temp_comp, xvalues, yvalues_comp)
temp.set_title(labels[ind-1])
# set the size of the image
fig.set_size_inches( self.x_size_inc, 1.61803398875*self.x_size_inc/6.*num1 )
# set a tight layout
fig.tight_layout(pad=0.3, h_pad=0.3, w_pad=0.3)
# set the global title
plt.suptitle(self.name_h1+' VS '+self.name_h2+' comparison of transfer functions', fontsize=16)
# set the global legend
fig.legend( handles = [plots_1.Transfer_p, plots_2.Transfer_p],
labels = [self.name_h1, self.name_h2],
loc='lower center', ncol=3 ,fancybox=True)
# adjust the size of the plot
fig.subplots_adjust(top=0.95, bottom=0.05)
# save the result and close
plt.savefig(self.outpath+self.name1+'_'+self.name2+'_transfer_comp.pdf')
plt.clf()
plt.close("all")
| gpl-3.0 |
DonBeo/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 7 | 10974 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
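    # In the weighted variant a point is a core sample when the summed weight
    # of its eps-neighbourhood (including itself) reaches min_samples, which is
    # what the assertions above exercise.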
    # for non-negative integer sample_weight, core samples should match those
    # obtained by repeating each point sample_weight times
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
| bsd-3-clause |
sssllliang/BuildingMachineLearningSystemsWithPython | ch02/chapter.py | 17 | 4700 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from matplotlib import pyplot as plt
import numpy as np
# We load the data with load_iris from sklearn
from sklearn.datasets import load_iris
data = load_iris()
# load_iris returns an object with several fields
features = data.data
feature_names = data.feature_names
target = data.target
target_names = data.target_names
for t in range(3):
if t == 0:
c = 'r'
marker = '>'
elif t == 1:
c = 'g'
marker = 'o'
elif t == 2:
c = 'b'
marker = 'x'
plt.scatter(features[target == t, 0],
features[target == t, 1],
marker=marker,
c=c)
# We use NumPy fancy indexing to get an array of strings:
labels = target_names[target]
# The petal length is the feature at position 2
plength = features[:, 2]
# Build an array of booleans:
is_setosa = (labels == 'setosa')
# This is the important step:
max_setosa = plength[is_setosa].max()
min_non_setosa = plength[~is_setosa].min()
print('Maximum of setosa: {0}.'.format(max_setosa))
print('Minimum of others: {0}.'.format(min_non_setosa))
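# Any threshold between these two values separates setosa from the other
# species on this training data.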
# ~ is the boolean negation operator
features = features[~is_setosa]
labels = labels[~is_setosa]
# Build a new target variable, is_virginica
is_virginica = (labels == 'virginica')
# Initialize best_acc to impossibly low value
best_acc = -1.0
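# Exhaustive search over (feature, threshold) pairs: every observed value of
# every feature is tried as a threshold, in both directions, and the most
# accurate rule is kept.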
for fi in range(features.shape[1]):
# We are going to test all possible thresholds
thresh = features[:,fi]
for t in thresh:
# Get the vector for feature `fi`
feature_i = features[:, fi]
# apply threshold `t`
pred = (feature_i > t)
acc = (pred == is_virginica).mean()
rev_acc = (pred == ~is_virginica).mean()
if rev_acc > acc:
reverse = True
acc = rev_acc
else:
reverse = False
if acc > best_acc:
best_acc = acc
best_fi = fi
best_t = t
best_reverse = reverse
print(best_fi, best_t, best_reverse, best_acc)
def is_virginica_test(fi, t, reverse, example):
'Apply threshold model to a new example'
test = example[fi] > t
if reverse:
test = not test
return test
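# Hypothetical usage sketch (values are illustrative only): with fi=3, t=1.6,
# reverse=False, the rule predicts virginica whenever example[3] > 1.6.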
from threshold import fit_model, predict
# Training accuracy was 96.0%.
# Testing accuracy was 90.0% (N = 50).
correct = 0.0
for ei in range(len(features)):
# select all but the one at position `ei`:
training = np.ones(len(features), bool)
training[ei] = False
testing = ~training
model = fit_model(features[training], is_virginica[training])
    predictions = predict(model, features[testing])
correct += np.sum(predictions == is_virginica[testing])
acc = correct/float(len(features))
print('Accuracy: {0:.1%}'.format(acc))
###########################################
############## SEEDS DATASET ##############
###########################################
from load import load_dataset
feature_names = [
'area',
'perimeter',
'compactness',
'length of kernel',
'width of kernel',
    'asymmetry coefficient',
'length of kernel groove',
]
features, labels = load_dataset('seeds')
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors=1)
from sklearn.cross_validation import KFold
kf = KFold(len(features), n_folds=5, shuffle=True)
means = []
for training,testing in kf:
# We learn a model for this fold with `fit` and then apply it to the
# testing data with `predict`:
classifier.fit(features[training], labels[training])
prediction = classifier.predict(features[testing])
# np.mean on an array of booleans returns fraction
# of correct decisions for this fold:
curmean = np.mean(prediction == labels[testing])
means.append(curmean)
print('Mean accuracy: {:.1%}'.format(np.mean(means)))
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
classifier = KNeighborsClassifier(n_neighbors=1)
classifier = Pipeline([('norm', StandardScaler()), ('knn', classifier)])
means = []
for training,testing in kf:
# We learn a model for this fold with `fit` and then apply it to the
# testing data with `predict`:
classifier.fit(features[training], labels[training])
prediction = classifier.predict(features[testing])
# np.mean on an array of booleans returns fraction
# of correct decisions for this fold:
curmean = np.mean(prediction == labels[testing])
means.append(curmean)
print('Mean accuracy: {:.1%}'.format(np.mean(means)))
| mit |
nikitasingh981/scikit-learn | sklearn/metrics/tests/test_classification.py | 8 | 57459 | from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import MockDataFrame
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.exceptions import UndefinedMetricWarning
from scipy.spatial.distance import hamming as sp_hamming
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
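        # F-beta generalises F1: F_beta = (1 + beta**2) * P * R / (beta**2 * P + R),
        # which is exactly the expression checked below for beta=2.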
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F1 score behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert_equal(1., precision_score([1, 1], [1, 1]))
assert_equal(1., recall_score([1, 1], [1, 1]))
assert_equal(1., f1_score([1, 1], [1, 1]))
assert_equal(0., precision_score([-1, -1], [-1, -1]))
assert_equal(0., recall_score([-1, -1], [-1, -1]))
assert_equal(0., f1_score([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
# Test handling of explicit additional (not in input) labels to PRF
y_true = [1, 3, 3, 2]
y_pred = [1, 1, 3, 2]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
# No average: zeros in array
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average=None)
assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
# Macro average is changed
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average='macro')
assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
        # No effect otherwise
for average in ['micro', 'weighted', 'samples']:
if average == 'samples' and i == 0:
continue
assert_almost_equal(recall_score(y_true, y_pred,
labels=[0, 1, 2, 3, 4],
average=average),
recall_score(y_true, y_pred, labels=None,
average=average))
# Error when introducing invalid label in multilabel case
# (although it would only affect performance if average='macro'/None)
for average in [None, 'macro', 'micro', 'samples']:
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(6), average=average)
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
# Test a subset of labels may be requested for PRF
y_true = [1, 1, 2, 3]
y_pred = [1, 3, 3, 3]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
recall_all = partial(recall_score, y_true, y_pred, labels=None)
assert_array_almost_equal([.5, 1.], recall_13(average=None))
assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
assert_almost_equal((.5 * 2 + 1. * 1) / 3,
recall_13(average='weighted'))
assert_almost_equal(2. / 3, recall_13(average='micro'))
# ensure the above were meaningful tests:
for average in ['macro', 'weighted', 'micro']:
assert_not_equal(recall_13(average=average),
recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
# Test that average_precision_score function returns an error when trying
# to compute average_precision_score for multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
    # Duplicate values with precision-recall require different
    # processing than when computing the AUC of a ROC, because the
    # precision-recall curve is a decreasing curve
# The following situation corresponds to a perfect
# test statistic, the average_precision_score should be 1
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
    # Here if we go from left to right in y_true, the 0 values are
    # separated from the 1 values, so it appears that we've
    # correctly sorted our classifications. But in fact the first two
# values have the same score (0.5) and so the first two values
# could be swapped around, creating an imperfect sorting. This
# imperfection should come through in the end score, making it less
# than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='binary')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_precision_recall_f_unused_pos_label():
# Check warning that pos_label unused when set to non-default value
# but average != 'binary'; even if data is binary.
assert_warns_message(UserWarning,
"Note that pos_label (set to 2) is "
"ignored when average != 'binary' (got 'macro'). You "
"may use labels=[pos_label] to specify a single "
"positive class.", precision_recall_fscore_support,
[1, 2, 1], [1, 2, 2], pos_label=2, average='macro')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
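        # Matthews correlation coefficient:
        # MCC = (tp*tn - fp*fn) / sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn)),
        # reproduced by hand below and compared against matthews_corrcoef.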
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
def test_cohen_kappa():
# These label vectors reproduce the contingency matrix from Artstein and
# Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
y1 = np.array([0] * 40 + [1] * 60)
y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
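    # Cohen's kappa = (p_o - p_e) / (1 - p_e); here observed agreement
    # p_o = 0.70 and chance agreement p_e = 0.4*0.3 + 0.6*0.7 = 0.54,
    # giving (0.70 - 0.54) / 0.46 ~= 0.348.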
kappa = cohen_kappa_score(y1, y2)
assert_almost_equal(kappa, .348, decimal=3)
assert_equal(kappa, cohen_kappa_score(y2, y1))
# Add spurious labels and ignore them.
y1 = np.append(y1, [2] * 4)
y2 = np.append(y2, [2] * 4)
assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)
assert_almost_equal(cohen_kappa_score(y1, y1), 1.)
# Multiclass example: Artstein and Poesio, Table 4.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
# Weighting example: none, linear, quadratic.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 50 + [1] * 40 + [2] * 10)
assert_almost_equal(cohen_kappa_score(y1, y2), .9315, decimal=4)
assert_almost_equal(cohen_kappa_score(y1, y2, weights="linear"), .9412, decimal=4)
assert_almost_equal(cohen_kappa_score(y1, y2, weights="quadratic"), .9541, decimal=4)
@ignore_warnings
def test_matthews_corrcoef_nan():
assert_equal(matthews_corrcoef([0], [1]), 0.0)
assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_matthews_corrcoef_against_numpy_corrcoef():
rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=20)
y_pred = rng.randint(0, 2, size=20)
assert_almost_equal(matthews_corrcoef(y_true, y_pred),
np.corrcoef(y_true, y_pred)[0, 1], 10)
def test_matthews_corrcoef():
rng = np.random.RandomState(0)
y_true = ["a" if i == 0 else "b" for i in rng.randint(0, 2, size=20)]
# corrcoef of same vectors must be 1
assert_almost_equal(matthews_corrcoef(y_true, y_true), 1.0)
# corrcoef, when the two vectors are opposites of each other, should be -1
y_true_inv = ["b" if i == "a" else "a" for i in y_true]
assert_almost_equal(matthews_corrcoef(y_true, y_true_inv), -1)
y_true_inv2 = label_binarize(y_true, ["a", "b"])
y_true_inv2 = np.where(y_true_inv2, 'a', 'b')
assert_almost_equal(matthews_corrcoef(y_true, y_true_inv2), -1)
# For the zero vector case, the corrcoef cannot be calculated and should
# result in a RuntimeWarning
mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered',
matthews_corrcoef, [0, 0, 0, 0], [0, 0, 0, 0])
# But will output 0
assert_almost_equal(mcc, 0.)
# And also for any other vector with 0 variance
mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered',
matthews_corrcoef, y_true, ['a'] * len(y_true))
# But will output 0
assert_almost_equal(mcc, 0.)
# These two vectors have 0 correlation and hence mcc should be 0
y_1 = [1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1]
y_2 = [1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1]
assert_almost_equal(matthews_corrcoef(y_1, y_2), 0.)
# Check that sample weight is able to selectively exclude
mask = [1] * 10 + [0] * 10
    # Only the first half of the vector elements are given a weight of 1,
    # and hence the mcc will not be a perfect 0 as in the previous case
assert_raises(AssertionError, assert_almost_equal,
matthews_corrcoef(y_1, y_2, sample_weight=mask), 0.)
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
assert_raises(ValueError, precision_score, y_true, y_pred,
average="samples")
assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
beta=0.5)
    # same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
for average in ['samples', 'micro', 'macro', 'weighted', None]:
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_binary_averaged():
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
ps, rs, fs, _ = precision_recall_fscore_support(y_true, y_pred,
average=None)
p, r, f, _ = precision_recall_fscore_support(y_true, y_pred,
average='macro')
assert_equal(p, np.mean(ps))
assert_equal(r, np.mean(rs))
assert_equal(f, np.mean(fs))
p, r, f, _ = precision_recall_fscore_support(y_true, y_pred,
average='weighted')
support = np.bincount(y_true)
assert_equal(p, np.average(ps, weights=support))
assert_equal(r, np.average(rs, weights=support))
assert_equal(f, np.average(fs, weights=support))
def test_zero_precision_recall():
# Check that pathological cases do not bring NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='macro'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='macro'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='macro'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
# Test confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 4, 1],
[4, 3, 24],
[0, 2, 18]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = confusion_matrix(y_true,
y_pred,
labels=labels)
assert_array_equal(cm, [[19, 1, 4],
[0, 18, 2],
[4, 24, 3]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_confusion_matrix_sample_weight():
"""Test confusion matrix - case with sample_weight"""
y_true, y_pred, _ = make_prediction(binary=False)
weights = [.1] * 25 + [.2] * 25 + [.3] * 25
cm = confusion_matrix(y_true, y_pred, sample_weight=weights)
true_cm = (.1 * confusion_matrix(y_true[:25], y_pred[:25]) +
.2 * confusion_matrix(y_true[25:50], y_pred[25:50]) +
.3 * confusion_matrix(y_true[50:], y_pred[50:]))
assert_array_almost_equal(cm, true_cm)
assert_raises(
ValueError, confusion_matrix, y_true, y_pred,
sample_weight=weights[:-1])
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
# a label not in y_true should result in zeros for that row/column
extra_label = np.max(y_true) + 1
cm = confusion_matrix(y_true, y_pred, labels=[2, extra_label])
assert_array_equal(cm, [[18, 0],
[0, 0]])
# check for exception when none of the specified labels are in y_true
assert_raises(ValueError, confusion_matrix, y_true, y_pred,
labels=[extra_label, extra_label + 1])
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
if np_version[:3] < (1, 7, 0):
expected_message = ("NumPy < 1.7.0 does not implement"
" searchsorted on unicode data correctly.")
assert_raise_message(RuntimeError, expected_message,
classification_report, y_true, y_pred)
else:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_long_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array(["blue", "green"*5, "red"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
greengreengreengreengreen 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_labels_target_names_unequal_length():
y_true = [0, 0, 2, 0, 0]
y_pred = [0, 2, 2, 0, 0]
target_names = ['class 0', 'class 1', 'class 2']
assert_warns_message(UserWarning,
"labels size, 2, does not "
"match size of target_names, 3",
classification_report,
y_true, y_pred, target_names=target_names)
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
_, y_true = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=0)
_, y_pred = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=1)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
w = np.array([1, 3])
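    # y1 and y2 differ in exactly one of the 2 * 3 = 6 label entries, so the
    # unweighted Hamming loss below is 1 / 6.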
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, 1 - y2), 1)
assert_equal(hamming_loss(y1, 1 - y1), 1)
assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
assert_equal(hamming_loss(y1, y2, sample_weight=w), 1. / 12)
assert_equal(hamming_loss(y1, 1-y2, sample_weight=w), 11. / 12)
assert_equal(hamming_loss(y1, np.zeros_like(y1), sample_weight=w), 2. / 3)
# sp_hamming only works with 1-D arrays
assert_equal(hamming_loss(y1[0], y2[0]), sp_hamming(y1[0], y2[0]))
assert_warns(DeprecationWarning, hamming_loss, y1, y2, classes=[0, 1])
def test_multilabel_jaccard_similarity_score():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
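    # per-sample Jaccard scores are therefore [1/2, 2/2], and their mean is 0.75.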
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]])
y_pred = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 1, 0]])
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
# tp = [0, 1, 1, 0]
# fn = [1, 0, 0, 1]
# fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
# Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
# Check samples
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"),
0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 1], [0, 0, 0, 1], [1, 1, 0, 0]])
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# Check samples
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
y_true = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 1, 0]])
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 1.5 / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 3 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.333, 2)
def test_precision_recall_f1_no_labels():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
for beta in [1]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=None, beta=beta)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred, beta=beta, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
for average in ["macro", "micro", "weighted", "samples"]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=average,
beta=beta)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert_equal(s, None)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred,
beta=beta, average=average)
assert_almost_equal(fbeta, 0)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
my_assert = assert_warns_message
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.')
my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.')
my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.')
my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.')
my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
    # single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, [1, 1], [-1, -1], average='binary')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, [-1, -1], [1, 1], average='binary')
def test_recall_warnings():
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_precision_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
def test_fscore_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no predicted samples.')
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_prf_average_binary_data_non_binary():
# Error if user does not explicitly set non-binary average mode
y_true_mc = [1, 2, 3, 3]
y_pred_mc = [1, 2, 3, 1]
y_true_ind = np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])
y_pred_ind = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
for y_true, y_pred, y_type in [
(y_true_mc, y_pred_mc, 'multiclass'),
(y_true_ind, y_pred_ind, 'multilabel-indicator'),
]:
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
assert_raise_message(ValueError,
"Target is %s but average='binary'. Please "
"choose another average setting." % y_type,
metric, y_true, y_pred)
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
assert_raises(ValueError, _check_targets, y1, y2)
if type1 != type2:
assert_raise_message(
ValueError,
"Can't handle mix of {0} and {1}".format(type1, type2),
_check_targets, y1, y2)
else:
if type1 not in (BIN, MC, IND):
assert_raise_message(ValueError,
"{0} is not supported".format(type1),
_check_targets, y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert_equal(merged_type, expected)
if merged_type.startswith('multilabel'):
assert_equal(y1out.format, 'csr')
assert_equal(y2out.format, 'csr')
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
assert_raises(ValueError, _check_targets, y1[:-1], y2)
# Make sure seq of seq is not supported
y1 = [(1, 2,), (0, 2, 3)]
y2 = [(2,), (0, 2,)]
msg = ('You appear to be using a legacy multi-label data representation. '
'Sequence of sequences are no longer supported; use a binary array'
' or sparse matrix instead.')
assert_raise_message(ValueError, msg, _check_targets, y1, y2)
def test__check_targets_multiclass_with_both_y_true_and_y_pred_binary():
# https://github.com/scikit-learn/scikit-learn/issues/8098
y_true = [0, 1]
y_pred = [0, -1]
assert_equal(_check_targets(y_true, y_pred)[0], 'multiclass')
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
def test_hinge_loss_multiclass():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
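    # Multiclass hinge loss per sample: max(0, 1 + max_{j != y} s_j - s_y),
    # where s are the decision values; the terms below reproduce this by hand.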
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[+1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, +0.24],
[-2.36, -0.79, -0.27, +0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
assert_raise_message(ValueError,
error_message,
hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
# Currently, invariance of string and integer labels cannot be tested
# in common invariance tests because invariance tests for multiclass
    # decision functions are not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
    # raise error if the number of classes does not match.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
assert_raises(ValueError, log_loss, y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
# test labels option
y_true = [2, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5]]
y_score = np.array([[0.1, 0.9], [0.1, 0.9]])
error_str = ('y_true contains only one label (2). Please provide '
'the true labels explicitly through the labels argument.')
assert_raise_message(ValueError, error_str, log_loss, y_true, y_pred)
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.2, 0.3]]
error_str = ('Found input variables with inconsistent numbers of samples: '
'[3, 2]')
assert_raise_message(ValueError, error_str, log_loss, y_true, y_pred)
# works when the labels argument is used
true_log_loss = -np.mean(np.log(y_score[:, 1]))
calculated_log_loss = log_loss(y_true, y_score, labels=[1, 2])
assert_almost_equal(calculated_log_loss, true_log_loss)
# ensure labels work when len(np.unique(y_true)) != y_pred.shape[1]
y_true = [1, 2, 2]
y_score2 = [[0.2, 0.7, 0.3], [0.6, 0.5, 0.3], [0.3, 0.9, 0.1]]
loss = log_loss(y_true, y_score2, labels=[1, 2, 3])
assert_almost_equal(loss, 1.0630345, decimal=6)
def test_log_loss_pandas_input():
    # case when input is a pandas Series or DataFrame (gh-5715)
y_tr = np.array(["ham", "spam", "spam", "ham"])
y_pr = np.array([[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]])
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TrueInputType, PredInputType in types:
# y_pred dataframe, y_true series
y_true, y_pred = TrueInputType(y_tr), PredInputType(y_pr)
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
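    # Brier score is the mean squared difference between the forecast probability
    # and the binary outcome, computed here via the squared Euclidean norm.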
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
# calculate even if only single class in y_true (#6980)
assert_almost_equal(brier_score_loss([0], [0.5]), 0.25)
assert_almost_equal(brier_score_loss([1], [0.5]), 0.25)
| bsd-3-clause |
fumitoh/modelx | modelx/tests/io/test_pandas.py | 1 | 5359 | import modelx as mx
import pandas as pd
import numpy as np
import pytest
_pd_ver = tuple(int(i) for i in pd.__version__.split("."))[:-1]
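# e.g. pandas "0.25.3" -> (0, 25); only the major/minor components are compared below.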
@pytest.fixture
def testspace():
model = mx.new_model()
space = model.new_space()
def f0():
return 3
def f1(x):
return 2 * x
def f2(x, y=1):
return x + y
f0, f1, f2 = mx.defcells(f0, f1, f2)
return space
@pytest.fixture
def space_with_string_index():
model = mx.new_model()
space = model.new_space()
def f0(strind):
return strind
def f1():
return 3
mx.defcells(f0, f1)
return space
# -------------------------------------------------------------------------
# Test Conversion from Cells to DataFrame and Series
def test_cells_empty(testspace):
for c in ["f0", "f1", "f2"]:
assert testspace.cells[c].to_series().empty
assert testspace.cells[c].series.empty
assert testspace.cells[c].to_frame().empty
assert testspace.cells[c].frame.empty
@pytest.mark.parametrize(
"cells, args, length",
[
["f0", ((),), 1],
["f1", (1, 2, 3), 3],
["f1", ((1, 2, 3),), 3],
["f2", ((1, 2), (3, 4), (5, 6)), 3],
["f2", (((1, 2), (3, 4), (5, 6))), 3],
],
)
def test_cells_to_frame_with_args(testspace, cells, args, length):
assert len(testspace.cells[cells].to_frame(*args).index) == length
assert len(testspace.cells[cells].to_frame()) == length
assert len(testspace.cells[cells].frame) == length
@pytest.mark.parametrize(
"cells, args, length",
[
["f0", ((),), 1],
["f1", (1, 2, 3), 3],
["f1", ((1, 2, 3),), 3],
["f2", ((1, 2), (3, 4), (5, 6)), 3],
["f2", (((1, 2), (3, 4), (5, 6)),), 3],
],
)
def test_cells_to_series_with_args(testspace, cells, args, length):
assert len(testspace.cells[cells].to_series(*args).index) == length
assert len(testspace.cells[cells].to_series()) == length
assert len(testspace.cells[cells].series) == length
# -------------------------------------------------------------------------
# Test Conversion from Space to DataFrame
def test_space_to_frame_empty(testspace):
assert testspace.to_frame().empty
assert testspace.frame.empty
if _pd_ver >= (0, 20):
@pytest.mark.parametrize(
"args, idxlen, cols",
[
[((1, 2), (3, 4), (5, 6)), 7, {"f0", "f1", "f2"}],
[(((1, 2), (3, 4), (5, 6)),), 7, {"f0", "f1", "f2"}],
],
)
def test_space_to_frame_args(testspace, args, idxlen, cols):
assert testspace.to_frame().empty
df = testspace.to_frame(*args)
assert set(df.columns) == cols
assert len(df.index) == idxlen
if len(args) == 1:
args = args[0]
for arg in args:
dfx = df.xs(arg[0], level="x")
assert int(dfx.loc[dfx.index.isnull(), "f1"]) == testspace.f1(
arg[0]
)
assert df.loc[arg, "f2"] == testspace.f2(*arg)
@pytest.mark.parametrize(
"args, idxlen, cols",
[
[(1, 2, 3), 7, {"f0", "f1", "f2"}],
[((1, 2, 3),), 7, {"f0", "f1", "f2"}],
],
)
def test_space_to_frame_args_defaults(testspace, args, idxlen, cols):
assert testspace.to_frame().empty
df = testspace.to_frame(*args)
assert set(df.columns) == cols
assert len(df.index) == idxlen
if isinstance(args[0], tuple):
args = args[0]
for arg in args:
assert df.loc[(arg, 1), "f2"] == testspace.f2(arg, 1)
def test_space_with_string_index_to_frame(space_with_string_index):
"""When index contains string and NaN"""
s = space_with_string_index
s.f0("foo")
s.f1()
df = pd.DataFrame(
data={"f0": ["foo", np.NaN], "f1": [np.NaN, 3.0]},
index=pd.Index(["foo", np.NaN], name="strind"),
)
assert s.frame.equals(df)
# -------------------------------------------------------------------------
# Test Conversion from CellsView to DataFrame
if _pd_ver >= (0, 20):
@pytest.mark.parametrize(
"args, idxlen, cols",
[
[((1, 2), (3, 4), (5, 6)), 7, ["f0", "f1", "f2"]],
[(((1, 2), (3, 4), (5, 6)),), 7, ["f0", "f1", "f2"]],
],
)
def test_cellsview_to_frame_args(testspace, args, idxlen, cols):
assert testspace.cells[cols].to_frame().empty
df = testspace.cells[cols].to_frame(*args)
assert set(df.columns) == set(cols)
assert len(df.index) == idxlen
if len(args) == 1:
args = args[0]
for arg in args:
dfx = df.xs(arg[0], level="x")
assert int(dfx.loc[dfx.index.isnull(), "f1"]) == testspace.f1(
arg[0]
)
assert df.loc[arg, "f2"] == testspace.f2(*arg)
@pytest.mark.parametrize(
"args, idxlen, cols",
[
[(1, 2, 3), 7, ["f0", "f1", "f2"]],
[((1, 2, 3),), 7, ["f0", "f1", "f2"]],
],
)
def test_cellsview_to_frame_args_defaults(testspace, args, idxlen, cols):
assert testspace.cells[cols].to_frame().empty
df = testspace.cells[cols].to_frame(*args)
assert set(df.columns) == set(cols)
assert len(df.index) == idxlen
if isinstance(args[0], tuple):
args = args[0]
for arg in args:
assert df.loc[(arg, 1), "f2"] == testspace.f2(arg, 1)
| gpl-3.0 |
davemccormick/pyAnimalTrack | src/pyAnimalTrack/ui/Model/TableModel.py | 1 | 4032 | from PyQt5.QtCore import QAbstractTableModel, QVariant
from PyQt5.Qt import Qt
from pyAnimalTrack.backend.filehandlers.input_data import InputData
class TableModel(QAbstractTableModel):
def __init__(self, input_data):
""" Constructor
:returns: void
"""
super(TableModel, self).__init__()
self.__dataFile = input_data
self.__dataSet = self.__dataFile.getData()
def rowCount(self, QModelIndex_parent=None, *args, **kwargs):
""" Gets the number of data rows. Used by PyQt.
:param QModelIndex_parent: -
:param args: -
:param kwargs: -
:returns: The number of data rows
"""
return len(self.__dataSet.index)
def columnCount(self, QModelIndex_parent=None, *args, **kwargs):
""" Gets the number of columns used in the dataset Used by PyQt.
:param QModelIndex_parent: -
:param args: -
:param kwargs: -
:returns: The number of columns for the dataset
"""
return len(self.__dataSet.columns)
def headerData(self, index, Qt_Orientation, role=None):
""" Gets a header for a row/column of data. Used by PyQt.
:param index: The column/row index
:param Qt_Orientation: The alignment of the header, Qt.Horizontal or Qt.Vertical
        :param role: The Qt item data role (e.g. Qt.DisplayRole)
:returns: A string containing the text to show as the header
"""
if role == Qt.DisplayRole:
if Qt_Orientation == Qt.Horizontal:
return self.__dataFile.getReadableColumns()[index]
else:
return index + 1
else:
return QVariant()
def data(self, QModelIndex, role=None):
""" Gets an individual cell's value. Used by PyQt.
:param QModelIndex: An object with a row() and column() function, used to determine the correct cell
        :param role: The Qt item data role (e.g. Qt.DisplayRole)
:returns: A string representation of the cell's value
"""
if role == Qt.DisplayRole:
return str(self.__dataSet.iloc[QModelIndex.row()][QModelIndex.column()])
else:
return QVariant()
def get_dataset(self):
""" Retrieve the entire dataset
:returns: A pandas dataframe of the entire dataset
"""
return self.__dataSet
def get_epoch_dataset(self, start=0, end=0, step=1, isMilliseconds=False, sampleRatePerSecond=10):
""" Retrieve a subset of the dataset, by rows or milliseconds
:param start: The first row (or millisecond) to get
:param end: The last row (or millisecond) to get
:param step: How far between each row to return
:param isMilliseconds: To go by row, or by time
:param sampleRatePerSecond: If working in milliseconds, how many samples per second were taken
:returns: A pandas dataframe, sliced to the requested rows
"""
# Make sure we are working with integer values for the numerical parameters
try:
start = int(start)
except:
start = 0
try:
end = int(end)
except:
end = 0
try:
step = int(step)
except:
step = 1
try:
sampleRatePerSecond = int(sampleRatePerSecond)
except:
sampleRatePerSecond = 10
# If working time based, we need a conversion
if isMilliseconds:
start = int((start / 1000.0) * sampleRatePerSecond)
end = int((end / 1000.0) * sampleRatePerSecond)
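            # e.g. start=2000 ms at sampleRatePerSecond=10 maps to row 20 (2 s * 10 samples/s)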
# Sanity checks
if end > len(self.__dataSet):
end = len(self.__dataSet)
elif end < 0:
end = 0
# Correct the end first, so the start doesn't get left incorrect if modified
if start > end:
start = end
elif start < 0:
start = 0
# If the given end is 0, we actually want everything
if end == 0:
end = -1
return self.__dataSet[start:end:step]
| gpl-3.0 |
baseband-geek/singlepulse-visualizer | singlepulse_tools.py | 1 | 16869 | #!/usr/bin/python
# DM Sigma Time (s) Sample Downfact
import numpy as np
import matplotlib
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from pulsar_tools import disp_delay
import math
import sys
class SinglePulse:
"""
A class to contain all the relevant information for each single pulse detection
(i.e. S/N, box-car window size, DM, etc.). This is for ease of access during
plotting/other interactive stuff.
"""
def __init__(self, DM, sig, t, samp, dfact, inf):
self.dm = DM
self.sigma = sig
self.time = t
self.sample = samp
self.downfact = dfact
self.inf_file = inf
def print_params(self):
print "DM:",self.dm
print "Sigma:",self.sigma
print "Time:",self.time
print "Sample:",self.sample
print "Downfactor:",self.downfact
print "inf_file:",self.inf_file
class SPList:
"""
    A class to contain a number of SinglePulse objects in a numpy.array and grant easy access to parameter
lists of those objects. Contains the original list of SinglePulse objects, and a list
of each object's:
DM, sigma, time, sample, downfactor and inf_file name.
"""
def __init__(self, sp_list):
self.list = np.array(sp_list)
self.dm_list = np.array([sp.dm for sp in sp_list])
self.sigma_list = np.array([sp.sigma for sp in sp_list])
self.time_list = np.array([sp.time for sp in sp_list])
self.sample_list = np.array([sp.sample for sp in sp_list])
self.downfact_list = np.array([sp.downfact for sp in sp_list])
self.inf_list = np.array([sp.inf_file for sp in sp_list])
def print_lists(self):
print "DM list:",self.dm_list
print "Sigma list:",self.sigma_list
print "Time list:",self.time_list
print "Sample list:",self.sample_list
print "Downfactor list:",self.downfact_list
print "Inf_file list:",self.inf_list
def load_file(filename):
if filename==None:
print "No filename supplied to read..."
elif filename.endswith('.singlepulse'):
DM = np.genfromtxt(filename, comments="#", autostrip=True, usecols=0, skip_header=1)
Sigma = np.genfromtxt(filename, comments="#", autostrip=True, usecols=1, skip_header=1)
Time = np.genfromtxt(filename, comments="#", autostrip=True, usecols=2, skip_header=1)
Sample = np.genfromtxt(filename, comments="#", autostrip=True, usecols=3, skip_header=1)
Downfact = np.genfromtxt(filename, comments="#", autostrip=True, usecols=4, skip_header=1)
inf_file = np.genfromtxt(filename, comments="#", autostrip=True, usecols=5, dtype=str, skip_header=1)
sp = [SinglePulse(dm, sig, time, samp, dfact, inf) for dm, sig, time, samp, dfact, inf \
in zip(DM, Sigma, Time, Sample, Downfact, inf_file)]
return SPList(sp)
#return sp
elif filename.endswith('.flag'):
flags = np.genfromtxt(filename ,comments="#", autostrip=True)
if len(flags) == 0:
print "No flags/bad times provided. Not times in final output will be masked."
return flags
else:
print "File name suplied is not recognised. Must be either .singlepulse, .bad or .flag"
#def load_flags(filename):
# if filename==None:
# print "No filename supplied to read into flags..."
#
# flags = np.genfromtxt(filename ,comments="#", autostrip=True)
# if len(flags)==0:
# print "No flags provided. Not times in final output will be hidden."
#
# return flags
def obs_stats(time, flags):
# Not doing total time correctly, depends on last single pulse detection instead of observation time
flag_time = 0
# BWM: if there is only 1 masked region, flags is a list,
# if there are 2+ masked regions, flags is a list of lists.
if any(isinstance(l, np.ndarray) for l in flags):
for flag in flags:
flag_time += (float(flag[1]) - float(flag[0]))
else:
flag_time = float(flags[1]) - float(flags[0])
print "%.2f seconds flagged from %.2f seconds of data (%.2f percent)" % ( flag_time, time[-1], flag_time/time[-1]*100)
def flagfile(basename, max_DM=2097.2, freq_l=0.169615, freq_h=0.200335, padding=3):
"""
This function takes in a text file of bad 0 DM times and
writes out one flagged over the correct de-dispersive smearing
times, looking for overlaps along the way. There must be a text file named
basename.bad with rows indicating bad times for this to work.
"""
from subprocess import check_call
# BWM: originally planned to move this to the load_file function,
# but left it incase we JUST want to call flagfile
bads = np.genfromtxt(basename+'.bad', comments='#', autostrip=True)
# BWM: again because how np.genfromtxt works, if there is only 1 bad line, we get a list,
# if there are 2+ bad lines we get a list of lists. So have to check for np.ndarray
# instances and change method accordingly.
i = 0 # initialize counter for new list
flags = []
if any(isinstance(b, np.ndarray) for b in bads):
for bad in bads:
start = bad[0] - (padding + disp_delay(freq1=freq_l, freq2=freq_h, DM=max_DM)/1000.0)
if start < 0:
start = 0
stop = bad[1] + padding
if len(flags) > 0:
if start <= flags[-1][1]:
flags[-1][1] = stop
else:
flags.append([start, stop])
else:
flags.append([start, stop])
else:
start = bads[0] - (padding + disp_delay(freq1=freq_l, freq2=freq_h, DM=max_DM)/1000.0)
if start < 0:
start = 0
stop = bads[1] + padding
if len(flags) > 0:
if start <= flags[-1][1]:
flags[-1][1] = stop
else:
flags.append([start, stop])
else:
flags.append([start, stop])
# save new file as basename.flag
np.savetxt(basename+'.flag', flags, fmt='%d')
    # call flag.sh script to create masked singlepulse file
check_call(['flag.sh', basename])
#Popen(['flag.sh', basename]).communicate()[0]
def singlepulse_plot(basename=None, DMvTime=1, StatPlots=False, raw = False, threshold=5.0, movie=False):
"""
Plots up the flagged data, should switch to using genfromtxt when I have the time.
BWM: switched to using load_file to load singlepulse and flags. Uses genfromtxt.
"""
print "Make sure you have run sort_singlepulse.py to gather the single pulse events into the one file {0}.singelpulse".format(basename)
if raw:
data = load_file(basename + '.singlepulse')
#flag_times = False
else:
#flag_times = load_file(basename+'.bad')
try:
flagfile(basename) # BWM: should we be providing appropriate freqs and DM for this?
except:
print "No {}.bad file given. Creating one with entry [0 0]".format(basename)
f=open('{}.bad'.format(basename),'w')
f.write('0 0')
f.close()
print "Saved {}.bad".format(basename)
print "Retrying..."
flagfile(basename)
data = load_file(basename + '_flagged.singlepulse')
flags = load_file(basename + '.flag')
#for a in vars(data).items():
# print a[0]
#print [v[0] for v in vars(data).items()]
#sys.exit(0)
data = SPList(data.list[np.where(data.sigma_list >= threshold)])
#DM = [float(row.split()[0]) for row in data if float(row.split()[1]) >= threshold]
#Sigma = [float(row.split()[1]) for row in data if float(row.split()[1]) >= threshold]
#Time = [float(row.split()[2]) for row in data if float(row.split()[1]) >= threshold]
#Sample = [int(row.split()[3]) for row in data if float(row.split()[1]) >= threshold]
#Downfact = [int(row.split()[4]) for row in data if float(row.split()[1]) >= threshold]
#DM = data.dm_list
#Sigma = data.sigma_list
#Time = data.time_list
#Downfact = data.downfact_list
Downfact_float = data.downfact_list.astype(float)
fig = plt.figure()
cm = plt.cm.get_cmap('gist_rainbow')
if StatPlots:
ax0 = fig.add_subplot(231)
plt.hist(data.sigma_list, histtype='step', bins=int(0.2 * len(set(data.sigma_list))))
ax0.set_xlabel('Signal-to-Noise', fontsize=18)
ax0.set_ylabel('Number of Pulses', fontsize=18)
ax0.set_xlim([data.sigma_list.min(), data.sigma_list.max()])
ax1 = fig.add_subplot(232)
plt.hist(data.dm_list, histtype='step', bins=int(0.5 * len(set(data.dm_list))))
ax1.set_xlabel('DM ($\mathrm{pc\, cm^{-3}}$)', fontsize=18)
ax1.set_ylabel('Number of Pulses', fontsize=18)
ax1.set_xlim([data.dm_list.min(), data.dm_list.max()])
ax2 = fig.add_subplot(233, sharex=ax1)
# BWM: now shares x-axis with ax1, so changing DM on one will change range on the other
plt.scatter(data.dm_list, data.sigma_list, c=Downfact_float, cmap=cm, alpha=0.9)
ax2.set_ylabel('Signal-to-Noise', fontsize=18)
        ax2.set_xlabel('DM ($\mathrm{pc\, cm^{-3}}$)', fontsize=18)
ax2.set_xlim([data.dm_list.min(), data.dm_list.max()])
ax2.set_ylim([data.sigma_list.min(), data.sigma_list.max()])
ax3 = fig.add_subplot(212)
else:
ax3 = fig.add_subplot(111)
# TODO: need to figure out how (if at all) we can make the axis sharing work
# for x-axis to y-axis share
# ax3.set_title("Single Pulse Sigma")
ax3.set_xlabel('Time (s)', fontsize=18)
ax3.set_ylabel('DM ($\mathrm{pc\, cm^{-3}}$)', fontsize=18)
ax3.set_ylim([data.dm_list.min(), data.dm_list.max()])
ax3.set_xlim([data.time_list.min(), data.time_list.max()])
#ax3.set_ylabel('Time (s)', fontsize=18)
#ax3.set_xlabel('DM ($\mathrm{pc\, cm^{-3}}$)', fontsize=18)
#ax3.set_ylim([data.time_list.min(), data.time_list.max()])
#ax3.set_xlim([data.dm_list.min(), data.dm_list.max()])
#cm = plt.cm.get_cmap('gist_rainbow')
# grab axis3 size to allocate marker sizes
bbox_pix = ax3.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
width, height = bbox_pix.width, bbox_pix.height
area = width * height # axes area in inches^2 (apparently)
#TODO: need to try and use something like percentiles to make sure that just one
# big pulse doesn't swamp the sizes or colorbars.
print data.sigma_list.min()
print data.sigma_list.max()
print np.percentile(data.sigma_list, 99.5)
Size = (3. * area / 2.) * (data.sigma_list**2 / np.percentile(data.sigma_list, 99.5))
Size[np.where(Size > np.percentile(data.sigma_list, 99.5))] = (3. * area / 2.) * np.percentile(data.sigma_list, 99.5)
#print len(Size[np.where(data.sigma_list>np.percentile(data.sigma_list, 99.5))])
print Size.min()
print Size.max()
obs_stats(data.time_list, flags)
# sc=ax3.scatter(Time,DM, s=Size, c=Sigma, vmin=min(Sigma), vmax=max(Sigma),\
# cmap=cm, picker=1)
sc = ax3.scatter(data.time_list, data.dm_list, s=Size, c=Downfact_float, cmap=cm, \
vmin=Downfact_float.min(), vmax=Downfact_float.max(), facecolors='none')
# sc = ax3.scatter(data.dm_list, data.time_list, s=Size, c=Downfact_float, cmap=cm, \
# vmin=Downfact_float.min(), vmax=Downfact_float.max(), picker=1, facecolor='none')
# leg = ax1.legend()
#plt.colorbar(sc, label="Sigma", pad=0.01)
#plt.colorbar(sc, label="Downfact", pad=0.01)
# BWM: can't seem to get the bottom plot to extend the entire width when the color bar is active.
fig.subplots_adjust(hspace=0.2, wspace=0.5)
if not raw:
if any(isinstance(l, np.ndarray) for l in flags):
for flag in flags:
flag_area = patches.Rectangle((float(flag[0]), data.dm_list.min()), \
(float(flag[1]) - float(flag[0])), \
(data.dm_list.max() - data.dm_list.min()), \
edgecolor='0', facecolor='0.66')
ax3.add_patch(flag_area)
else:
flag_area = patches.Rectangle((float(flags[0]), data.dm_list.min()), \
(float(flags[1]) - float(flags[0])), \
(data.dm_list.max() - data.dm_list.min()), \
edgecolor='0', facecolor='0.66')
ax3.add_patch(flag_area)
def onpick(event):
points = event.artist
ind = event.ind
mouseevent = event.mouseevent
print '\n'
print "Information for data points around click event %.4f, %.4f:" % (mouseevent.xdata, mouseevent.ydata)
for i in ind: # These are fudge factors to turn samples into ms.
if ( data.dm_list[i] < 150):
boxcar = data.downfact_list[i]
            elif ( 150 <= data.dm_list[i] < 823.2 ):
boxcar = data.downfact_list[i] * 2
elif ( 823.2 <= data.dm_list[i] < 1486.2):
boxcar = data.downfact_list[i] * 2
            elif ( 1486.2 <= data.dm_list[i] < 2100):
boxcar = data.downfact_list[i] * 2
print "%.2f seconds, %.2f Sigma event detected at a DM of %.2f with a boxcar of: %d ms" % (data.time_list[i], data.sigma_list[i], data.dm_list[i], boxcar)
fig.canvas.mpl_connect('pick_event', onpick)
'''
ax2 = fig.add_subplot(122)
ax2.set_title("Single Pulse Boxcar")
ax2.set_xlabel('Time (s)')
ax2.set_ylabel('DM (pc cm^-3)')
cm = plt.cm.get_cmap('RdYlBu')
sc2=ax2.scatter(Time,DM, c=Downfact_float, vmin=min(Downfact_float), vmax=max(Downfact_float), cmap=cm)
# leg = ax1.legend()
plt.colorbar(sc2)
if not raw:
for flag in flags:
flag_area = matplotlib.patches.Rectangle((float(flag.split()[0]), min(DM)), float(flag.split()[1])-float(flag.split()[0]), max(DM)-min(DM), edgecolor='0', facecolor='0.66')
ax2.add_patch(flag_area)
'''
fig.suptitle('Single Pulse Search results for ' + basename)
#plt.tight_layout(w_pad=0.1, h_pad=0.1)
#plt.savefig('test.png')
#plt.close(fig)
plt.show()
#obs_stats(Time, flags)
#def slice(infile, dm=None, timerange=None, sigma=None, downfact=None):
# # Not properly implemented yet
#
# data = read_singlepulse(infile)
#
# slices = [None]*5
#
# slice_map = {'dm':0, 'sigma':1, 'timerange':2, 'sample':3, 'downfact':4}
#
#
#
#
# DM = [row.split()[0] for row in data]
# Sigma = [row.split()[1] for row in data]
# Time = [row.split()[2] for row in data]
# Sample = [row.split()[3] for row in data]
# Downfact = [row.split()[4] for row in data]
#
# if dm:
# if type(dm) == type(0) or type(0.0):
# data = [row for row in data if dm <= row.split()[0]]
# elif type(dm) == type([]):
# data = [row for row in data if dm[0] <= row.split()[0] <= dm[1]]
# if sigma:
# if type(sigma) == type(0) or type(0.0):
# data = [row for row in data if sigma <= row.split()[1] ]
# elif type(sigma) == type([]):
# data = [row for row in data if sigma[0] <= row.split()[1] <= sigma[1]]
if __name__ == '__main__':
modes = ['interactive','movie']
from optparse import OptionParser, OptionGroup
parser = OptionParser(description="A python tool to plot, flag, and do otherwise with singlepulse search data from PRESTO")
parser.add_option("-m", "--mode", type="choice", choices=['interactive','movie'], help="Mode you want to run. {0}".format(modes))
parser.add_option("--dm_range", action="store", type="string", nargs=2, default=(0,2000), help="(Not yet implemented) The lowest and highest DM to plot. [default=%default]")
parser.add_option("--obsid", action="store", type="string", help="Observation ID or other basename for files. [No default]")
parser.add_option("--threshold", action="store", type="float", default=5.0, help="S/N threshold. [default=%default]")
(opts, args) = parser.parse_args()
if opts.mode == 'movie':
singlepulse_plot(basename=opts.obsid, DMvTime=1, StatPlots=True, raw = False, threshold=opts.threshold, movie=True)
elif opts.mode == 'interactive':
singlepulse_plot(basename=opts.obsid, DMvTime=1, StatPlots=True, raw=False, threshold=opts.threshold, movie=False)
else:
print "Somehow your non-standard mode snuck through. Try again with one of {0}".format(modes)
quit()
| mit |
ctogle/modular | src/dstoolm4/src/dstoolm4/writer.py | 2 | 3313 | import modular4.mpi as mmpi
import numpy
import PyDSTool as dst
import matplotlib.pyplot as plt
import pdb
def convert_reactions(ss,rs,vs,fs,es):
vns,vvs = zip(*vs) if vs else ([],[])
fns,fvs = zip(*fs) if fs else ([],[])
ens,evs = zip(*es) if es else ([],[])
def rxr(r):
if r in vns:return r
elif r in fns:
r = fvs[fns.index(r)]
return '('+str(r)+')'
else:
print('reaction rate is neither a function nor a variable!')
raise ValueError
def rxustr(rr,ru):
rxu = ' * '.join((u[1]+'**'+str(u[0]) if u[0] > 1 else u[1] for u in ru))
rxs = rxr(rr)
if rxu:rxs = rxu+' * '+rxs
return rxs
rhs,afs = {},{}
for sn,sv in ss:
if sn in ens:
base = evs[ens.index(sn)]
for fn in fns:
base = base.replace(fn,rxr(fn))
else:base = ''
rhs[sn] = base
for rr,ru,rp,rn in rs:
term = rxustr(rr,ru)
uvs,uns = zip(*ru) if ru else ([],[])
pvs,pns = zip(*rp) if rp else ([],[])
for sn,sv in ss:
m = 0
if sn in uns:m -= uvs[uns.index(sn)]
if sn in pns:m += pvs[pns.index(sn)]
if not m == 0:
smv = str(abs(m))+' * ' if abs(m) > 1 else ''
sms = ' - ' if m < 0 else ' + '
rhs[sn] += sms+smv+term
for sn,sv in ss:
if rhs[sn].startswith(' + '):
rhs[sn] = rhs[sn].replace(' + ','',1)
return rhs,afs
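# Illustrative (hypothetical) example: for a single reaction X -> Y with rate
# variable 'k', i.e. ru = [(1, 'X')] and rp = [(1, 'Y')], convert_reactions
# would return roughly {'X': ' - X * k', 'Y': 'X * k'} -- a leading ' + ' is
# stripped from each right-hand side, while a leading ' - ' is kept.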
def get_simulator(e):
esp = e.simparameters
etime = e.end
ctime = e.capture
axes = e.pspace.axes
rhs,afs = convert_reactions(
esp['species'],esp['reactions'],
esp['variables'],esp['functions'],
esp['equations'])
dtargs = e.targets[:]
dtargs[0] = 't'
dshape = (len(dtargs),int(etime/ctime)+1)
if mmpi.root():
print('\n'+'-'*50)
print('converted rhs:')
for r in rhs:print('\t'+r+': '+rhs[r])
print('-'*50+'\n')
algparams = {
'atol': 1e-2,
#'stiff': False,
#'max_step': 0.0, ## CVODE INTERNAL USE ONLY
#'min_step': 0.0, ## CVODE INTERNAL USE ONLY
#'init_step': 0.01, ## DICTATES DT FOR FIXED OUTPUT MESH
'init_step':ctime*0.8, ## DICTATES DT FOR FIXED OUTPUT MESH
}
def simf(*args):
DSargs = dst.args(name = 'dstoolm_test')
dspars,dsics,dsvarspecs,dsfnspecs = {},{},{},{}
for vn,vv in esp['variables']:
if vn in axes:vv = args[axes.index(vn)+1]
dspars[vn] = vv
for sn,si in esp['species']:
if sn in axes:si = args[axes.index(sn)+1]
dsics[sn] = si
dsvarspecs[sn] = rhs[sn]
for fn,ft in afs:dsfnspecs[fn] = ft
DSargs.algparams = algparams
DSargs.pars = dspars
DSargs.fnspecs = dsfnspecs
DSargs.varspecs = dsvarspecs
DSargs.ics = dsics
DSargs.tdomain = [0,etime]
ode = dst.Generator.Vode_ODEsystem(DSargs)
traj = ode.compute('trajectory')
pts = traj.sample(dt = ctime)
data = numpy.zeros(dshape,dtype = numpy.float)
for dtx in range(len(dtargs)):
data[dtx] = pts[dtargs[dtx]]
return data
return simf
| mit |
kernc/scikit-learn | sklearn/cross_decomposition/pls_.py | 34 | 30531 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <[email protected]>
# License: BSD 3 clause
from distutils.version import LooseVersion
from sklearn.utils.extmath import svd_flip
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
import scipy
pinv2_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
# check_finite=False is an optimization available only in scipy >=0.12
pinv2_args = {'check_finite': False}
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
    eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
# We use slower pinv2 (same as np.linalg.pinv) for stability
# reasons
X_pinv = linalg.pinv2(X, **pinv2_args)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv2(Y, **pinv2_args) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
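# Note: u and v above are the leading left/right singular vectors of X'Y
# (shapes (p, 1) and (q, 1)); these are the same quantities the NIPALS inner
# loop converges to in mode "A", up to sign.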
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
This class implements the generic PLS algorithm, constructors' parameters
allow to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
        (i) The outer loop iterates over components.
        (ii) The inner loop estimates the weights vectors. This can be done
        with two algorithms: (a) the inner loop of the original NIPALS algorithm, or (b) an
SVD on residuals cross-covariance matrices.
    Parameters
    ----------
    n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value as True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples in the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
Target vectors, where n_samples in the number of samples and
n_targets is the number of response variables.
"""
# copy since this will contains the residuals (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (
_center_scale_xy(X, Y, self.scale))
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# Forces sign stability of x_weights and y_weights
            # Sign indeterminacy issue from svd if algorithm == "svd"
# and from platform dependent computation if algorithm == 'nipals'
x_weights, y_weights = svd_flip(x_weights, y_weights.T)
y_weights = y_weights.T
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
            # Possible memory footprint reduction may be done here: in order to
            # avoid the allocation of a data chunk for the rank-one
            # approximations matrix which is then subtracted from Xk, we suggest
            # to perform a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
        # U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv2(np.dot(self.x_loadings_.T, self.x_weights_),
**pinv2_args))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv2(np.dot(self.y_loadings_.T, self.y_weights_),
**pinv2_args))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples in the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy, dtype=FLOAT_DTYPES)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples in the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in case of one dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value as True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
Matrices::
T: x_scores_
U: y_scores_
W: x_weights_
C: y_weights_
P: x_loadings_
        Q: y_loadings_
Are computed such that::
X = T P.T + Err and Y = U Q.T + Err
T[:, k] = Xk W[:, k] for k in range(n_components)
U[:, k] = Yk C[:, k] for k in range(n_components)
x_rotations_ = W (P.T W)^(-1)
y_rotations_ = C (Q.T C)^(-1)
where Xk and Yk are residual matrices at iteration k.
`Slides explaining PLS <http://www.eigenvector.com/Docs/Wise_pls_properties.pdf>`
    For each component k, find weights u, v that optimize:
    ``max corr(Xk u, Yk v) * std(Xk u) std(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
This implementation provides the same results that 3 PLS packages
provided in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm " with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
    In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
super(PLSRegression, self).__init__(
n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
algorithm [Tenenhaus 1998] p.204, referred as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value as True unless you don't care about side effects.
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
Matrices::
T: x_scores_
U: y_scores_
W: x_weights_
C: y_weights_
P: x_loadings_
        Q: y_loadings_
Are computed such that::
X = T P.T + Err and Y = U Q.T + Err
T[:, k] = Xk W[:, k] for k in range(n_components)
U[:, k] = Yk C[:, k] for k in range(n_components)
x_rotations_ = W (P.T W)^(-1)
y_rotations_ = C (Q.T C)^(-1)
where Xk and Yk are residual matrices at iteration k.
`Slides explaining PLS <http://www.eigenvector.com/Docs/Wise_pls_properties.pdf>`
For each component k, find weights u, v that optimize::
        max corr(Xk u, Yk v) * std(Xk u) std(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score. This performs a canonical symmetric version of the PLS
regression. But slightly different than the CCA. This is mostly used
for modeling.
This implementation provides the same results that the "plspm" package
provided in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
    lies in the fact that the mixOmics implementation does not exactly implement
the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
super(PLSCanonical, self).__init__(
n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
Simply perform a svd on the crosscovariance matrix: X'Y
There are no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
# copy since this will contains the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (
_center_scale_xy(X, Y, self.scale))
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another one. Else,
# let's use arpacks to compute only the interesting components.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
# Deterministic output
U, V = svd_flip(U, V)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples in the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
| bsd-3-clause |
jmargeta/scikit-learn | examples/linear_model/plot_lasso_coordinate_descent_path.py | 4 | 2823 | """
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD Style.
import numpy as np
import pylab as pl
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(0) # Standardize data (easier to set the l1_ratio parameter)
###############################################################################
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
models = lasso_path(X, y, eps=eps)
alphas_lasso = np.array([model.alpha for model in models])
coefs_lasso = np.array([model.coef_ for model in models])
print("Computing regularization path using the positive lasso...")
models = lasso_path(X, y, eps=eps, positive=True)
alphas_positive_lasso = np.array([model.alpha for model in models])
coefs_positive_lasso = np.array([model.coef_ for model in models])
print("Computing regularization path using the elastic net...")
models = enet_path(X, y, eps=eps, l1_ratio=0.8)
alphas_enet = np.array([model.alpha for model in models])
coefs_enet = np.array([model.coef_ for model in models])
print("Computing regularization path using the positve elastic net...")
models = enet_path(X, y, eps=eps, l1_ratio=0.8, positive=True)
alphas_positive_enet = np.array([model.alpha for model in models])
coefs_positive_enet = np.array([model.coef_ for model in models])
###############################################################################
# Display results
pl.figure(1)
ax = pl.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = pl.plot(coefs_lasso)
l2 = pl.plot(coefs_enet, linestyle='--')
pl.xlabel('-Log(lambda)')
pl.ylabel('weights')
pl.title('Lasso and Elastic-Net Paths')
pl.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
pl.axis('tight')
pl.figure(2)
ax = pl.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = pl.plot(coefs_lasso)
l2 = pl.plot(coefs_positive_lasso, linestyle='--')
pl.xlabel('-Log(lambda)')
pl.ylabel('weights')
pl.title('Lasso and positive Lasso')
pl.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
pl.axis('tight')
pl.figure(3)
ax = pl.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = pl.plot(coefs_enet)
l2 = pl.plot(coefs_positive_enet, linestyle='--')
pl.xlabel('-Log(lambda)')
pl.ylabel('weights')
pl.title('Elastic-Net and positive Elastic-Net')
pl.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
pl.axis('tight')
pl.show()
| bsd-3-clause |
neale/CS-program | 434-MachineLearning/final_project/linearClassifier/sklearn/linear_model/tests/test_base.py | 83 | 15089 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from itertools import product
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import _preprocess_data
from sklearn.linear_model.base import sparse_center_data, center_data
from sklearn.linear_model.base import _rescale_data
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
rng = np.random.RandomState(0)
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
reg = LinearRegression()
reg.fit(X, Y)
assert_array_almost_equal(reg.coef_, [1])
assert_array_almost_equal(reg.intercept_, [0])
assert_array_almost_equal(reg.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
reg = LinearRegression()
reg.fit(X, Y)
assert_array_almost_equal(reg.coef_, [0])
assert_array_almost_equal(reg.intercept_, [0])
assert_array_almost_equal(reg.predict(X), [0])
def test_linear_regression_sample_weights():
# TODO: loop over sparse data as well
rng = np.random.RandomState(0)
# It would not work with under-determined systems
for n_samples, n_features in ((6, 5), ):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
for intercept in (True, False):
# LinearRegression with explicit sample_weight
reg = LinearRegression(fit_intercept=intercept)
reg.fit(X, y, sample_weight=sample_weight)
coefs1 = reg.coef_
inter1 = reg.intercept_
assert_equal(reg.coef_.shape, (X.shape[1], )) # sanity checks
assert_greater(reg.score(X, y), 0.5)
# Closed form of the weighted least square
# theta = (X^T W X)^(-1) * X^T W y
W = np.diag(sample_weight)
if intercept is False:
X_aug = X
else:
dummy_column = np.ones(shape=(n_samples, 1))
X_aug = np.concatenate((dummy_column, X), axis=1)
coefs2 = linalg.solve(X_aug.T.dot(W).dot(X_aug),
X_aug.T.dot(W).dot(y))
if intercept is False:
assert_array_almost_equal(coefs1, coefs2)
else:
assert_array_almost_equal(coefs1, coefs2[1:])
assert_almost_equal(inter1, coefs2[0])
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
reg = LinearRegression()
# make sure the "OK" sample weights actually work
reg.fit(X, y, sample_weights_OK)
reg.fit(X, y, sample_weights_OK_1)
reg.fit(X, y, sample_weights_OK_2)
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
# Test that linear regression also works with sparse data
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.predict(X) - y.ravel(), 0)
def test_linear_regression_multiple_outcome(random_state=0):
# Test multiple-outcome linear regressions
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
reg = LinearRegression(fit_intercept=True)
reg.fit((X), Y)
assert_equal(reg.coef_.shape, (2, n_features))
Y_pred = reg.predict(X)
reg.fit(X, y)
y_pred = reg.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
# Test multiple-outcome linear regressions with sparse data
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_preprocess_data():
n_samples = 200
n_features = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
expected_X_norm = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=False, normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_norm)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_preprocess_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [X, sparse.csc_matrix(X)]
for X in args:
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_preprocess_data_weighted():
n_samples = 200
n_features = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
expected_X_norm = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
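    # i.e. X_norm is sqrt(n_samples) times the (unweighted) root-mean-square
    # deviation of X around the *weighted* mean computed above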
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_norm)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_preprocess_data_with_return_mean():
n_samples = 200
n_features = 2
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
expected_X_norm = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=False, normalize=False,
return_mean=True)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False,
return_mean=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True,
return_mean=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt.A, XA / expected_X_norm)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_preprocess_data():
# Test output format of _preprocess_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = _preprocess_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
def test_rescale_data():
n_samples = 200
n_features = 2
sample_weight = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
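    # _rescale_data multiplies each row of X and y by sqrt(sample_weight), so
    # ordinary least squares on the rescaled data is equivalent to weighted
    # least squares on the original data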
rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)
rescaled_X2 = X * np.sqrt(sample_weight)[:, np.newaxis]
rescaled_y2 = y * np.sqrt(sample_weight)
assert_array_almost_equal(rescaled_X, rescaled_X2)
assert_array_almost_equal(rescaled_y, rescaled_y2)
@ignore_warnings # all deprecation warnings
def test_deprecation_center_data():
n_samples = 200
n_features = 2
w = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
param_grid = product([True, False], [True, False], [True, False],
[None, w])
for (fit_intercept, normalize, copy, sample_weight) in param_grid:
XX = X.copy() # such that we can try copy=False as well
X1, y1, X1_mean, X1_var, y1_mean = \
center_data(XX, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight)
XX = X.copy()
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(XX, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight)
assert_array_almost_equal(X1, X2)
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
# Sparse cases
X = sparse.csr_matrix(X)
for (fit_intercept, normalize, copy, sample_weight) in param_grid:
X1, y1, X1_mean, X1_var, y1_mean = \
center_data(X, y, fit_intercept=fit_intercept, normalize=normalize,
copy=copy, sample_weight=sample_weight)
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(X, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight, return_mean=False)
assert_array_almost_equal(X1.toarray(), X2.toarray())
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
for (fit_intercept, normalize) in product([True, False], [True, False]):
X1, y1, X1_mean, X1_var, y1_mean = \
sparse_center_data(X, y, fit_intercept=fit_intercept,
normalize=normalize)
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(X, y, fit_intercept=fit_intercept,
normalize=normalize, return_mean=True)
assert_array_almost_equal(X1.toarray(), X2.toarray())
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
| unlicense |
ndingwall/scikit-learn | sklearn/manifold/tests/test_mds.py | 2 | 2566 | import numpy as np
from numpy.testing import assert_array_almost_equal
import pytest
from sklearn.manifold import _mds as mds
from sklearn.utils._testing import ignore_warnings
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
with pytest.raises(ValueError):
mds.smacof(sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
with pytest.raises(ValueError):
mds.smacof(sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
with pytest.raises(ValueError):
mds.smacof(sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
# TODO: Remove in 0.26
def test_MDS_pairwise_deprecated():
mds_clf = mds.MDS(metric='precomputed')
msg = r"Attribute _pairwise was deprecated in version 0\.24"
with pytest.warns(FutureWarning, match=msg):
mds_clf._pairwise
# TODO: Remove in 0.26
@ignore_warnings(category=FutureWarning)
@pytest.mark.parametrize("dissimilarity, expected_pairwise", [
("precomputed", True),
("euclidean", False),
])
def test_MDS_pairwise(dissimilarity, expected_pairwise):
# _pairwise attribute is set correctly
mds_clf = mds.MDS(dissimilarity=dissimilarity)
assert mds_clf._pairwise == expected_pairwise
| bsd-3-clause |
akuefler/fovea | examples/HH_neuron/HH_simple_demo.py | 1 | 20688 | """
This is the main run script for a simple demo involving
Hodgkin-Huxley analysis.
"""
from PyDSTool.Toolbox.dssrt import *
from PyDSTool.Toolbox.phaseplane import *
import PyDSTool as dst
import matplotlib.pyplot as plt
import sys
from fovea.graphics import gui
from model_config import man as modelManager
from model_config import name, header
from HH_neuron import getHH_DSSRT, computePPlaneObjects, do_traj
from fovea.common import castNull, castNullArray
from math import *
from scipy.optimize import fsolve
## ----- ----- ----- ----- ----- ----- ##
## BUILD GENERATOR OBJECT ##
## ----- ----- ----- ----- ----- ----- ##
model = modelManager.instances[name]
gen = list(model.registry.values())[0]
# global define for convenience
plotter = gui.plotter
# ------------------------
def clip_to_pt():
"""Extract clipboard point from gui to a dictionary"""
pt = dst.filteredDict(gui.capturedPts['Master'], ['V', 'm', 'n'])
return {'V': pt['V'], 'Na.m': pt['m'], 'K.n': pt['n']}
class PPcallback(object):
"""
Dynamic figure axes class to support state-dependent user-interactive callbacks
"""
def __init__(self, xvar, num_x_points=30, num_y_points=30,
nullcX_style=None, nullcY_style=None,
vel_arrow_scale=1):
self.nully = None
self.nullx = None
self.num_x_points = num_x_points
self.num_y_points = num_y_points
if nullcX_style is None:
self.nullcX_style = 'b-'
else:
self.nullcX_style = nullcX_style
if nullcY_style is None:
self.nullcY_style = 'r-'
else:
self.nullcY_style = nullcY_style
self.last_scale = None
self.vel_arrow_scale = vel_arrow_scale
self.first_call = True # is reset by __call__
def dQ_dt(self, Qstr, ix, points):
"""
Utility to find finite difference of any named quantity in given points
at index ix
"""
if ix == 0:
ix = 1
pt1 = points[ix]
pt0 = points[ix-1]
t1 = points.indepvararray[ix]
t0 = points.indepvararray[ix-1]
return (pt1[Qstr]-pt0[Qstr])/(t1-t0)
class PPcallback_m(PPcallback):
def __call__(self, time, hard_reset=False):
"""Callback 'function' to take care of refreshing and re-computing
phase plane sub-plot when time is changed interactively.
"""
#print("\n\npplane call back, mouseUp =", gui._mouseUp)
fig_struct = plotter.figs['Master']
# combine all layer information
dynamicData = fig_struct.layers['nullclines_mV'].data.copy()
dynamicData.update(fig_struct.layers['horiz_PP'].data)
ax = gui.dynamicPlots['nullclines_mV']
#sc = fig_struct.layers['nullclines'].scale
sc = [ax.get_xlim(), ax.get_ylim()]
if hard_reset:
force = True
preComputed = False
print("\n HARD REFRESH for phase plane")
else:
preComputed = False
force = False
# REPLACE WITH A PROPER CACHE STRUCTURE
for key, val in dynamicData.items():
# dynamicData.keys are 'yNull_<time>' or 'xNull' or keys from horiz_PP etc.
# re-use computed nullclines if time is in "cache", i.e. it shows up in the keys
if key[6:] == str(time):
# cache hit!
val['display'] = True # not clear if this updates original data structure after copy
# also check to see whether has been rescaled
if self.last_scale == sc:
preComputed = True
else:
force = True
elif key[:5] != 'xNull':
                # Use != to clean up collision lines and other nullclines that are not for
# this time value.
# switch off y-nullcline (V_inf) display for the other times
# yNull stays constant so keep that display=True
val['display'] = False
pt = gui.points[gui.ix]
p = fig_struct.layers['points_mV']
p.display = True
dV_dt = (pt['vinf']-pt['V'])/pt['tauv']
dm_dt = (pt['Na.minf']-pt['Na.m'])/pt['Na.taum']
dn_dt = (pt['K.ninf']-pt['K.n'])/pt['K.taun']
gui.addDataPoints([[pt['Na.m'], pt['Na.m']+dm_dt*self.vel_arrow_scale],
[pt['V'], pt['V']+dV_dt*self.vel_arrow_scale]],
layer='state_vel_mV', name='state', style=vel_vec_style, force=True)
if self.first_call:
gui.addDataPoints([gui.points['Na.m'], gui.points['V']],
layer='vfp_mV', name='traj', style='y')
# Virtual fixed point and linearized nullclines
if 'fast_m' in model.name:
with_jac = False
do_fps = False
fast_vars = ['Na.m']
else:
with_jac = False
do_fps = False
fast_vars = None
# update (or create) points
try:
gui.setPoint('state_pt', Point2D(pt['Na.m'], pt['V']), 'points_mV')
gui.setPoint('vinf_pt', Point2D(pt['Na.m'], pt['vinf']), 'points_mV')
except KeyError:
gui.addDataPoints(Point2D(pt['Na.m'], pt['V']), coorddict = {'x':
{'y':'y', 'style':'ko', 'layer':'points_mV', 'name':'state_pt'}})
gui.addDataPoints(Point2D(pt['Na.m'], pt['vinf']),coorddict = {'x':
{'y':'y', 'style':'bx', 'layer':'points_mV', 'name':'vinf_pt'}})
d = fig_struct.layers['nullclines_mV'].data
if not preComputed and gui._mouseUp:
## print("\nComputing phase plane...")
## print(" Current time = %.4f" % (time))
if self.nullx is None or force:
# compute m nullcline this once
only_var = None
else:
only_var = 'V'
# refresh wait notification
ax.text(0.05, 0.95, 'wait', transform=ax.transAxes, fontsize=22,
color='r', fontweight='bold', va='top')
gui.masterWin.canvas.draw()
# comment out for testing - use surrogate below
nulls = computePPlaneObjects(gen, 'Na.m', 'V', state=pt,
num_x_points=self.num_x_points,
num_y_points=self.num_y_points,
only_var=only_var, with_jac=with_jac,
do_fps=do_fps, fast_vars=fast_vars,
subdomain={'V': sc[1],
'Na.m': sc[0]})
# Surrogate data - much faster to test with
#self.nully = [[-100+time, -50+time/10., 0], [0.1, 0.4, 0.8]]
#self.nullx = [[-130, -80, 50], [0.2, 0.3, 0.4]]
self.nully = castNullArray(nulls['nullcY'])
gui.addDataPoints(self.nully, layer='nullclines_mV', style=self.nullcY_style,
name='yNull_'+str(time), force=force)
# delete update 'wait' notice
ax.texts = []
#ax.clear()
gui.clearAxes(ax)
if only_var is None:
# nullx is added second so will be the second line
self.nullx = castNullArray(nulls['nullcX'])
gui.addDataPoints(self.nullx, layer='nullclines_mV',
style=self.nullcX_style,
name='xNull', force=force)
#if force:
# rescale = sc
#else:
# rescale = None
gui.buildLayers(['nullclines_mV', 'horiz_PP', 'points_mV',
'state_vel_mV', 'vfp_mV'],
ax, rescale=sc, figure='Master')
self.last_scale = sc
## print(" Phase plane rebuild completed.\n")
else:
# just refresh display with the current selected data
gui.clearAxes(ax)
#if force:
# rescale = sc
#else:
# rescale = None
gui.buildLayers(['nullclines_mV', 'horiz_PP', 'points_mV',
'state_vel_mV', 'vfp_mV'],
ax, rescale=sc, figure='Master')
self.last_scale = sc
gui.masterWin.canvas.draw()
self.first_call = False
class PPcallback_n(PPcallback):
def __call__(self, time, hard_reset=False):
"""Callback 'function' to take care of refreshing and re-computing
phase plane sub-plot when time is changed interactively.
"""
#print("\n\npplane call back, mouseUp =", gui._mouseUp)
fig_struct = plotter.figs['Master']
# combine all layer information
dynamicData = fig_struct.layers['nullclines_nV'].data.copy()
ax = gui.dynamicPlots['nullclines_nV']
#sc = fig_struct.layers['nullclines'].scale
sc = [ax.get_xlim(), ax.get_ylim()]
if hard_reset:
force = True
preComputed = False
print("\n HARD REFRESH for phase plane")
else:
preComputed = False
force = False
# REPLACE WITH A PROPER CACHE STRUCTURE
for key, val in dynamicData.items():
# dynamicData.keys are 'yNull_<time>' or 'xNull' or keys from horiz_PP etc.
# re-use computed nullclines if time is in "cache", i.e. it shows up in the keys
if key[6:] == str(time):
# cache hit!
val['display'] = True # not clear if this updates original data structure after copy
# also check to see whether has been rescaled
if self.last_scale == sc:
preComputed = True
else:
force = True
elif key[:5] != 'xNull':
                # Use != to clean up collision lines and other nullclines that are not for
# this time value.
# switch off y-nullcline (V_inf) display for the other times
# yNull stays constant so keep that display=True
val['display'] = False
pt = gui.points[gui.ix]
p = fig_struct.layers['points_nV']
p.display = True
dV_dt = (pt['vinf']-pt['V'])/pt['tauv']
dm_dt = (pt['Na.minf']-pt['Na.m'])/pt['Na.taum']
dn_dt = (pt['K.ninf']-pt['K.n'])/pt['K.taun']
gui.addDataPoints([[pt['K.n'], pt['K.n']+dn_dt*self.vel_arrow_scale],
[pt['V'], pt['V']+dV_dt*self.vel_arrow_scale]],
layer='state_vel_nV', name='state', style=vel_vec_style, force=True)
if self.first_call:
gui.addDataPoints([gui.points['K.n'], gui.points['V']],
layer='vfp_nV', name='traj', style='y')
gui.addDataPoints([gui.points['K.n'], gui.points['vinf']],
layer='vfp_nV', name='quasiVnull', style='m--')
## vs = np.linspace(sc[1][0], sc[1][1], 50)
## x = dict(pt).copy()
##
## def vinf(n, v):
## x['K.n'] = n
## x['V'] = v
## x['Na.m'] = gen.auxfns.Na_dssrt_fn_minf(v)
## # assume autonomous system
## return model.Rhs(0, x, asarray=False)['V']
##
## vinfs_inv_n = [fsolve(vinf, gen.auxfns.K_dssrt_fn_ninf(v), args=(v,)) for v in vs]
## plotter.addData([vinfs_inv_n, vs], layer='vfp_nV', name='vinf_fastm', style='b--')
# Virtual fixed point and linearized nullclines
if 'fast_m' in model.name:
with_jac = False
do_fps = False
fast_vars = ['Na.m']
else:
with_jac = False
do_fps = False
fast_vars = None
# update (or create) points
try:
gui.setPoint('state_pt', Point2D(pt['K.n'], pt['V']), 'points_nV')
gui.setPoint('vinf_pt', Point2D(pt['K.n'], pt['vinf']), 'points_nV')
except KeyError:
gui.addDataPoints(Point2D(pt['K.n'], pt['V']),coorddict = {'x':
{'y':'y', 'style':'ko', 'layer':'points_nV', 'name':'state_pt'}})
gui.addDataPoints(Point2D(pt['K.n'], pt['vinf']), coorddict = {'x':
{'y':'y', 'style':'bx', 'layer':'points_nV', 'name':'vinf_pt'}})
d = fig_struct.layers['nullclines_nV'].data
if not preComputed and gui._mouseUp:
## print("\nComputing phase plane...")
## print(" Current time = %.4f" % (time))
if self.nullx is None or force:
# compute m nullcline this once
only_var = None
else:
only_var = 'V'
# refresh wait notification
ax.text(0.05, 0.95, 'wait', transform=ax.transAxes, fontsize=22,
color='r', fontweight='bold', va='top')
gui.masterWin.canvas.draw()
# comment out for testing - use surrogate below
nulls = computePPlaneObjects(gen, 'K.n', 'V', state=pt,
num_x_points=self.num_x_points,
num_y_points=self.num_y_points,
only_var=only_var, with_jac=with_jac,
do_fps=do_fps, fast_vars=fast_vars,
subdomain={'V': sc[1],
'K.n': sc[0]})
# Surrogate data - much faster to test with
#self.nully = [[-100+time, -50+time/10., 0], [0.1, 0.4, 0.8]]
#self.nullx = [[-130, -80, 50], [0.2, 0.3, 0.4]]
self.nully = castNullArray(nulls['nullcY'])
gui.addDataPoints(self.nully, layer='nullclines_nV', style=self.nullcY_style,
name='yNull_'+str(time), force=force)
# delete update 'wait' notice
ax.texts = []
#ax.clear()
gui.clearAxes(ax)
if only_var is None:
# nullx is added second so will be the second line
self.nullx = castNullArray(nulls['nullcX'])
gui.addDataPoints(self.nullx, layer='nullclines_nV',
style=self.nullcX_style, name='xNull', force=force)
#if force:
# rescale = sc
#else:
# rescale = None
gui.buildLayers(['nullclines_nV', 'points_nV', 'state_vel_nV', 'vfp_nV'],
ax, rescale=sc, figure='Master')
self.last_scale = sc
## print(" Phase plane rebuild completed.\n")
else:
# just refresh display with the current selected data
gui.clearAxes(ax)
#if force:
# rescale = sc
#else:
# rescale = None
gui.buildLayers(['nullclines_nV', 'points_nV', 'state_vel_nV', 'vfp_nV'],
ax, rescale=sc, figure='Master')
self.last_scale = sc
gui.masterWin.canvas.draw()
self.first_call = False
# ------------------------------------------------------------------------
## Set dssrt_name to be used for the saved DSSRT data file
## Change for different parameter sets or just default to model name
dssrt_name = name
if 'typeI' in name and 'typeII' not in name:
### FOR TYPE I H-H ONLY
Kgmax = 100 # 100 # 80 original
dssrt_name = name+'_gK%i' % Kgmax
model.set(pars={'K.g': Kgmax,
'Na.g': 50})
else:
### FOR TYPE II H-H ONLY
Kgmax = 36 #39 or 42 with fast m # 36 original
dssrt_name = name+'_gK%i' % Kgmax
model.set(pars={'K.g': Kgmax,
'Ib.Ibias': 8.})
dV = 0.2
##test_ic = {'K.n': 0.37220277852490802,
## 'Na.m': 0.080387043479386036,
## 'V': -59.5}
##model.set(ics=test_ic)
## ----- ----- ----- ----- ----- ----- ##
## GET GENERATOR TRAJECTORY ##
## ----- ----- ----- ----- ----- ----- ##
orig_ics = model.query('ics')
if 'no_h' in name:
# no periodic orbit, just simulate for 12 ms
if 'typeI' in name and 'typeII' not in name:
t_end = 9
else:
t_end = 12
model.set(tdata=[0, t_end])
model.compute('ref')
ref_traj = model['ref']
else:
# get periodic orbit
t_end = 20
ref_traj, ref_pts, ref_tmin, ref_tmax = do_traj(model, t_end,
do_plot=False)
# re-sample traj at constant dt and declare to GUI
#trajPts = ref_traj.sample(dt=0.01)[:-40] # cheap way to avoid overlap from pts not being periodic
trajPts = ref_traj.sample(dt=0.01)[:len(ref_traj.sample(dt=0.01))-40] #[:-40] syntax not working in python 3
gui.addTimeFromPoints(trajPts)
## ----- ----- ----- ----- ----- ----- ##
## CREATE DIAGNOSTIC OBJECT ##
## ----- ----- ----- ----- ----- ----- ##
gui.clean()
gui.addFig('Master', title='Geometric Dynamic Analysis: '+dssrt_name,
tdom=[0, t_end], domain=[(-100,50), (0,1)])
coorddict = {'V':
{'x':'t', 'layer':'V','name':'V', 'style':'k-'},
'vinf':
{'x':'t', 'layer':'V','name':'Vinf', 'style':'k:'},
'Na.m':
{'x':'t', 'layer':'activs', 'name':'m', 'style':'g--'},
'Na.minf':
{'x':'t', 'layer':'activs', 'name':'minf', 'style':'g--'},
'K.n':
{'x':'t', 'layer':'activs', 'name':'n', 'style':'r-'},
'K.ninf':
{'x':'t', 'layer':'activs', 'name':'ninf', 'style':'r--'},
'tauv':
{'x':'t','layer':'activs','name':'tauv', 'style':'b:'},
'Na.taum':
{'x':'t', 'layer':'activs','name':'taum', 'style':'g:'},
'K.taun':
{'x':'t', 'layer':'activs','name':'taun', 'style':'r:'}
}
gui.addDataPoints(trajPts, coorddict = coorddict)
print("Key for activations / time scales window")
print(" Activations: line=activation, dashed=asymptotic")
print(" Time scales: dots")
print("Na: green")
print("K: red")
print("V: blue")
## ----- ----- ----- ----- ----- ----- ##
## COMPUTE V-m PHASE PLANE ##
## ----- ----- ----- ----- ----- ----- ##
# start at t = 2ms
gui.set_time(2)
# global style defs
vel_vec_style = {'color': 'k', 'linewidth': 2, 'linestyle': '-'}
vinf_vec_style = {'color': 'b', 'linewidth': 2, 'linestyle': '-'}
horiz_style = {'color': 'k', 'linestyle': '--', 'linewidth': 2}
def make_layer(xvar):
if xvar == 'Na.m':
suffix = 'mV'
else:
suffix = 'nV'
PP_layer_name = 'nullclines_'+suffix
gui.addLayer(PP_layer_name, dynamic=True)
if xvar == 'Na.m':
gui.addLayer('horiz_PP')
nullcX_style = 'g-'
PPclass = PPcallback_m
else:
# no horizon layer for K.n
nullcX_style = 'r-'
PPclass = PPcallback_n
PPplot = PPclass(xvar, nullcY_style = {'color': 'b', 'linestyle': '-', 'linewidth': 1},
nullcX_style=nullcX_style)
gui.dynamicPlotFns[PP_layer_name] = PPplot
gui.addLayer('points_'+suffix)
gui.addLayer('state_vel_'+suffix)
gui.addLayer('vfp_'+suffix)
make_layer('Na.m')
make_layer('K.n')
# sub-plot specs: (row, col) integer coords start at top left
dPlot11 = {'name': 'Trajectory',
'layers': ['V'],
'scale': [None, [-85, 45]],
'axes_vars': ['t', 'V'] }
dPlot12 = {'name': 'Activations, Time scales',
'layers': ['activs'],
'scale': [None, [0,1]],
'axes_vars': ['t', 'no units, ms'] }
pp1_name = 'Na-V Phaseplane'
pp1_dom = [[0,1], [-75,50]]
pp1_vars = ['m', 'V']
pp2_name = 'K-V Phaseplane'
pp2_dom = [[0.,1], [-75,50]]
pp2_vars = ['n', 'V']
# ISSUE: Rename 'scale' to 'domain' or 'extent'
dPlot21 = {'name': pp1_name,
'scale': pp1_dom,
'layers': ['nullclines_mV', 'horiz_PP', 'points_mV', 'state_vel_mV', 'vfp_mV'],
'axes_vars': pp1_vars}
dPlot22 = {'name': pp2_name,
'scale': pp2_dom,
'layers': ['nullclines_nV', 'points_nV', 'state_vel_nV', 'vfp_nV'],
'axes_vars': pp2_vars}
dPlot_dict = {'11': dPlot11, '12': dPlot12, '21': dPlot21, '22': dPlot22}
gui.setup(dPlot_dict, size=(14, 8))
gui.show_legends(subplot='Times')
gui.show()
gui.plus_dt(0)
halt = True | bsd-3-clause |
fzalkow/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
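    # convolving with a 3x3 kernel that has a single 1 off-centre shifts the
    # 8x8 image by one pixel in the corresponding direction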
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
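# A minimal sketch (not part of the original example) of the grid search
# mentioned above; the parameter grid below is an assumption, not the grid
# the authors actually used. GridSearchCV moved to sklearn.model_selection
# in newer scikit-learn releases. Toggle RUN_GRID_SEARCH to rerun the tuning.
RUN_GRID_SEARCH = False
if RUN_GRID_SEARCH:
    from sklearn.grid_search import GridSearchCV
    param_grid = {
        'rbm__learning_rate': [0.01, 0.06, 0.1],
        'rbm__n_iter': [10, 20, 40],
        'rbm__n_components': [50, 100, 200],
        'logistic__C': [100.0, 1000.0, 6000.0],
    }
    search = GridSearchCV(classifier, param_grid, n_jobs=-1, verbose=1)
    search.fit(X_train, Y_train)
    print("Best parameters found by grid search:", search.best_params_)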
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |