repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values)
---|---|---|---|---|---
mhue/scikit-learn | doc/conf.py | 210 | 8446 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
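# For a function defined at, say, line 42 of sklearn/base.py, the resolved
# link has roughly the following form (illustrative only):
#   https://github.com/scikit-learn/scikit-learn/blob/<revision>/sklearn/base.py#L42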
| bsd-3-clause |
chasquiwan/CoCoPy | modules/experiment/backend/backend.py | 2 | 1504 | #!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# CoCoPy - A python toolkit for rotational spectroscopy
#
# Copyright (c) 2013 by David Schmitz ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the “Software”), to deal in the
# Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
# THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# MIT Licence (http://mit-license.org/)
#
################################################################################
import numpy as np
import matplotlib.pyplot as plt | mit |
jmontoyam/mne-python | mne/viz/topomap.py | 3 | 88971 | """Functions to plot M/EEG data, e.g. topographies
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
#
# License: Simplified BSD
import math
import copy
from functools import partial
import numpy as np
from scipy import linalg
from ..baseline import rescale
from ..io.constants import FIFF
from ..io.pick import (pick_types, _picks_by_type, channel_type, pick_info,
_pick_data_channels)
from ..utils import _clean_names, _time_mask, verbose, logger, warn
from .utils import (tight_layout, _setup_vmin_vmax, _prepare_trellis,
_check_delayed_ssp, _draw_proj_checkbox, figure_nobar,
plt_show, _process_times, DraggableColorbar,
_validate_if_list_of_axes)
from ..time_frequency import psd_multitaper
from ..defaults import _handle_default
from ..channels.layout import _find_topomap_coords
from ..io.meas_info import Info
def _prepare_topo_plot(inst, ch_type, layout):
""""Aux Function"""
info = copy.deepcopy(inst if isinstance(inst, Info) else inst.info)
if layout is None and ch_type != 'eeg':
from ..channels import find_layout
layout = find_layout(info)
elif layout == 'auto':
layout = None
clean_ch_names = _clean_names(info['ch_names'])
for ii, this_ch in enumerate(info['chs']):
this_ch['ch_name'] = clean_ch_names[ii]
info._update_redundant()
info._check_consistency()
# special case for merging grad channels
if (ch_type == 'grad' and FIFF.FIFFV_COIL_VV_PLANAR_T1 in
np.unique([ch['coil_type'] for ch in info['chs']])):
from ..channels.layout import _pair_grad_sensors
picks, pos = _pair_grad_sensors(info, layout)
merge_grads = True
else:
merge_grads = False
if ch_type == 'eeg':
picks = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude='bads')
else:
picks = pick_types(info, meg=ch_type, ref_meg=False,
exclude='bads')
if len(picks) == 0:
raise ValueError("No channels of type %r" % ch_type)
if layout is None:
pos = _find_topomap_coords(info, picks)
else:
names = [n.upper() for n in layout.names]
pos = list()
for pick in picks:
this_name = info['ch_names'][pick].upper()
if this_name in names:
pos.append(layout.pos[names.index(this_name)])
else:
warn('Failed to locate %s channel positions from layout. '
'Inferring channel positions from data.' % ch_type)
pos = _find_topomap_coords(info, picks)
break
ch_names = [info['ch_names'][k] for k in picks]
if merge_grads:
# change names so that vectorview combined grads appear as MEG014x
# instead of MEG0142 or MEG0143 which are the 2 planar grads.
ch_names = [ch_names[k][:-1] + 'x' for k in range(0, len(ch_names), 2)]
pos = np.array(pos)[:, :2] # 2D plot, otherwise interpolation bugs
return picks, pos, merge_grads, ch_names, ch_type
def _plot_update_evoked_topomap(params, bools):
""" Helper to update topomaps """
projs = [proj for ii, proj in enumerate(params['projs'])
if ii in np.where(bools)[0]]
params['proj_bools'] = bools
new_evoked = params['evoked'].copy()
new_evoked.info['projs'] = []
new_evoked.add_proj(projs)
new_evoked.apply_proj()
data = new_evoked.data[np.ix_(params['picks'],
params['time_idx'])] * params['scale']
if params['merge_grads']:
from ..channels.layout import _merge_grad_data
data = _merge_grad_data(data)
image_mask = params['image_mask']
pos_x, pos_y = np.asarray(params['pos'])[:, :2].T
xi = np.linspace(pos_x.min(), pos_x.max(), params['res'])
yi = np.linspace(pos_y.min(), pos_y.max(), params['res'])
Xi, Yi = np.meshgrid(xi, yi)
for ii, im in enumerate(params['images']):
Zi = _griddata(pos_x, pos_y, data[:, ii], Xi, Yi)
Zi[~image_mask] = np.nan
im.set_data(Zi)
for cont in params['contours']:
cont.set_array(np.c_[Xi, Yi, Zi])
params['fig'].canvas.draw()
def plot_projs_topomap(projs, layout=None, cmap=None, sensors=True,
colorbar=False, res=64, size=1, show=True,
outlines='head', contours=6, image_interp='bilinear',
axes=None):
"""Plot topographic maps of SSP projections
Parameters
----------
projs : list of Projection
The projections
layout : None | Layout | list of Layout
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). Or a list of Layout if projections
are from different sensor types.
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap to
use and the second value is a boolean defining interactivity. In
interactive mode (only works if ``colorbar=True``) the colors are
adjustable by clicking and dragging the colorbar with left and right
mouse button. Left mouse button moves the scale up and down and right
mouse button adjusts the range. Hitting space bar resets the range. Up
and down arrows can be used to change the colormap. If None (default),
'Reds' is used for all positive data, otherwise defaults to 'RdBu_r'.
If 'interactive', translates to (None, True).
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib plot
format string (e.g., 'r+' for red plusses). If True, a circle will be
used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
res : int
The resolution of the topomap image (n pixels along each side).
size : scalar
Side length of the topomaps in inches (only applies when plotting
multiple topomaps at a time).
show : bool
Show figure if True.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw. If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of projectors. If instance of Axes,
there must be only one projector. Defaults to None.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
Notes
-----
.. versionadded:: 0.9.0
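Examples
--------
A minimal, hypothetical sketch; the projection file name below is an
assumption used only for illustration::

    import mne
    projs = mne.read_proj('sample_audvis_ecg-proj.fif')
    mne.viz.plot_projs_topomap(projs, colorbar=True)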
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
if layout is None:
from ..channels import read_layout
layout = read_layout('Vectorview-all')
if not isinstance(layout, list):
layout = [layout]
n_projs = len(projs)
nrows = math.floor(math.sqrt(n_projs))
ncols = math.ceil(n_projs / nrows)
if cmap == 'interactive':
cmap = (None, True)
elif not isinstance(cmap, tuple):
cmap = (cmap, True)
if axes is None:
plt.figure()
axes = list()
for idx in range(len(projs)):
ax = plt.subplot(nrows, ncols, idx + 1)
axes.append(ax)
elif isinstance(axes, plt.Axes):
axes = [axes]
if len(axes) != len(projs):
raise RuntimeError('There must be an axes for each picked projector.')
for proj_idx, proj in enumerate(projs):
axes[proj_idx].set_title(proj['desc'][:10] + '...')
ch_names = _clean_names(proj['data']['col_names'])
data = proj['data']['data'].ravel()
idx = []
for l in layout:
is_vv = l.kind.startswith('Vectorview')
if is_vv:
from ..channels.layout import _pair_grad_sensors_from_ch_names
grad_pairs = _pair_grad_sensors_from_ch_names(ch_names)
if grad_pairs:
ch_names = [ch_names[i] for i in grad_pairs]
idx = [l.names.index(c) for c in ch_names if c in l.names]
if len(idx) == 0:
continue
pos = l.pos[idx]
if is_vv and grad_pairs:
from ..channels.layout import _merge_grad_data
shape = (len(idx) // 2, 2, -1)
pos = pos.reshape(shape).mean(axis=1)
data = _merge_grad_data(data[grad_pairs]).ravel()
break
if len(idx):
im = plot_topomap(data, pos[:, :2], vmax=None, cmap=cmap[0],
sensors=sensors, res=res, axes=axes[proj_idx],
outlines=outlines, contours=contours,
image_interp=image_interp, show=False)[0]
if colorbar:
divider = make_axes_locatable(axes[proj_idx])
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plt.colorbar(im, cax=cax, cmap=cmap)
if cmap[1]:
axes[proj_idx].CB = DraggableColorbar(cbar, im)
else:
raise RuntimeError('Cannot find a proper layout for projection %s'
% proj['desc'])
tight_layout(fig=axes[0].get_figure())
plt_show(show)
return axes[0].get_figure()
def _check_outlines(pos, outlines, head_pos=None):
"""Check or create outlines for topoplot
"""
pos = np.array(pos, float)[:, :2] # ensure we have a copy
head_pos = dict() if head_pos is None else head_pos
if not isinstance(head_pos, dict):
raise TypeError('head_pos must be dict or None')
head_pos = copy.deepcopy(head_pos)
for key in head_pos.keys():
if key not in ('center', 'scale'):
raise KeyError('head_pos must only contain "center" and '
'"scale"')
head_pos[key] = np.array(head_pos[key], float)
if head_pos[key].shape != (2,):
raise ValueError('head_pos["%s"] must have shape (2,), not '
'%s' % (key, head_pos[key].shape))
if outlines in ('head', 'skirt', None):
radius = 0.5
l = np.linspace(0, 2 * np.pi, 101)
head_x = np.cos(l) * radius
head_y = np.sin(l) * radius
nose_x = np.array([0.18, 0, -0.18]) * radius
nose_y = np.array([radius - .004, radius * 1.15, radius - .004])
ear_x = np.array([.497, .510, .518, .5299, .5419, .54, .547,
.532, .510, .489])
ear_y = np.array([.0555, .0775, .0783, .0746, .0555, -.0055, -.0932,
-.1313, -.1384, -.1199])
# shift and scale the electrode positions
if 'center' not in head_pos:
head_pos['center'] = 0.5 * (pos.max(axis=0) + pos.min(axis=0))
pos -= head_pos['center']
if outlines is not None:
# Define the outline of the head, ears and nose
outlines_dict = dict(head=(head_x, head_y), nose=(nose_x, nose_y),
ear_left=(ear_x, ear_y),
ear_right=(-ear_x, ear_y))
else:
outlines_dict = dict()
if outlines == 'skirt':
if 'scale' not in head_pos:
# By default, fit electrodes inside the head circle
head_pos['scale'] = 1.0 / (pos.max(axis=0) - pos.min(axis=0))
pos *= head_pos['scale']
# Make the figure encompass slightly more than all points
mask_scale = 1.25 * (pos.max(axis=0) - pos.min(axis=0))
outlines_dict['autoshrink'] = False
outlines_dict['mask_pos'] = (mask_scale[0] * head_x,
mask_scale[1] * head_y)
outlines_dict['clip_radius'] = (mask_scale / 2.)
else:
if 'scale' not in head_pos:
# The default is to make the points occupy a slightly smaller
# proportion (0.85) of the total width and height
# this number was empirically determined (seems to work well)
head_pos['scale'] = 0.85 / (pos.max(axis=0) - pos.min(axis=0))
pos *= head_pos['scale']
outlines_dict['autoshrink'] = True
outlines_dict['mask_pos'] = head_x, head_y
outlines_dict['clip_radius'] = (0.5, 0.5)
outlines = outlines_dict
elif isinstance(outlines, dict):
if 'mask_pos' not in outlines:
raise ValueError('You must specify the coordinates of the image '
                 'mask')
else:
raise ValueError('Invalid value for `outlines`')
return pos, outlines
def _draw_outlines(ax, outlines):
"""Helper for drawing the outlines for a topomap."""
outlines_ = dict([(k, v) for k, v in outlines.items() if k not in
['patch', 'autoshrink']])
for key, (x_coord, y_coord) in outlines_.items():
if 'mask' in key:
continue
ax.plot(x_coord, y_coord, color='k', linewidth=1, clip_on=False)
return outlines_
def _griddata(x, y, v, xi, yi):
"""Aux function"""
xy = x.ravel() + y.ravel() * -1j
d = xy[None, :] * np.ones((len(xy), 1))
d = np.abs(d - d.T)
n = d.shape[0]
d.flat[::n + 1] = 1.
g = (d * d) * (np.log(d) - 1.)
g.flat[::n + 1] = 0.
weights = linalg.solve(g, v.ravel())
m, n = xi.shape
zi = np.zeros_like(xi)
xy = xy.T
g = np.empty(xy.shape)
for i in range(m):
for j in range(n):
d = np.abs(xi[i, j] + -1j * yi[i, j] - xy)
mask = np.where(d == 0)[0]
if len(mask):
d[mask] = 1.
np.log(d, out=g)
g -= 1.
g *= d * d
if len(mask):
g[mask] = 0.
zi[i, j] = g.dot(weights)
return zi
def _plot_sensors(pos_x, pos_y, sensors, ax):
"""Aux function"""
from matplotlib.patches import Circle
if sensors is True:
for x, y in zip(pos_x, pos_y):
ax.add_artist(Circle(xy=(x, y), radius=0.003, color='k'))
else:
ax.plot(pos_x, pos_y, sensors)
def plot_topomap(data, pos, vmin=None, vmax=None, cmap=None, sensors=True,
res=64, axes=None, names=None, show_names=False, mask=None,
mask_params=None, outlines='head', image_mask=None,
contours=6, image_interp='bilinear', show=True,
head_pos=None, onselect=None):
"""Plot a topographic map as image
Parameters
----------
data : array, shape (n_chan,)
The data values to plot.
pos : array, shape (n_chan, 2) | instance of Info
Location information for the data points(/channels).
If an array, for each data point, the x and y coordinates.
If an Info object, it must contain only one data type and
exactly `len(data)` data channels, and the x/y coordinates will
be inferred from this Info object.
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None and vmax is also None, -vmax is used; otherwise np.min(data).
If callable, the output equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the output
equals vmax(data). Defaults to None.
cmap : matplotlib colormap | None
Colormap to use. If None, 'Reds' is used for all positive data,
otherwise defaults to 'RdBu_r'.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib plot
format string (e.g., 'r+' for red plusses). If True, a circle will be
used (via .add_artist). Defaults to True.
res : int
The resolution of the topomap image (n pixels along each side).
axes : instance of Axes | None
The axes to plot to. If None, the current axes will be used.
names : list | None
List of channel names. If None, channel names are not plotted.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g., to
delete the prefix 'MEG ' from all channel names, pass the function
lambda x: x.replace('MEG ', ''). If `mask` is not None, only
significant sensors will be shown.
If `True`, a list of names must be provided (see `names` keyword).
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axes plots). If None, nothing will be drawn.
Defaults to 'head'.
image_mask : ndarray of bool, shape (res, res) | None
The image mask to cover the interpolated surface. If None, it will be
computed from the outline.
contours : int | False | None
The number of contour lines to draw. If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
show : bool
Show figure if True.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
onselect : callable | None
Handle for a function that is called when the user selects a set of
channels by rectangle selection (matplotlib ``RectangleSelector``). If
None interactive selection is disabled. Defaults to None.
Returns
-------
im : matplotlib.image.AxesImage
The interpolated data.
cn : matplotlib.contour.ContourSet
The fieldlines.
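Examples
--------
A minimal sketch using random positions and values (both are assumptions,
for illustration only)::

    import numpy as np
    from mne.viz import plot_topomap
    rng = np.random.RandomState(42)
    pos = rng.rand(32, 2) - 0.5   # fake 2D sensor positions
    data = rng.randn(32)          # fake sensor values
    im, cn = plot_topomap(data, pos, show=False)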
"""
import matplotlib.pyplot as plt
from matplotlib.widgets import RectangleSelector
data = np.asarray(data)
if isinstance(pos, Info): # infer pos from Info object
picks = _pick_data_channels(pos) # pick only data channels
pos = pick_info(pos, picks)
# check if there is only 1 channel type, and n_chans matches the data
ch_type = set(channel_type(pos, idx)
for idx, _ in enumerate(pos["chs"]))
info_help = ("Pick Info with e.g. mne.pick_info and "
"mne.channels.channel_indices_by_type.")
if len(ch_type) > 1:
raise ValueError("Multiple channel types in Info structure. " +
info_help)
elif len(pos["chs"]) != data.shape[0]:
raise ValueError("Number of channels in the Info object and "
"the data array does not match. " + info_help)
else:
ch_type = ch_type.pop()
if any(type_ in ch_type for type_ in ('planar', 'grad')):
# deal with grad pairs
from ..channels.layout import (_merge_grad_data, find_layout,
_pair_grad_sensors)
picks, pos = _pair_grad_sensors(pos, find_layout(pos))
data = _merge_grad_data(data[picks]).reshape(-1)
else:
picks = list(range(data.shape[0]))
pos = _find_topomap_coords(pos, picks=picks)
if data.ndim > 1:
raise ValueError("Data needs to be array of shape (n_sensors,); got "
"shape %s." % str(data.shape))
# Give a helpful error message for common mistakes regarding the position
# matrix.
pos_help = ("Electrode positions should be specified as a 2D array with "
"shape (n_channels, 2). Each row in this matrix contains the "
"(x, y) position of an electrode.")
if pos.ndim != 2:
error = ("{ndim}D array supplied as electrode positions, where a 2D "
"array was expected").format(ndim=pos.ndim)
raise ValueError(error + " " + pos_help)
elif pos.shape[1] == 3:
error = ("The supplied electrode positions matrix contains 3 columns. "
"Are you trying to specify XYZ coordinates? Perhaps the "
"mne.channels.create_eeg_layout function is useful for you.")
raise ValueError(error + " " + pos_help)
# No error is raised in case of pos.shape[1] == 4. In this case, it is
# assumed the position matrix contains both (x, y) and (width, height)
# values, such as Layout.pos.
elif pos.shape[1] == 1 or pos.shape[1] > 4:
raise ValueError(pos_help)
if len(data) != len(pos):
raise ValueError("Data and pos need to be of same length. Got data of "
"length %s, pos of length %s" % (len(data), len(pos)))
norm = min(data) >= 0
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax, norm)
if cmap is None:
cmap = 'Reds' if norm else 'RdBu_r'
pos, outlines = _check_outlines(pos, outlines, head_pos)
ax = axes if axes else plt.gca()
pos_x, pos_y = _prepare_topomap(pos, ax)
if outlines is None:
xmin, xmax = pos_x.min(), pos_x.max()
ymin, ymax = pos_y.min(), pos_y.max()
else:
xlim = np.inf, -np.inf,
ylim = np.inf, -np.inf,
mask_ = np.c_[outlines['mask_pos']]
xmin, xmax = (np.min(np.r_[xlim[0], mask_[:, 0]]),
np.max(np.r_[xlim[1], mask_[:, 0]]))
ymin, ymax = (np.min(np.r_[ylim[0], mask_[:, 1]]),
np.max(np.r_[ylim[1], mask_[:, 1]]))
# interpolate data
xi = np.linspace(xmin, xmax, res)
yi = np.linspace(ymin, ymax, res)
Xi, Yi = np.meshgrid(xi, yi)
Zi = _griddata(pos_x, pos_y, data, Xi, Yi)
if outlines is None:
_is_default_outlines = False
elif isinstance(outlines, dict):
_is_default_outlines = any(k.startswith('head') for k in outlines)
if _is_default_outlines and image_mask is None:
# prepare masking
image_mask, pos = _make_image_mask(outlines, pos, res)
mask_params = _handle_default('mask_params', mask_params)
# plot outline
linewidth = mask_params['markeredgewidth']
patch = None
if 'patch' in outlines:
patch = outlines['patch']
patch_ = patch() if callable(patch) else patch
patch_.set_clip_on(False)
ax.add_patch(patch_)
ax.set_transform(ax.transAxes)
ax.set_clip_path(patch_)
# plot map and contour
im = ax.imshow(Zi, cmap=cmap, vmin=vmin, vmax=vmax, origin='lower',
aspect='equal', extent=(xmin, xmax, ymin, ymax),
interpolation=image_interp)
# This tackles an incomprehensible matplotlib bug if no contours are
# drawn. To avoid rescalings, we will always draw contours.
# But if no contours are desired we only draw one and make it invisible.
no_contours = False
if contours in (False, None):
contours, no_contours = 1, True
cont = ax.contour(Xi, Yi, Zi, contours, colors='k',
linewidths=linewidth)
if no_contours is True:
for col in cont.collections:
col.set_visible(False)
if _is_default_outlines:
from matplotlib import patches
patch_ = patches.Ellipse((0, 0),
2 * outlines['clip_radius'][0],
2 * outlines['clip_radius'][1],
clip_on=True,
transform=ax.transData)
if _is_default_outlines or patch is not None:
im.set_clip_path(patch_)
if cont is not None:
for col in cont.collections:
col.set_clip_path(patch_)
if sensors is not False and mask is None:
_plot_sensors(pos_x, pos_y, sensors=sensors, ax=ax)
elif sensors and mask is not None:
idx = np.where(mask)[0]
ax.plot(pos_x[idx], pos_y[idx], **mask_params)
idx = np.where(~mask)[0]
_plot_sensors(pos_x[idx], pos_y[idx], sensors=sensors, ax=ax)
elif not sensors and mask is not None:
idx = np.where(mask)[0]
ax.plot(pos_x[idx], pos_y[idx], **mask_params)
if isinstance(outlines, dict):
_draw_outlines(ax, outlines)
if show_names:
if names is None:
raise ValueError("To show names, a list of names must be provided"
" (see `names` keyword).")
if show_names is True:
def _show_names(x):
return x
else:
_show_names = show_names
show_idx = np.arange(len(names)) if mask is None else np.where(mask)[0]
for ii, (p, ch_id) in enumerate(zip(pos, names)):
if ii not in show_idx:
continue
ch_id = _show_names(ch_id)
ax.text(p[0], p[1], ch_id, horizontalalignment='center',
verticalalignment='center', size='x-small')
plt.subplots_adjust(top=.95)
if onselect is not None:
ax.RS = RectangleSelector(ax, onselect=onselect)
plt_show(show)
return im, cont
def _make_image_mask(outlines, pos, res):
"""Aux function
"""
mask_ = np.c_[outlines['mask_pos']]
xmin, xmax = (np.min(np.r_[np.inf, mask_[:, 0]]),
np.max(np.r_[-np.inf, mask_[:, 0]]))
ymin, ymax = (np.min(np.r_[np.inf, mask_[:, 1]]),
np.max(np.r_[-np.inf, mask_[:, 1]]))
if outlines.get('autoshrink', False) is not False:
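        # 'autoshrink': pull all sensor positions toward the origin in 1%
        # steps until every point lies inside the outline mask.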
inside = _inside_contour(pos, mask_)
outside = np.invert(inside)
outlier_points = pos[outside]
while np.any(outlier_points): # auto shrink
pos *= 0.99
inside = _inside_contour(pos, mask_)
outside = np.invert(inside)
outlier_points = pos[outside]
image_mask = np.zeros((res, res), dtype=bool)
xi_mask = np.linspace(xmin, xmax, res)
yi_mask = np.linspace(ymin, ymax, res)
Xi_mask, Yi_mask = np.meshgrid(xi_mask, yi_mask)
pos_ = np.c_[Xi_mask.flatten(), Yi_mask.flatten()]
inds = _inside_contour(pos_, mask_)
image_mask[inds.reshape(image_mask.shape)] = True
return image_mask, pos
def _inside_contour(pos, contour):
"""Aux function"""
npos = len(pos)
x, y = pos[:, :2].T
check_mask = np.ones((npos), dtype=bool)
check_mask[((x < np.min(x)) | (y < np.min(y)) |
(x > np.max(x)) | (y > np.max(y)))] = False
critval = 0.1
sel = np.where(check_mask)[0]
for this_sel in sel:
contourx = contour[:, 0] - pos[this_sel, 0]
contoury = contour[:, 1] - pos[this_sel, 1]
angle = np.arctan2(contoury, contourx)
angle = np.unwrap(angle)
total = np.sum(np.diff(angle))
check_mask[this_sel] = np.abs(total) > critval
return check_mask
def _plot_ica_topomap(ica, idx=0, ch_type=None, res=64, layout=None,
vmin=None, vmax=None, cmap='RdBu_r', colorbar=False,
title=None, show=True, outlines='head', contours=6,
image_interp='bilinear', head_pos=None, axes=None):
"""plot single ica map to axes"""
import matplotlib as mpl
from ..channels import _get_ch_type
from ..preprocessing.ica import _get_ica_map
if ica.info is None:
raise RuntimeError('The ICA\'s measurement info is missing. Please '
'fit the ICA or add the corresponding info object.')
if not isinstance(axes, mpl.axes.Axes):
raise ValueError('axis has to be an instance of matplotlib Axes, '
'got %s instead.' % type(axes))
ch_type = _get_ch_type(ica, ch_type)
data = _get_ica_map(ica, components=idx)
data_picks, pos, merge_grads, names, _ = _prepare_topo_plot(
ica, ch_type, layout)
pos, outlines = _check_outlines(pos, outlines, head_pos)
if outlines not in (None, 'head'):
image_mask, pos = _make_image_mask(outlines, pos, res)
else:
image_mask = None
data = np.atleast_2d(data)
data = data[:, data_picks]
if merge_grads:
from ..channels.layout import _merge_grad_data
data = _merge_grad_data(data)
axes.set_title('IC #%03d' % idx, fontsize=12)
vmin_, vmax_ = _setup_vmin_vmax(data, vmin, vmax)
im = plot_topomap(data.ravel(), pos, vmin=vmin_, vmax=vmax_,
res=res, axes=axes, cmap=cmap, outlines=outlines,
image_mask=image_mask, contours=contours,
image_interp=image_interp, show=show)[0]
if colorbar:
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid import make_axes_locatable
divider = make_axes_locatable(axes)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plt.colorbar(im, cax=cax, format='%3.2f', cmap=cmap)
cbar.ax.tick_params(labelsize=12)
cbar.set_ticks((vmin_, vmax_))
cbar.ax.set_title('AU', fontsize=10)
_hide_frame(axes)
def plot_ica_components(ica, picks=None, ch_type=None, res=64,
layout=None, vmin=None, vmax=None, cmap='RdBu_r',
sensors=True, colorbar=False, title=None,
show=True, outlines='head', contours=6,
image_interp='bilinear', head_pos=None,
inst=None):
"""Project unmixing matrix on interpolated sensor topogrpahy.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA solution.
picks : int | array-like | None
The indices of the sources to be plotted.
If None all are plotted in batches of 20.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
res : int
The resolution of the topomap image (n pixels along each side).
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None and vmax is also None, -vmax is used; otherwise np.min(data).
If callable, the output equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the output
equals vmax(data). Defaults to None.
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap to
use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging the
colorbar with left and right mouse button. Left mouse button moves the
scale up and down and right mouse button adjusts the range. Hitting
space bar resets the range. Up and down arrows can be used to change
the colormap. If None, 'Reds' is used for all positive data,
otherwise defaults to 'RdBu_r'. If 'interactive', translates to
(None, True). Defaults to 'RdBu_r'.
.. warning:: Interactive mode works smoothly only for a small number
of topomaps.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle
will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
title : str | None
Title to use.
show : bool
Show figure if True.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw. If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
inst : Raw | Epochs | None
To be able to see component properties after clicking on component
topomap you need to pass relevant data - instances of Raw or Epochs
(for example the data that ICA was trained on). This takes effect
only when running matplotlib in interactive mode.
Returns
-------
fig : instance of matplotlib.pyplot.Figure or list
The figure object(s).
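Examples
--------
A minimal, hypothetical sketch; the raw file name and the number of
components are assumptions used only for illustration::

    import mne
    raw = mne.io.read_raw_fif('sample_audvis_raw.fif', preload=True)
    ica = mne.preprocessing.ICA(n_components=15)
    ica.fit(raw)
    ica.plot_components()  # delegates to this function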
"""
from ..io import _BaseRaw
from ..epochs import _BaseEpochs
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid import make_axes_locatable
from ..channels import _get_ch_type
if picks is None: # plot components by sets of 20
ch_type = _get_ch_type(ica, ch_type)
n_components = ica.mixing_matrix_.shape[1]
p = 20
figs = []
for k in range(0, n_components, p):
picks = range(k, min(k + p, n_components))
fig = plot_ica_components(ica, picks=picks, ch_type=ch_type,
res=res, layout=layout, vmax=vmax,
cmap=cmap, sensors=sensors,
colorbar=colorbar, title=title,
show=show, outlines=outlines,
contours=contours,
image_interp=image_interp,
head_pos=head_pos, inst=inst)
figs.append(fig)
return figs
elif np.isscalar(picks):
picks = [picks]
ch_type = _get_ch_type(ica, ch_type)
if cmap == 'interactive':
cmap = ('RdBu_r', True)
elif not isinstance(cmap, tuple):
cmap = (cmap, False if len(picks) > 2 else True)
data = np.dot(ica.mixing_matrix_[:, picks].T,
ica.pca_components_[:ica.n_components_])
if ica.info is None:
raise RuntimeError('The ICA\'s measurement info is missing. Please '
'fit the ICA or add the corresponding info object.')
data_picks, pos, merge_grads, names, _ = _prepare_topo_plot(ica, ch_type,
layout)
pos, outlines = _check_outlines(pos, outlines, head_pos)
if outlines not in (None, 'head'):
image_mask, pos = _make_image_mask(outlines, pos, res)
else:
image_mask = None
data = np.atleast_2d(data)
data = data[:, data_picks]
# prepare data for iteration
fig, axes = _prepare_trellis(len(data), max_col=5)
if title is None:
title = 'ICA components'
fig.suptitle(title)
if merge_grads:
from ..channels.layout import _merge_grad_data
for ii, data_, ax in zip(picks, data, axes):
ax.set_title('IC #%03d' % ii, fontsize=12)
data_ = _merge_grad_data(data_) if merge_grads else data_
vmin_, vmax_ = _setup_vmin_vmax(data_, vmin, vmax)
im = plot_topomap(data_.flatten(), pos, vmin=vmin_, vmax=vmax_,
res=res, axes=ax, cmap=cmap[0], outlines=outlines,
image_mask=image_mask, contours=contours,
image_interp=image_interp, show=False)[0]
im.axes.set_label('IC #%03d' % ii)
if colorbar:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plt.colorbar(im, cax=cax, format='%3.2f', cmap=cmap)
cbar.ax.tick_params(labelsize=12)
cbar.set_ticks((vmin_, vmax_))
cbar.ax.set_title('AU', fontsize=10)
if cmap[1]:
ax.CB = DraggableColorbar(cbar, im)
_hide_frame(ax)
tight_layout(fig=fig)
fig.subplots_adjust(top=0.95)
fig.canvas.draw()
if isinstance(inst, (_BaseRaw, _BaseEpochs)):
def onclick(event, ica=ica, inst=inst):
# check which component to plot
label = event.inaxes.get_label()
if 'IC #' in label:
ic = int(label[4:])
ica.plot_properties(inst, picks=ic, show=True)
fig.canvas.mpl_connect('button_press_event', onclick)
plt_show(show)
return fig
def plot_tfr_topomap(tfr, tmin=None, tmax=None, fmin=None, fmax=None,
ch_type=None, baseline=None, mode='mean', layout=None,
vmin=None, vmax=None, cmap=None, sensors=True,
colorbar=True, unit=None, res=64, size=2,
cbar_fmt='%1.1e', show_names=False, title=None,
axes=None, show=True, outlines='head', head_pos=None):
"""Plot topographic maps of specific time-frequency intervals of TFR data
Parameters
----------
tfr : AverageTFR
The AverageTFR object.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None, no baseline correction will be performed.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file
was found, the layout is automatically generated from the sensor
locations.
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None and vmax is also None, -vmax is used; otherwise np.min(data), or
0 if the data contains only positive values. If callable, the output equals
vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range. If None, the
maximum value is used. If callable, the output equals vmax(data).
Defaults to None.
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap to
use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging the
colorbar with left and right mouse button. Left mouse button moves the
scale up and down and right mouse button adjusts the range. Hitting
space bar resets the range. Up and down arrows can be used to change
the colormap. If None (default), 'Reds' is used for all positive data,
otherwise defaults to 'RdBu_r'. If 'interactive', translates to
(None, True).
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle will
be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
unit : str | None
The unit of the channel type used for colorbar labels.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches (only applies when plotting multiple
topomaps at a time).
cbar_fmt : str
String format for colorbar values.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g., to
delete the prefix 'MEG ' from all channel names, pass the function
lambda x: x.replace('MEG ', ''). If `mask` is not None, only
significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
axes : instance of Axis | None
The axes to plot to. If None the axes is defined automatically.
show : bool
Show figure if True.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
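Examples
--------
A minimal, hypothetical sketch; ``epochs`` is assumed to be an existing
mne.Epochs instance and the frequencies/times are illustrative only::

    from mne.time_frequency import tfr_morlet
    power = tfr_morlet(epochs, freqs=[8., 10., 12.], n_cycles=2.,
                       return_itc=False)
    power.plot_topomap(tmin=0.1, tmax=0.3, fmin=8., fmax=12.,
                       mode='logratio', baseline=(None, 0.))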
"""
from ..channels import _get_ch_type
ch_type = _get_ch_type(tfr, ch_type)
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
picks, pos, merge_grads, names, _ = _prepare_topo_plot(tfr, ch_type,
layout)
if not show_names:
names = None
data = tfr.data
data = rescale(data, tfr.times, baseline, mode, copy=True)
# crop time
itmin, itmax = None, None
idx = np.where(_time_mask(tfr.times, tmin, tmax))[0]
if tmin is not None:
itmin = idx[0]
if tmax is not None:
itmax = idx[-1] + 1
# crop freqs
ifmin, ifmax = None, None
idx = np.where(_time_mask(tfr.freqs, fmin, fmax))[0]
if fmin is not None:
ifmin = idx[0]
if fmax is not None:
ifmax = idx[-1] + 1
data = data[picks, ifmin:ifmax, itmin:itmax]
data = np.mean(np.mean(data, axis=2), axis=1)[:, np.newaxis]
if merge_grads:
from ..channels.layout import _merge_grad_data
data = _merge_grad_data(data)
norm = False if np.min(data) < 0 else True
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax, norm)
if cmap is None or cmap == 'interactive':
cmap = ('Reds', True) if norm else ('RdBu_r', True)
elif not isinstance(cmap, tuple):
cmap = (cmap, True)
if axes is None:
fig = plt.figure()
ax = fig.gca()
else:
fig = axes.figure
ax = axes
_hide_frame(ax)
if title is not None:
ax.set_title(title)
fig_wrapper = list()
selection_callback = partial(_onselect, tfr=tfr, pos=pos, ch_type=ch_type,
itmin=itmin, itmax=itmax, ifmin=ifmin,
ifmax=ifmax, cmap=cmap[0], fig=fig_wrapper,
layout=layout)
im, _ = plot_topomap(data[:, 0], pos, vmin=vmin, vmax=vmax,
axes=ax, cmap=cmap[0], image_interp='bilinear',
contours=False, names=names, show_names=show_names,
show=False, onselect=selection_callback)
if colorbar:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plt.colorbar(im, cax=cax, format=cbar_fmt, cmap=cmap[0])
cbar.set_ticks((vmin, vmax))
cbar.ax.tick_params(labelsize=12)
cbar.ax.set_title('AU')
if cmap[1]:
ax.CB = DraggableColorbar(cbar, im)
plt_show(show)
return fig
def plot_evoked_topomap(evoked, times="auto", ch_type=None, layout=None,
vmin=None, vmax=None, cmap=None, sensors=True,
colorbar=True, scale=None, scale_time=1e3, unit=None,
res=64, size=1, cbar_fmt='%3.1f',
time_format='%01d ms', proj=False, show=True,
show_names=False, title=None, mask=None,
mask_params=None, outlines='head', contours=6,
image_interp='bilinear', average=None, head_pos=None,
axes=None):
"""Plot topographic maps of specific time points of evoked data
Parameters
----------
evoked : Evoked
The Evoked object.
times : float | array of floats | "auto" | "peaks".
The time point(s) to plot. If "auto", the number of ``axes`` determines
the number of time points. If ``axes`` is also None, 10 topographies
will be shown with a regular time spacing between the first and last
time instant. If "peaks", finds time points automatically by checking
for local maxima in global field power.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are collected in
pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout file
is inferred from the data; if no appropriate layout file was found, the
layout is automatically generated from the sensor locations.
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None and vmax is also None, -vmax is used; otherwise np.min(data).
If callable, the output equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the output
equals vmax(data). Defaults to None.
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap to
use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging the
colorbar with left and right mouse button. Left mouse button moves the
scale up and down and right mouse button adjusts the range. Hitting
space bar resets the range. Up and down arrows can be used to change
the colormap. If None (default), 'Reds' is used for all positive data,
otherwise defaults to 'RdBu_r'. If 'interactive', translates to
(None, True).
.. warning:: Interactive mode works smoothly only for a small number
of topomaps.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib plot
format string (e.g., 'r+' for red plusses). If True, a circle will be
used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scale : dict | float | None
Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
for grad and 1e15 for mag.
scale_time : float | None
Scale the time labels. Defaults to 1e3 (ms).
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
time_format : str
String format for topomap values. Defaults to "%01d ms"
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g., to
delete the prefix 'MEG ' from all channel names, pass the function
lambda x: x.replace('MEG ', ''). If `mask` is not None, only
significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw. If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
average : float | None
The time window around a given time to be used for averaging (seconds).
For example, 0.01 would translate into a window that starts 5 ms before
and ends 5 ms after a given time point. Defaults to None, which means
no averaging.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of the
same length as ``times`` (unless ``times`` is None). If instance of
Axes, ``times`` must be a float or a list of one float.
Defaults to None.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
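Examples
--------
A minimal, hypothetical sketch; the evoked file name and time points are
assumptions used only for illustration::

    import mne
    evoked = mne.read_evokeds('sample_audvis-ave.fif', condition=0)
    evoked.plot_topomap(times=[0.08, 0.10, 0.12], ch_type='mag')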
"""
from ..channels import _get_ch_type
ch_type = _get_ch_type(evoked, ch_type)
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable # noqa
mask_params = _handle_default('mask_params', mask_params)
mask_params['markersize'] *= size / 2.
mask_params['markeredgewidth'] *= size / 2.
picks, pos, merge_grads, names, ch_type = _prepare_topo_plot(
evoked, ch_type, layout)
# project before picks
if proj is True and evoked.proj is not True:
data = evoked.copy().apply_proj().data
else:
data = evoked.data
evoked = evoked.copy().pick_channels(
[evoked.ch_names[pick] for pick in picks])
if axes is not None:
if isinstance(axes, plt.Axes):
axes = [axes]
times = _process_times(evoked, times, n_peaks=len(axes))
else:
times = _process_times(evoked, times, n_peaks=None)
space = 1 / (2. * evoked.info['sfreq'])
if (max(times) > max(evoked.times) + space or
min(times) < min(evoked.times) - space):
raise ValueError('Times should be between {0:0.3f} and '
'{1:0.3f}.'.format(evoked.times[0], evoked.times[-1]))
n_times = len(times)
nax = n_times + bool(colorbar)
width = size * nax
height = size + max(0, 0.1 * (4 - size)) + bool(title) * 0.5
if axes is None:
plt.figure(figsize=(width, height))
axes = list()
for ax_idx in range(len(times)):
if colorbar: # Make room for the colorbar
axes.append(plt.subplot(1, n_times + 1, ax_idx + 1))
else:
axes.append(plt.subplot(1, n_times, ax_idx + 1))
elif colorbar:
warn('Colorbar is drawn to the rightmost column of the figure. Be '
'sure to provide enough space for it or turn it off with '
'colorbar=False.')
if len(axes) != n_times:
        raise RuntimeError('The number of axes must match the number of '
                           'time points.')
if ch_type.startswith('planar'):
key = 'grad'
else:
key = ch_type
scale = _handle_default('scalings', scale)[key]
unit = _handle_default('units', unit)[key]
if not show_names:
names = None
w_frame = plt.rcParams['figure.subplot.wspace'] / (2 * nax)
top_frame = max((0.05 if title is None else 0.25), .2 / size)
fig = axes[0].get_figure()
fig.subplots_adjust(left=w_frame, right=1 - w_frame, bottom=0,
top=1 - top_frame)
    # find the first index that is >= each time point (up to rounding error)
time_idx = [np.where(_time_mask(evoked.times, tmin=t,
tmax=None, sfreq=evoked.info['sfreq']))[0][0]
for t in times]
if average is None:
data = data[np.ix_(picks, time_idx)]
elif isinstance(average, float):
if not average > 0:
            raise ValueError('The average parameter must be positive. You '
                             'passed a non-positive value.')
data_ = np.zeros((len(picks), len(time_idx)))
ave_time = float(average) / 2.
iter_times = evoked.times[time_idx]
for ii, (idx, tmin_, tmax_) in enumerate(zip(time_idx,
iter_times - ave_time,
iter_times + ave_time)):
my_range = (tmin_ < evoked.times) & (evoked.times < tmax_)
data_[:, ii] = data[picks][:, my_range].mean(-1)
data = data_
else:
raise ValueError('The average parameter must be None or a float.'
'Check your input.')
data *= scale
if merge_grads:
from ..channels.layout import _merge_grad_data
data = _merge_grad_data(data)
images, contours_ = [], []
if mask is not None:
_picks = picks[::2 if ch_type not in ['mag', 'eeg'] else 1]
mask_ = mask[np.ix_(_picks, time_idx)]
pos, outlines = _check_outlines(pos, outlines, head_pos)
if outlines is not None:
image_mask, pos = _make_image_mask(outlines, pos, res)
else:
image_mask = None
vlims = [_setup_vmin_vmax(data[:, i], vmin, vmax, norm=merge_grads)
for i in range(len(times))]
vmin = np.min(vlims)
vmax = np.max(vlims)
if cmap == 'interactive':
cmap = (None, True)
elif not isinstance(cmap, tuple):
cmap = (cmap, False if len(times) > 2 else True)
for idx, time in enumerate(times):
tp, cn = plot_topomap(data[:, idx], pos, vmin=vmin, vmax=vmax,
sensors=sensors, res=res, names=names,
show_names=show_names, cmap=cmap[0],
mask=mask_[:, idx] if mask is not None else None,
mask_params=mask_params, axes=axes[idx],
outlines=outlines, image_mask=image_mask,
contours=contours, image_interp=image_interp,
show=False)
images.append(tp)
if cn is not None:
contours_.append(cn)
if time_format is not None:
axes[idx].set_title(time_format % (time * scale_time))
if title is not None:
plt.suptitle(title, verticalalignment='top', size='x-large')
if colorbar:
# works both when fig axes pre-defined and when not
n_fig_axes = max(nax, len(fig.get_axes()))
cax = plt.subplot(1, n_fig_axes + 1, n_fig_axes + 1)
# resize the colorbar (by default the color fills the whole axes)
cpos = cax.get_position()
if size <= 1:
cpos.x0 = 1 - (.7 + .1 / size) / n_fig_axes
cpos.x1 = cpos.x0 + .1 / n_fig_axes
cpos.y0 = .2
cpos.y1 = .7
cax.set_position(cpos)
if unit is not None:
cax.set_title(unit)
cbar = fig.colorbar(images[-1], ax=cax, cax=cax, format=cbar_fmt)
cbar.set_ticks([cbar.vmin, 0, cbar.vmax])
if cmap[1]:
for im in images:
im.axes.CB = DraggableColorbar(cbar, im)
if proj == 'interactive':
_check_delayed_ssp(evoked)
params = dict(evoked=evoked, fig=fig, projs=evoked.info['projs'],
picks=picks, images=images, contours=contours_,
time_idx=time_idx, scale=scale, merge_grads=merge_grads,
res=res, pos=pos, image_mask=image_mask,
plot_update_proj_callback=_plot_update_evoked_topomap)
_draw_proj_checkbox(None, params)
plt_show(show)
return fig
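# Illustrative usage sketch, not part of the original module: a typical call
# to plot_evoked_topomap. The latencies and the 10 ms averaging window are
# arbitrary example values.
def _example_plot_evoked_topomap(evoked):
    return plot_evoked_topomap(evoked, times=[0.08, 0.10, 0.12],
                               ch_type='mag', average=0.01, show=False)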
def _plot_topomap_multi_cbar(data, pos, ax, title=None, unit=None, vmin=None,
vmax=None, cmap=None, outlines='head',
colorbar=False, cbar_fmt='%3.3f'):
"""Aux Function"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
_hide_frame(ax)
vmin = np.min(data) if vmin is None else vmin
vmax = np.max(data) if vmax is None else vmax
if cmap == 'interactive':
cmap = (None, True)
elif not isinstance(cmap, tuple):
cmap = (cmap, True)
if title is not None:
ax.set_title(title, fontsize=10)
im, _ = plot_topomap(data, pos, vmin=vmin, vmax=vmax, axes=ax,
cmap=cmap[0], image_interp='bilinear', contours=False,
outlines=outlines, show=False)
if colorbar is True:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="10%", pad=0.25)
cbar = plt.colorbar(im, cax=cax, format=cbar_fmt)
cbar.set_ticks((vmin, vmax))
if unit is not None:
cbar.ax.set_title(unit, fontsize=8)
cbar.ax.tick_params(labelsize=8)
if cmap[1]:
ax.CB = DraggableColorbar(cbar, im)
@verbose
def plot_epochs_psd_topomap(epochs, bands=None, vmin=None, vmax=None,
tmin=None, tmax=None, proj=False,
bandwidth=None, adaptive=False, low_bias=True,
normalization='length', ch_type=None, layout=None,
cmap='RdBu_r', agg_fun=None, dB=False, n_jobs=1,
normalize=False, cbar_fmt='%0.3f',
outlines='head', axes=None, show=True,
verbose=None):
"""Plot the topomap of the power spectral density across epochs
Parameters
----------
epochs : instance of Epochs
The epochs object
bands : list of tuple | None
The lower and upper frequency and the name for that band. If None,
(default) expands to:
bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
(12, 30, 'Beta'), (30, 45, 'Gamma')]
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None np.min(data) is used. If callable, the output equals
vmin(data).
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the output
equals vmax(data). Defaults to None.
tmin : float | None
Start time to consider.
tmax : float | None
End time to consider.
proj : bool
Apply projection.
bandwidth : float
The bandwidth of the multi taper windowing function in Hz. The default
value is a window half-bandwidth of 4 Hz.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are collected in
pairs and the RMS for each pair is plotted. If None, then first
available channel type from order given above is used. Defaults to
None.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file was
found, the layout is automatically generated from the sensor
locations.
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap to
use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging the
colorbar with left and right mouse button. Left mouse button moves the
scale up and down and right mouse button adjusts the range. Hitting
space bar resets the range. Up and down arrows can be used to change
the colormap. If None (default), 'Reds' is used for all positive data,
otherwise defaults to 'RdBu_r'. If 'interactive', translates to
(None, True).
agg_fun : callable
The function used to aggregate over frequencies.
        Defaults to np.sum if normalize is True, else np.mean.
dB : bool
If True, transform data to decibels (with ``10 * np.log10(data)``)
following the application of `agg_fun`. Only valid if normalize is
False.
n_jobs : int
Number of jobs to run in parallel.
normalize : bool
If True, each band will be divided by the total power. Defaults to
False.
cbar_fmt : str
The colorbar format. Defaults to '%0.3f'.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
axes : list of axes | None
List of axes to plot consecutive topographies to. If None the axes
will be created automatically. Defaults to None.
show : bool
Show figure if True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
from ..channels import _get_ch_type
ch_type = _get_ch_type(epochs, ch_type)
picks, pos, merge_grads, names, ch_type = _prepare_topo_plot(
epochs, ch_type, layout)
psds, freqs = psd_multitaper(epochs, tmin=tmin, tmax=tmax,
bandwidth=bandwidth, adaptive=adaptive,
low_bias=low_bias,
normalization=normalization, picks=picks,
proj=proj, n_jobs=n_jobs)
psds = np.mean(psds, axis=0)
if merge_grads:
from ..channels.layout import _merge_grad_data
psds = _merge_grad_data(psds)
return plot_psds_topomap(
psds=psds, freqs=freqs, pos=pos, agg_fun=agg_fun, vmin=vmin,
vmax=vmax, bands=bands, cmap=cmap, dB=dB, normalize=normalize,
cbar_fmt=cbar_fmt, outlines=outlines, axes=axes, show=show)
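# Illustrative usage sketch, not part of the original module: band-limited,
# normalized PSD topographies for an existing Epochs instance. The band
# limits below are just the conventional alpha/beta ranges.
def _example_plot_epochs_psd_topomap(epochs):
    bands = [(8, 12, 'Alpha'), (12, 30, 'Beta')]
    return plot_epochs_psd_topomap(epochs, bands=bands, normalize=True,
                                   show=False)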
def plot_psds_topomap(
psds, freqs, pos, agg_fun=None, vmin=None, vmax=None, bands=None,
cmap=None, dB=True, normalize=False, cbar_fmt='%0.3f', outlines='head',
axes=None, show=True):
"""Plot spatial maps of PSDs
Parameters
----------
psds : np.ndarray of float, shape (n_channels, n_freqs)
Power spectral densities
freqs : np.ndarray of float, shape (n_freqs)
Frequencies used to compute psds.
pos : numpy.ndarray of float, shape (n_sensors, 2)
The positions of the sensors.
agg_fun : callable
The function used to aggregate over frequencies.
        Defaults to np.sum if normalize is True, else np.mean.
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None np.min(data) is used. If callable, the output equals
vmin(data).
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the output
equals vmax(data). Defaults to None.
bands : list of tuple | None
The lower and upper frequency and the name for that band. If None,
(default) expands to:
bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
(12, 30, 'Beta'), (30, 45, 'Gamma')]
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap to
use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging the
colorbar with left and right mouse button. Left mouse button moves the
scale up and down and right mouse button adjusts the range. Hitting
space bar resets the range. Up and down arrows can be used to change
the colormap. If None (default), 'Reds' is used for all positive data,
otherwise defaults to 'RdBu_r'. If 'interactive', translates to
(None, True).
dB : bool
If True, transform data to decibels (with ``10 * np.log10(data)``)
following the application of `agg_fun`. Only valid if normalize is
False.
normalize : bool
If True, each band will be divided by the total power. Defaults to
False.
cbar_fmt : str
The colorbar format. Defaults to '%0.3f'.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
axes : list of axes | None
List of axes to plot consecutive topographies to. If None the axes
will be created automatically. Defaults to None.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
import matplotlib.pyplot as plt
if bands is None:
bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
(12, 30, 'Beta'), (30, 45, 'Gamma')]
if agg_fun is None:
agg_fun = np.sum if normalize is True else np.mean
if normalize is True:
psds /= psds.sum(axis=-1)[..., None]
assert np.allclose(psds.sum(axis=-1), 1.)
n_axes = len(bands)
if axes is not None:
_validate_if_list_of_axes(axes, n_axes)
fig = axes[0].figure
else:
fig, axes = plt.subplots(1, n_axes, figsize=(2 * n_axes, 1.5))
if n_axes == 1:
axes = [axes]
for ax, (fmin, fmax, title) in zip(axes, bands):
freq_mask = (fmin < freqs) & (freqs < fmax)
if freq_mask.sum() == 0:
raise RuntimeError('No frequencies in band "%s" (%s, %s)'
% (title, fmin, fmax))
data = agg_fun(psds[:, freq_mask], axis=1)
if dB is True and normalize is False:
data = 10 * np.log10(data)
unit = 'dB'
else:
unit = 'power'
_plot_topomap_multi_cbar(data, pos, ax, title=title, vmin=vmin,
vmax=vmax, cmap=cmap, outlines=outlines,
colorbar=True, unit=unit, cbar_fmt=cbar_fmt)
tight_layout(fig=fig)
fig.canvas.draw()
plt_show(show)
return fig
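# Illustrative usage sketch, not part of the original module: drive
# plot_psds_topomap with synthetic spectra and made-up 2D sensor positions,
# so it runs without any recording on disk.
def _example_plot_psds_topomap():
    rng = np.random.RandomState(42)
    n_channels, n_freqs = 32, 128
    psds = rng.rand(n_channels, n_freqs) + 1e-3  # strictly positive power
    freqs = np.linspace(1., 45., n_freqs)
    pos = rng.rand(n_channels, 2) * 0.5 - 0.25  # rough head-sized layout
    return plot_psds_topomap(psds, freqs, pos, show=False,
                             bands=[(8, 12, 'Alpha'), (12, 30, 'Beta')])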
def plot_layout(layout, show=True):
"""Plot the sensor positions.
Parameters
----------
layout : None | Layout
Layout instance specifying sensor positions.
show : bool
Show figure if True. Defaults to True.
Returns
-------
fig : instance of matplotlib figure
Figure containing the sensor topography.
Notes
-----
.. versionadded:: 0.12.0
"""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None,
hspace=None)
ax.set_xticks([])
ax.set_yticks([])
pos = [(p[0] + p[2] / 2., p[1] + p[3] / 2.) for p in layout.pos]
pos, outlines = _check_outlines(pos, 'head')
_draw_outlines(ax, outlines)
for ii, (this_pos, ch_id) in enumerate(zip(pos, layout.names)):
ax.annotate(ch_id, xy=this_pos[:2], horizontalalignment='center',
verticalalignment='center', size='x-small')
plt_show(show)
return fig
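# Illustrative usage sketch, not part of the original module: read one of the
# built-in layouts and draw its sensor positions (the layout name assumes a
# standard MNE installation).
def _example_plot_layout():
    from ..channels import read_layout
    layout = read_layout('Vectorview-mag')
    return plot_layout(layout, show=False)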
def _onselect(eclick, erelease, tfr, pos, ch_type, itmin, itmax, ifmin, ifmax,
cmap, fig, layout=None):
"""Callback called from topomap for drawing average tfr over channels."""
import matplotlib.pyplot as plt
pos, _ = _check_outlines(pos, outlines='head', head_pos=None)
ax = eclick.inaxes
xmin = min(eclick.xdata, erelease.xdata)
xmax = max(eclick.xdata, erelease.xdata)
ymin = min(eclick.ydata, erelease.ydata)
ymax = max(eclick.ydata, erelease.ydata)
indices = [i for i in range(len(pos)) if pos[i][0] < xmax and
pos[i][0] > xmin and pos[i][1] < ymax and pos[i][1] > ymin]
for idx, circle in enumerate(ax.artists):
if idx in indices:
circle.set_color('r')
else:
circle.set_color('black')
plt.gcf().canvas.draw()
if not indices:
return
data = tfr.data
if ch_type == 'mag':
picks = pick_types(tfr.info, meg=ch_type, ref_meg=False)
data = np.mean(data[indices, ifmin:ifmax, itmin:itmax], axis=0)
chs = [tfr.ch_names[picks[x]] for x in indices]
elif ch_type == 'grad':
from ..channels.layout import _pair_grad_sensors
grads = _pair_grad_sensors(tfr.info, layout=layout,
topomap_coords=False)
idxs = list()
for idx in indices:
idxs.append(grads[idx * 2])
idxs.append(grads[idx * 2 + 1]) # pair of grads
data = np.mean(data[idxs, ifmin:ifmax, itmin:itmax], axis=0)
chs = [tfr.ch_names[x] for x in idxs]
elif ch_type == 'eeg':
picks = pick_types(tfr.info, meg=False, eeg=True, ref_meg=False)
data = np.mean(data[indices, ifmin:ifmax, itmin:itmax], axis=0)
chs = [tfr.ch_names[picks[x]] for x in indices]
logger.info('Averaging TFR over channels ' + str(chs))
if len(fig) == 0:
fig.append(figure_nobar())
if not plt.fignum_exists(fig[0].number):
fig[0] = figure_nobar()
ax = fig[0].add_subplot(111)
itmax = len(tfr.times) - 1 if itmax is None else min(itmax,
len(tfr.times) - 1)
ifmax = len(tfr.freqs) - 1 if ifmax is None else min(ifmax,
len(tfr.freqs) - 1)
if itmin is None:
itmin = 0
if ifmin is None:
ifmin = 0
extent = (tfr.times[itmin] * 1e3, tfr.times[itmax] * 1e3, tfr.freqs[ifmin],
tfr.freqs[ifmax])
title = 'Average over %d %s channels.' % (len(chs), ch_type)
ax.set_title(title)
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')
img = ax.imshow(data, extent=extent, aspect="auto", origin="lower",
cmap=cmap)
if len(fig[0].get_axes()) < 2:
fig[0].get_axes()[1].cbar = fig[0].colorbar(mappable=img)
else:
fig[0].get_axes()[1].cbar.on_mappable_changed(mappable=img)
fig[0].canvas.draw()
plt.figure(fig[0].number)
plt_show(True)
def _prepare_topomap(pos, ax):
"""Helper for preparing the topomap."""
pos_x = pos[:, 0]
pos_y = pos[:, 1]
_hide_frame(ax)
if any([not pos_y.any(), not pos_x.any()]):
raise RuntimeError('No position information found, cannot compute '
'geometries for topomap.')
return pos_x, pos_y
def _hide_frame(ax):
"""Helper to hide axis frame for topomaps."""
ax.get_yticks()
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.set_frame_on(False)
def _init_anim(ax, ax_line, ax_cbar, params, merge_grads):
"""Initialize animated topomap."""
from matplotlib import pyplot as plt, patches
logger.info('Initializing animation...')
data = params['data']
items = list()
if params['butterfly']:
all_times = params['all_times']
for idx in range(len(data)):
ax_line.plot(all_times, data[idx], color='k')
vmin, vmax = _setup_vmin_vmax(data, None, None)
ax_line.set_yticks(np.around(np.linspace(vmin, vmax, 5), -1))
params['line'], = ax_line.plot([all_times[0], all_times[0]],
ax_line.get_ylim(), color='r')
items.append(params['line'])
if merge_grads:
from mne.channels.layout import _merge_grad_data
data = _merge_grad_data(data)
norm = True if np.min(data) > 0 else False
cmap = 'Reds' if norm else 'RdBu_r'
vmin, vmax = _setup_vmin_vmax(data, None, None, norm)
pos, outlines = _check_outlines(params['pos'], 'head', None)
pos_x = pos[:, 0]
pos_y = pos[:, 1]
_hide_frame(ax)
xlim = np.inf, -np.inf,
ylim = np.inf, -np.inf,
mask_ = np.c_[outlines['mask_pos']]
xmin, xmax = (np.min(np.r_[xlim[0], mask_[:, 0]]),
np.max(np.r_[xlim[1], mask_[:, 0]]))
ymin, ymax = (np.min(np.r_[ylim[0], mask_[:, 1]]),
np.max(np.r_[ylim[1], mask_[:, 1]]))
res = 64
xi = np.linspace(xmin, xmax, res)
yi = np.linspace(ymin, ymax, res)
Xi, Yi = np.meshgrid(xi, yi)
params['Zis'] = list()
for frame in params['frames']:
Zi = _griddata(pos_x, pos_y, data[:, frame], Xi, Yi)
params['Zis'].append(Zi)
Zi = params['Zis'][0]
zi_min = np.min(params['Zis'])
zi_max = np.max(params['Zis'])
cont_lims = np.linspace(zi_min, zi_max, 7, endpoint=False)[1:]
_, pos = _make_image_mask(outlines, pos, res)
params.update({'vmin': vmin, 'vmax': vmax, 'Xi': Xi, 'Yi': Yi, 'Zi': Zi,
'extent': (xmin, xmax, ymin, ymax), 'cmap': cmap,
'cont_lims': cont_lims})
    # plot map and contour
im = ax.imshow(Zi, cmap=cmap, vmin=vmin, vmax=vmax, origin='lower',
aspect='equal', extent=(xmin, xmax, ymin, ymax),
interpolation='bilinear')
plt.colorbar(im, cax=ax_cbar, cmap=cmap)
cont = ax.contour(Xi, Yi, Zi, levels=cont_lims, colors='k', linewidths=1)
patch_ = patches.Ellipse((0, 0),
2 * outlines['clip_radius'][0],
2 * outlines['clip_radius'][1],
clip_on=True,
transform=ax.transData)
im.set_clip_path(patch_)
text = ax.text(0.55, 0.95, '', transform=ax.transAxes, va='center',
ha='right')
params['text'] = text
items.append(im)
items.append(text)
for col in cont.collections:
col.set_clip_path(patch_)
outlines_ = _draw_outlines(ax, outlines)
params.update({'patch': patch_, 'outlines': outlines_})
return tuple(items) + tuple(cont.collections)
def _animate(frame, ax, ax_line, params):
"""Updates animated topomap."""
if params['pause']:
frame = params['frame']
time_idx = params['frames'][frame]
title = '%6.0f ms' % (params['times'][frame] * 1e3)
if params['blit']:
text = params['text']
else:
ax.cla() # Clear old contours.
text = ax.text(0.45, 1.15, '', transform=ax.transAxes)
for k, (x, y) in params['outlines'].items():
if 'mask' in k:
continue
ax.plot(x, y, color='k', linewidth=1, clip_on=False)
_hide_frame(ax)
text.set_text(title)
vmin = params['vmin']
vmax = params['vmax']
Xi = params['Xi']
Yi = params['Yi']
Zi = params['Zis'][frame]
extent = params['extent']
cmap = params['cmap']
patch = params['patch']
im = ax.imshow(Zi, cmap=cmap, vmin=vmin, vmax=vmax, origin='lower',
aspect='equal', extent=extent, interpolation='bilinear')
cont_lims = params['cont_lims']
cont = ax.contour(Xi, Yi, Zi, levels=cont_lims, colors='k', linewidths=1)
im.set_clip_path(patch)
items = [im, text]
for col in cont.collections:
col.set_clip_path(patch)
if params['butterfly']:
all_times = params['all_times']
line = params['line']
line.remove()
params['line'] = ax_line.plot([all_times[time_idx],
all_times[time_idx]],
ax_line.get_ylim(), color='r')[0]
items.append(params['line'])
params['frame'] = frame
return tuple(items) + tuple(cont.collections)
def _pause_anim(event, params):
"""Function for pausing and continuing the animation on mouse click"""
params['pause'] = not params['pause']
def _key_press(event, params):
"""Function for handling key presses for the animation."""
if event.key == 'left':
params['pause'] = True
params['frame'] = max(params['frame'] - 1, 0)
elif event.key == 'right':
params['pause'] = True
params['frame'] = min(params['frame'] + 1, len(params['frames']) - 1)
def _topomap_animation(evoked, ch_type='mag', times=None, frame_rate=None,
butterfly=False, blit=True, show=True):
"""Make animation of evoked data as topomap timeseries. Animation can be
paused/resumed with left mouse button. Left and right arrow keys can be
used to move backward or forward in time.
Parameters
----------
evoked : instance of Evoked
The evoked data.
ch_type : str | None
Channel type to plot. Accepted data types: 'mag', 'grad', 'eeg'.
If None, first available channel type from ('mag', 'grad', 'eeg') is
        used. Defaults to 'mag'.
times : array of floats | None
The time points to plot. If None, 10 evenly spaced samples are
calculated over the evoked time series. Defaults to None.
frame_rate : int | None
Frame rate for the animation in Hz. If None, frame rate = sfreq / 10.
Defaults to None.
butterfly : bool
Whether to plot the data as butterfly plot under the topomap.
Defaults to False.
blit : bool
Whether to use blit to optimize drawing. In general, it is recommended
to use blit in combination with ``show=True``. If you intend to save
the animation it is better to disable blit. For MacOSX blit is always
disabled. Defaults to True.
show : bool
Whether to show the animation. Defaults to True.
Returns
-------
fig : instance of matplotlib figure
The figure.
anim : instance of matplotlib FuncAnimation
Animation of the topomap.
Notes
-----
.. versionadded:: 0.12.0
"""
from matplotlib import pyplot as plt, animation
if ch_type is None:
ch_type = _picks_by_type(evoked.info)[0][0]
if ch_type not in ('mag', 'grad', 'eeg'):
raise ValueError("Channel type not supported. Supported channel "
"types include 'mag', 'grad' and 'eeg'.")
if times is None:
times = np.linspace(evoked.times[0], evoked.times[-1], 10)
times = np.array(times)
if times.ndim != 1:
raise ValueError('times must be 1D, got %d dimensions' % times.ndim)
if max(times) > evoked.times[-1] or min(times) < evoked.times[0]:
raise ValueError('All times must be inside the evoked time series.')
frames = [np.abs(evoked.times - time).argmin() for time in times]
blit = False if plt.get_backend() == 'MacOSX' else blit
picks, pos, merge_grads, _, ch_type = _prepare_topo_plot(evoked,
ch_type=ch_type,
layout=None)
data = evoked.data[picks, :]
data *= _handle_default('scalings')[ch_type]
fig = plt.figure()
offset = 0. if blit else 0.4 # XXX: blit changes the sizes for some reason
ax = plt.axes([0. + offset / 2., 0. + offset / 2., 1. - offset,
1. - offset], xlim=(-1, 1), ylim=(-1, 1))
if butterfly:
ax_line = plt.axes([0.2, 0.05, 0.6, 0.1], xlim=(evoked.times[0],
evoked.times[-1]))
else:
ax_line = None
if isinstance(frames, int):
frames = np.linspace(0, len(evoked.times) - 1, frames).astype(int)
ax_cbar = plt.axes([0.85, 0.1, 0.05, 0.8])
ax_cbar.set_title(_handle_default('units')[ch_type], fontsize=10)
params = {'data': data, 'pos': pos, 'all_times': evoked.times, 'frame': 0,
'frames': frames, 'butterfly': butterfly, 'blit': blit,
'pause': False, 'times': times}
init_func = partial(_init_anim, ax=ax, ax_cbar=ax_cbar, ax_line=ax_line,
params=params, merge_grads=merge_grads)
animate_func = partial(_animate, ax=ax, ax_line=ax_line, params=params)
pause_func = partial(_pause_anim, params=params)
fig.canvas.mpl_connect('button_press_event', pause_func)
key_press_func = partial(_key_press, params=params)
fig.canvas.mpl_connect('key_press_event', key_press_func)
if frame_rate is None:
frame_rate = evoked.info['sfreq'] / 10.
interval = 1000 / frame_rate # interval is in ms
anim = animation.FuncAnimation(fig, animate_func, init_func=init_func,
frames=len(frames), interval=interval,
blit=blit)
fig.mne_animation = anim # to make sure anim is not garbage collected
plt_show(show, block=False)
if 'line' in params:
# Finally remove the vertical line so it does not appear in saved fig.
params['line'].remove()
return fig, anim
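# Illustrative usage sketch, not part of the original module: animate
# gradiometer topographies over ten evenly spaced time points with a
# butterfly trace underneath; blitting is disabled for portability.
def _example_topomap_animation(evoked):
    return _topomap_animation(evoked, ch_type='grad', times=None,
                              frame_rate=5, butterfly=True, blit=False,
                              show=False)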
| bsd-3-clause |
anntzer/scikit-learn | examples/cross_decomposition/plot_pcr_vs_pls.py | 15 | 6952 | """
==================================================================
Principal Component Regression vs Partial Least Squares Regression
==================================================================
This example compares `Principal Component Regression
<https://en.wikipedia.org/wiki/Principal_component_regression>`_ (PCR) and
`Partial Least Squares Regression
<https://en.wikipedia.org/wiki/Partial_least_squares_regression>`_ (PLS) on a
toy dataset. Our goal is to illustrate how PLS can outperform PCR when the
target is strongly correlated with some directions in the data that have a
low variance.
PCR is a regressor composed of two steps: first,
:class:`~sklearn.decomposition.PCA` is applied to the training data, possibly
performing dimensionality reduction; then, a regressor (e.g. a linear
regressor) is trained on the transformed samples. In
:class:`~sklearn.decomposition.PCA`, the transformation is purely
unsupervised, meaning that no information about the targets is used. As a
result, PCR may perform poorly in some datasets where the target is strongly
correlated with *directions* that have low variance. Indeed, the
dimensionality reduction of PCA projects the data into a lower dimensional
space where the variance of the projected data is greedily maximized along
each axis. Even if the directions with a lower variance happen to be the
most predictive of the target, they will be dropped, and the final
regressor will not be able to leverage them.
PLS is both a transformer and a regressor, and it is quite similar to PCR: it
also applies a dimensionality reduction to the samples before applying a
linear regressor to the transformed data. The main difference with PCR is
that the PLS transformation is supervised. Therefore, as we will see in this
example, it does not suffer from the issue we just mentioned.
"""
print(__doc__)
# %%
# The data
# --------
#
# We start by creating a simple dataset with two features. Before we even dive
# into PCR and PLS, we fit a PCA estimator to display the two principal
# components of this dataset, i.e. the two directions that explain the most
# variance in the data.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
rng = np.random.RandomState(0)
n_samples = 500
cov = [[3, 3],
[3, 4]]
X = rng.multivariate_normal(mean=[0, 0], cov=cov, size=n_samples)
pca = PCA(n_components=2).fit(X)
plt.scatter(X[:, 0], X[:, 1], alpha=.3, label='samples')
for i, (comp, var) in enumerate(zip(pca.components_, pca.explained_variance_)):
comp = comp * var # scale component by its variance explanation power
plt.plot([0, comp[0]], [0, comp[1]], label=f"Component {i}", linewidth=5,
color=f"C{i + 2}")
plt.gca().set(aspect='equal',
title="2-dimensional dataset with principal components",
xlabel='first feature', ylabel='second feature')
plt.legend()
plt.show()
# %%
# For the purpose of this example, we now define the target `y` such that it is
# strongly correlated with a direction that has a small variance. To this end,
# we will project `X` onto the second component, and add some noise to it.
y = X.dot(pca.components_[1]) + rng.normal(size=n_samples) / 2
fig, axes = plt.subplots(1, 2, figsize=(10, 3))
axes[0].scatter(X.dot(pca.components_[0]), y, alpha=.3)
axes[0].set(xlabel='Projected data onto first PCA component', ylabel='y')
axes[1].scatter(X.dot(pca.components_[1]), y, alpha=.3)
axes[1].set(xlabel='Projected data onto second PCA component', ylabel='y')
plt.tight_layout()
plt.show()
# %%
# Projection on one component and predictive power
# ------------------------------------------------
#
# We now create two regressors: PCR and PLS, and for our illustration purposes
# we set the number of components to 1. Before feeding the data to the PCA step
# of PCR, we first standardize it, as recommended by good practice. The PLS
# estimator has built-in scaling capabilities.
#
# For both models, we plot the projected data onto the first component against
# the target. In both cases, this projected data is what the regressors will
# use as training data.
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import PLSRegression
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)
pcr = make_pipeline(StandardScaler(), PCA(n_components=1), LinearRegression())
pcr.fit(X_train, y_train)
pca = pcr.named_steps['pca'] # retrieve the PCA step of the pipeline
pls = PLSRegression(n_components=1)
pls.fit(X_train, y_train)
fig, axes = plt.subplots(1, 2, figsize=(10, 3))
axes[0].scatter(pca.transform(X_test), y_test, alpha=.3, label='ground truth')
axes[0].scatter(pca.transform(X_test), pcr.predict(X_test), alpha=.3,
label='predictions')
axes[0].set(xlabel='Projected data onto first PCA component',
ylabel='y', title='PCR / PCA')
axes[0].legend()
axes[1].scatter(pls.transform(X_test), y_test, alpha=.3, label='ground truth')
axes[1].scatter(pls.transform(X_test), pls.predict(X_test), alpha=.3,
label='predictions')
axes[1].set(xlabel='Projected data onto first PLS component',
ylabel='y', title='PLS')
axes[1].legend()
plt.tight_layout()
plt.show()
# %%
# As expected, the unsupervised PCA transformation of PCR has dropped the
# second component, i.e. the direction with the lowest variance, despite
# it being the most predictive direction. This is because PCA is a completely
# unsupervised transformation, and results in the projected data having a low
# predictive power on the target.
#
# On the other hand, the PLS regressor manages to capture the effect of the
# direction with the lowest variance, thanks to its use of target information
# during the transformation: it can recognize that this direction is actually
# the most predictive. We note that the first PLS component is negatively
# correlated with the target, which comes from the fact that the signs of
# eigenvectors are arbitrary.
#
# We also print the R-squared scores of both estimators, which further confirms
# that PLS is a better alternative than PCR in this case. A negative R-squared
# indicates that PCR performs worse than a regressor that would simply predict
# the mean of the target.
print(f"PCR r-squared {pcr.score(X_test, y_test):.3f}")
print(f"PLS r-squared {pls.score(X_test, y_test):.3f}")
# %%
# As a final remark, we note that PCR with 2 components performs as well as
# PLS: this is because in this case, PCR was able to leverage the second
# component, which has the most predictive power on the target.
pca_2 = make_pipeline(PCA(n_components=2), LinearRegression())
pca_2.fit(X_train, y_train)
print(f"PCR r-squared with 2 components {pca_2.score(X_test, y_test):.3f}")
| bsd-3-clause |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/matplotlib/backends/backend_qt5agg.py | 4 | 7845 | """
Render to qt from agg
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import ctypes
import sys
import traceback
from matplotlib.figure import Figure
from .backend_agg import FigureCanvasAgg
from .backend_qt5 import QtCore
from .backend_qt5 import QtGui
from .backend_qt5 import FigureManagerQT
from .backend_qt5 import NavigationToolbar2QT
##### Modified Qt5 backend import
from .backend_qt5 import FigureCanvasQT
##### not used
from .backend_qt5 import show
from .backend_qt5 import draw_if_interactive
from .backend_qt5 import backend_version
######
from .qt_compat import QT_API
DEBUG = False
_decref = ctypes.pythonapi.Py_DecRef
_decref.argtypes = [ctypes.py_object]
_decref.restype = None
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if DEBUG:
print('backend_qt5agg.new_figure_manager')
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasQTAgg(figure)
return FigureManagerQT(canvas, num)
class FigureCanvasQTAggBase(object):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def __init__(self, figure):
super(FigureCanvasQTAggBase, self).__init__(figure=figure)
self._agg_draw_pending = False
def drawRectangle(self, rect):
self._drawRect = rect
self.update()
def paintEvent(self, e):
"""
Copy the image from the Agg canvas to the qt.drawable.
In Qt, all drawing should be done inside of here when a widget is
shown onscreen.
"""
# if the canvas does not have a renderer, then give up and wait for
# FigureCanvasAgg.draw(self) to be called
if not hasattr(self, 'renderer'):
return
if DEBUG:
print('FigureCanvasQtAgg.paintEvent: ', self,
self.get_width_height())
if self.blitbox is None:
# matplotlib is in rgba byte order. QImage wants to put the bytes
# into argb format and is in a 4 byte unsigned int. Little endian
# system is LSB first and expects the bytes in reverse order
# (bgra).
if QtCore.QSysInfo.ByteOrder == QtCore.QSysInfo.LittleEndian:
stringBuffer = self.renderer._renderer.tostring_bgra()
else:
stringBuffer = self.renderer._renderer.tostring_argb()
refcnt = sys.getrefcount(stringBuffer)
# convert the Agg rendered image -> qImage
qImage = QtGui.QImage(stringBuffer, self.renderer.width,
self.renderer.height,
QtGui.QImage.Format_ARGB32)
# get the rectangle for the image
rect = qImage.rect()
p = QtGui.QPainter(self)
# reset the image area of the canvas to be the back-ground color
p.eraseRect(rect)
# draw the rendered image on to the canvas
p.drawPixmap(QtCore.QPoint(0, 0), QtGui.QPixmap.fromImage(qImage))
# draw the zoom rectangle to the QPainter
if self._drawRect is not None:
p.setPen(QtGui.QPen(QtCore.Qt.black, 1, QtCore.Qt.DotLine))
x, y, w, h = self._drawRect
p.drawRect(x, y, w, h)
p.end()
# This works around a bug in PySide 1.1.2 on Python 3.x,
# where the reference count of stringBuffer is incremented
# but never decremented by QImage.
# TODO: revert PR #1323 once the issue is fixed in PySide.
del qImage
if refcnt != sys.getrefcount(stringBuffer):
_decref(stringBuffer)
else:
bbox = self.blitbox
l, b, r, t = bbox.extents
w = int(r) - int(l)
h = int(t) - int(b)
t = int(b) + h
reg = self.copy_from_bbox(bbox)
stringBuffer = reg.to_string_argb()
qImage = QtGui.QImage(stringBuffer, w, h,
QtGui.QImage.Format_ARGB32)
# Adjust the stringBuffer reference count to work around a memory
# leak bug in QImage() under PySide on Python 3.x
if QT_API == 'PySide' and six.PY3:
ctypes.c_long.from_address(id(stringBuffer)).value = 1
pixmap = QtGui.QPixmap.fromImage(qImage)
p = QtGui.QPainter(self)
p.drawPixmap(QtCore.QPoint(l, self.renderer.height-t), pixmap)
# draw the zoom rectangle to the QPainter
if self._drawRect is not None:
p.setPen(QtGui.QPen(QtCore.Qt.black, 1, QtCore.Qt.DotLine))
x, y, w, h = self._drawRect
p.drawRect(x, y, w, h)
p.end()
self.blitbox = None
def draw(self):
"""
Draw the figure with Agg, and queue a request for a Qt draw.
"""
# The Agg draw is done here; delaying causes problems with code that
# uses the result of the draw() to update plot elements.
FigureCanvasAgg.draw(self)
self.update()
def draw_idle(self):
"""
Queue redraw of the Agg buffer and request Qt paintEvent.
"""
# The Agg draw needs to be handled by the same thread matplotlib
# modifies the scene graph from. Post Agg draw request to the
# current event loop in order to ensure thread affinity and to
# accumulate multiple draw requests from event handling.
# TODO: queued signal connection might be safer than singleShot
if not self._agg_draw_pending:
self._agg_draw_pending = True
QtCore.QTimer.singleShot(0, self.__draw_idle_agg)
def __draw_idle_agg(self, *args):
if self.height() < 0 or self.width() < 0:
self._agg_draw_pending = False
return
try:
FigureCanvasAgg.draw(self)
self.update()
except Exception:
# Uncaught exceptions are fatal for PyQt5, so catch them instead.
traceback.print_exc()
finally:
self._agg_draw_pending = False
def blit(self, bbox=None):
"""
Blit the region in bbox
"""
# If bbox is None, blit the entire canvas. Otherwise
# blit only the area defined by the bbox.
if bbox is None and self.figure:
bbox = self.figure.bbox
self.blitbox = bbox
l, b, w, h = bbox.bounds
t = b + h
self.repaint(l, self.renderer.height-t, w, h)
def print_figure(self, *args, **kwargs):
FigureCanvasAgg.print_figure(self, *args, **kwargs)
self.draw()
class FigureCanvasQTAgg(FigureCanvasQTAggBase,
FigureCanvasQT, FigureCanvasAgg):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc.
Modified to import from Qt5 backend for new-style mouse events.
Public attribute
figure - A Figure instance
"""
def __init__(self, figure):
if DEBUG:
print('FigureCanvasQtAgg: ', figure)
super(FigureCanvasQTAgg, self).__init__(figure=figure)
self._drawRect = None
self.blitbox = None
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
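# Illustrative usage sketch, not part of the original backend: render a small
# figure through FigureCanvasQTAgg. It assumes a QApplication has already
# been created, since Qt widgets cannot be instantiated without one.
def _example_standalone_canvas():
    from matplotlib.figure import Figure
    fig = Figure(figsize=(4, 3), dpi=100)
    ax = fig.add_subplot(111)
    ax.plot([0, 1, 2, 3], [0, 1, 0, 1])
    canvas = FigureCanvasQTAgg(fig)
    canvas.draw()  # Agg renders here; paintEvent copies the buffer to Qt
    return canvas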
FigureCanvas = FigureCanvasQTAgg
FigureManager = FigureManagerQT
| mit |
holdenk/spark | python/pyspark/sql/tests/test_pandas_cogrouped_map.py | 20 | 9306 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.sql.functions import array, explode, col, lit, udf, pandas_udf
from pyspark.sql.types import DoubleType, StructType, StructField, Row
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
import pandas as pd
from pandas.testing import assert_frame_equal
if have_pyarrow:
import pyarrow as pa # noqa: F401
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore[arg-type]
class CogroupedMapInPandasTests(ReusedSQLTestCase):
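    # These tests exercise the cogrouped map API: two grouped DataFrames are
    # combined with df1.groupby(...).cogroup(df2.groupby(...)), and
    # applyInPandas(func, schema) calls func once per co-grouped key with one
    # pandas DataFrame from each side (optionally preceded by the key),
    # expecting a pandas DataFrame matching schema in return.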
@property
def data1(self):
return self.spark.range(10).toDF('id') \
.withColumn("ks", array([lit(i) for i in range(20, 30)])) \
.withColumn("k", explode(col('ks')))\
.withColumn("v", col('k') * 10)\
.drop('ks')
@property
def data2(self):
return self.spark.range(10).toDF('id') \
.withColumn("ks", array([lit(i) for i in range(20, 30)])) \
.withColumn("k", explode(col('ks'))) \
.withColumn("v2", col('k') * 100) \
.drop('ks')
def test_simple(self):
self._test_merge(self.data1, self.data2)
def test_left_group_empty(self):
left = self.data1.where(col("id") % 2 == 0)
self._test_merge(left, self.data2)
def test_right_group_empty(self):
right = self.data2.where(col("id") % 2 == 0)
self._test_merge(self.data1, right)
def test_different_schemas(self):
right = self.data2.withColumn('v3', lit('a'))
self._test_merge(self.data1, right, 'id long, k int, v int, v2 int, v3 string')
def test_complex_group_by(self):
left = pd.DataFrame.from_dict({
'id': [1, 2, 3],
'k': [5, 6, 7],
'v': [9, 10, 11]
})
right = pd.DataFrame.from_dict({
'id': [11, 12, 13],
'k': [5, 6, 7],
'v2': [90, 100, 110]
})
left_gdf = self.spark\
.createDataFrame(left)\
.groupby(col('id') % 2 == 0)
right_gdf = self.spark \
.createDataFrame(right) \
.groupby(col('id') % 2 == 0)
def merge_pandas(l, r):
return pd.merge(l[['k', 'v']], r[['k', 'v2']], on=['k'])
result = left_gdf \
.cogroup(right_gdf) \
.applyInPandas(merge_pandas, 'k long, v long, v2 long') \
.sort(['k']) \
.toPandas()
expected = pd.DataFrame.from_dict({
'k': [5, 6, 7],
'v': [9, 10, 11],
'v2': [90, 100, 110]
})
assert_frame_equal(expected, result)
def test_empty_group_by(self):
left = self.data1
right = self.data2
def merge_pandas(l, r):
return pd.merge(l, r, on=['id', 'k'])
result = left.groupby().cogroup(right.groupby())\
.applyInPandas(merge_pandas, 'id long, k int, v int, v2 int') \
.sort(['id', 'k']) \
.toPandas()
left = left.toPandas()
right = right.toPandas()
expected = pd \
.merge(left, right, on=['id', 'k']) \
.sort_values(by=['id', 'k'])
assert_frame_equal(expected, result)
def test_mixed_scalar_udfs_followed_by_cogrouby_apply(self):
df = self.spark.range(0, 10).toDF('v1')
df = df.withColumn('v2', udf(lambda x: x + 1, 'int')(df['v1'])) \
.withColumn('v3', pandas_udf(lambda x: x + 2, 'int')(df['v1']))
result = df.groupby().cogroup(df.groupby()) \
.applyInPandas(lambda x, y: pd.DataFrame([(x.sum().sum(), y.sum().sum())]),
'sum1 int, sum2 int').collect()
self.assertEqual(result[0]['sum1'], 165)
self.assertEqual(result[0]['sum2'], 165)
def test_with_key_left(self):
self._test_with_key(self.data1, self.data1, isLeft=True)
def test_with_key_right(self):
self._test_with_key(self.data1, self.data1, isLeft=False)
def test_with_key_left_group_empty(self):
left = self.data1.where(col("id") % 2 == 0)
self._test_with_key(left, self.data1, isLeft=True)
def test_with_key_right_group_empty(self):
right = self.data1.where(col("id") % 2 == 0)
self._test_with_key(self.data1, right, isLeft=False)
def test_with_key_complex(self):
def left_assign_key(key, l, _):
return l.assign(key=key[0])
result = self.data1 \
.groupby(col('id') % 2 == 0)\
.cogroup(self.data2.groupby(col('id') % 2 == 0)) \
.applyInPandas(left_assign_key, 'id long, k int, v int, key boolean') \
.sort(['id', 'k']) \
.toPandas()
expected = self.data1.toPandas()
expected = expected.assign(key=expected.id % 2 == 0)
assert_frame_equal(expected, result)
def test_wrong_return_type(self):
        # Test that we get a sensible exception for invalid values passed to apply
left = self.data1
right = self.data2
with QuietTest(self.sc):
with self.assertRaisesRegex(
NotImplementedError,
'Invalid return type.*ArrayType.*TimestampType'):
left.groupby('id').cogroup(right.groupby('id')).applyInPandas(
lambda l, r: l, 'id long, v array<timestamp>')
def test_wrong_args(self):
left = self.data1
right = self.data2
with self.assertRaisesRegex(ValueError, 'Invalid function'):
left.groupby('id').cogroup(right.groupby('id')) \
.applyInPandas(lambda: 1, StructType([StructField("d", DoubleType())]))
def test_case_insensitive_grouping_column(self):
# SPARK-31915: case-insensitive grouping column should work.
df1 = self.spark.createDataFrame([(1, 1)], ("column", "value"))
row = df1.groupby("ColUmn").cogroup(
df1.groupby("COLUMN")
).applyInPandas(lambda r, l: r + l, "column long, value long").first()
self.assertEqual(row.asDict(), Row(column=2, value=2).asDict())
df2 = self.spark.createDataFrame([(1, 1)], ("column", "value"))
row = df1.groupby("ColUmn").cogroup(
df2.groupby("COLUMN")
).applyInPandas(lambda r, l: r + l, "column long, value long").first()
self.assertEqual(row.asDict(), Row(column=2, value=2).asDict())
def test_self_join(self):
# SPARK-34319: self-join with FlatMapCoGroupsInPandas
df = self.spark.createDataFrame([(1, 1)], ("column", "value"))
row = df.groupby("ColUmn").cogroup(
df.groupby("COLUMN")
).applyInPandas(lambda r, l: r + l, "column long, value long")
row = row.join(row).first()
self.assertEqual(row.asDict(), Row(column=2, value=2).asDict())
@staticmethod
def _test_with_key(left, right, isLeft):
def right_assign_key(key, l, r):
return l.assign(key=key[0]) if isLeft else r.assign(key=key[0])
result = left \
.groupby('id') \
.cogroup(right.groupby('id')) \
.applyInPandas(right_assign_key, 'id long, k int, v int, key long') \
.toPandas()
expected = left.toPandas() if isLeft else right.toPandas()
expected = expected.assign(key=expected.id)
assert_frame_equal(expected, result)
@staticmethod
def _test_merge(left, right, output_schema='id long, k int, v int, v2 int'):
def merge_pandas(l, r):
return pd.merge(l, r, on=['id', 'k'])
result = left \
.groupby('id') \
.cogroup(right.groupby('id')) \
.applyInPandas(merge_pandas, output_schema)\
.sort(['id', 'k']) \
.toPandas()
left = left.toPandas()
right = right.toPandas()
expected = pd \
.merge(left, right, on=['id', 'k']) \
.sort_values(by=['id', 'k'])
assert_frame_equal(expected, result)
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_cogrouped_map import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
ph4m/eand | eand/demo/multidiff_regular_demo.py | 1 | 2528 | '''
eand package (Easy Algebraic Numerical Differentiation)
Copyright (C) 2013 Tu-Hoa Pham
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import matplotlib.pyplot as plt
from math import cos,sin
import numpy as np
from eand.mddig.multidiff import MultiDiff
print 'Initializing estimation parameters...'
Ns = 500
t1Min = -1.0
t1Max = 1.0
t2Min = -1.0
t2Max = 1.0
nSamplesSorted = [25,25]
dtVec = [(t1Max-t1Min)/nSamplesSorted[0],(t2Max-t2Min)/nSamplesSorted[1]]
tProto = np.arange(-1.,1.05,0.05)
t1 = []
t2 = []
for i in tProto:
for j in tProto:
t1.append(i)
t2.append(j)
n1 = 1
alpha1 = 3
beta1 = 3
n2 = 0
alpha2 = 0
beta2 = 0
T1 = 0.25
T2 = 0.25
'''
# 1D case
paramVec = [[n1,alpha1,beta1,T1]]
tVec = [t1]
'''
# 2D case
paramVec = [[n1,alpha1,beta1,T1],[n2,alpha2,beta2,T2]]
tVec = [t1,t2]
nDim = len(tVec)
nSamples = len(tVec[0])
signal = [cos(2*sum([t[i] for t in tVec])) for i in range(nSamples)]
tVec = np.array(tVec)
alreadySorted = 2
nSamplesSorted = [len(tProto),len(tProto)]
toNextValueSorted = [round(np.product([nSamplesSorted[dim] for dim in range(i+1,alreadySorted)])) for i in range(alreadySorted)]
dtVec = [tVec[i][toNextValueSorted[i]]-tVec[i][0] for i in range(alreadySorted)]
print 'Building differentiator...'
multiDiff = MultiDiff(paramVec,tVec,alreadySorted,nSamplesSorted)
print 'Plotting partition...'
multiDiff.plotPartition(0)
print 'Commencing differentiation...'
(tPostVec,dPost) = multiDiff.differentiate(signal)
print 'Calculating reference derivative...'
tSlice,dSlice = multiDiff.plotSlice(tPostVec,dPost,1,0.,0.1,0)
if sum([paramVec[i][0] for i in range(nDim)]) == 1:
dRef = [-2*sin(2*i) for i in tSlice]
elif sum([paramVec[i][0] for i in range(nDim)]) == 2:
dRef = [-4*cos(2*i) for i in tSlice]
plt.plot(tSlice,dRef,'r.')
print 'Plotting derivative estimate...'
multiDiff.plotScatter(tPostVec, dPost,0)
multiDiff.plotSurface(tPostVec, dPost,0)
plt.show()
print 'SUCCESS!'
'''
'''
| gpl-3.0 |
julioasotodv/spark-df-profiling | setup.py | 1 | 1412 | import os
__location__ = os.path.dirname(__file__)
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='spark-df-profiling',
version='1.1.13',
author='Julio Antonio Soto de Vicente',
author_email='[email protected]',
packages=['spark_df_profiling'],
url='https://github.com/julioasotodv/spark-df-profiling',
license='MIT',
description='Create HTML profiling reports from Apache Spark DataFrames',
install_requires=[
"pandas>=0.17.0",
"matplotlib>=1.4",
"jinja2>=2.8",
"six>=1.9.0"
],
include_package_data = True,
classifiers=[
'Development Status :: 3 - Alpha',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Environment :: Console',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering',
'Framework :: IPython',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5'
],
keywords='spark pyspark report big-data pandas data-science data-analysis python jupyter ipython',
)
| mit |
ominux/scikit-learn | sklearn/svm/tests/test_bounds.py | 2 | 2389 | import nose
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.svm.sparse import LinearSVC as SparseSVC
from sklearn.linear_model.sparse.logistic import LogisticRegression as \
SparseLogRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
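# l1_min_c returns the lowest bound for C such that, for any C above it, an
# L1-penalized model fit on (X, y) is guaranteed not to be empty; the checks
# below verify the all-zero / non-zero coefficient transition around that
# bound.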
def test_l1_min_c():
losses = ['l2', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = 'Test l1_min_c loss=%r %s %s %s' % \
(loss, X_label, Y_label, intercept_label)
yield check
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
('log', False): LogisticRegression(penalty='l1'),
('log', True): SparseLogRegression(penalty='l1'),
('l2', False): LinearSVC(loss='l2', penalty='l1', dual=False),
('l2', True): SparseSVC(loss='l2', penalty='l1', dual=False),
}[loss, sp.issparse(X)]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert (np.asanyarray(clf.coef_) == 0).all()
assert (np.asanyarray(clf.intercept_) == 0).all()
clf.C = min_c * 1.01
clf.fit(X, y)
assert (np.asanyarray(clf.coef_) != 0).any() or \
(np.asanyarray(clf.intercept_) != 0).any()
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
dfdx/masque | others/faceless-python-old/faceless/procrustes_simple.py | 1 | 3486 |
import cv2
from numpy import *
import matplotlib.delaunay as triang
import pylab
from helpers import *
def mean_of_columns(mat):
"""Returns 1-row matrix representing means of
corresponding columns
"""
return mat.mean(axis=0)
def center(shp):
return mean_of_columns(shp)
def move_to(shp, p):
"""Moves shape so that its center is in point p
"""
center = mean_of_columns(shp)
# print center
return (shp + p - center).astype(int)
def move_to_origin(pts):
"""Moves shape to origin and returns previous coordinates of center
of the shape
"""
avgs = mean_of_columns(pts)
for i in range(avgs.shape[0]):
pts[:, i] = pts[:, i] - avgs[i]
return avgs.tolist()
def dist_from_origin(pts):
"""Returns distance of every point from origin. Points should be given
as column matrix, where each row represents point in N-dimensional space"""
x2 = pow(pts[:, 0], 2)
y2 = pow(pts[:, 1], 2)
return sqrt(x2 + y2)
def scale_factor(pts, pts_r):
dist = dist_from_origin(pts)
dist_r = dist_from_origin(pts_r)
return mean(dist) / mean(dist_r)
def rotate(shp, angle):
rot_mat = array([[cos(angle), -sin(angle)],
[sin(angle), cos(angle)]])
shp_t = shp.transpose()
return dot(rot_mat, shp_t).transpose()
def rad_to_degree(radians):
return double(radians) * 180 / pi
def degree_to_rad(degrees):
return double(degrees) * pi / 180
def angle_diff(a, b):
# return arctan2(sin(a - b), cos(a - b))
# print a, b
d = a - b
if d > pi:
d -= 2 * pi
if d < -pi:
d += 2 * pi
return d
def angle_diff2(x, y):
return [angle_diff(a, b) for a, b in zip(x.tolist(), y.tolist())]
def move_to_center(shp, size=(480, 640, 3)):
hm = size[0] / 2
wm = size[1] / 2
return move_to(shp, (wm, hm))
def procrustes(shape_ref, shape):
""" Aligns N dimentional shape represented as N-column matrix "shape" to
reference shape "shape_ref" of same dimentions
"""
shp = copy(shape)
shp_ref = copy(shape_ref)
move_to_origin(shp)
center = move_to_origin(shp_ref)
    # scale factor that brings shp to the reference shape's size
    scale = scale_factor(shp_ref, shp)
shp *= scale
rot = arctan2(shp[:, 1], shp[:, 0])
rot_r = arctan2(shp_ref[:, 1], shp_ref[:, 0])
rot_offset = -mean(angle_diff2(rot, rot_r))
shp = rotate(shp, rot_offset)
shp = move_to(shp, center)
return shp.astype(int)
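# Illustrative usage sketch, not part of the original module: build a scaled,
# rotated and translated copy of a reference triangle and align it back onto
# the reference. The coordinates are arbitrary example values.
def _example_procrustes():
    ref = array([[0, 0], [10, 0], [5, 8]])
    moved = rotate(ref * 1.5, degree_to_rad(30)) + array([20, 10])
    return procrustes(ref, moved)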
# def procrustes(shapes):
# shp_ref = move_to_center(shapes[0])
# return [align(shp_ref, shp) for shp in shapes]
######### shortcuts #########
def show_aligned(shp_ref, shp):
    shp_aligned = procrustes(shp_ref, shp)
black = zeros((480, 640, 3))
drawshape(black, shp_ref, color=(255, 0, 0), method='poly')
drawshape(black, shp, color=(0, 255, 0), method='poly')
drawshape(black, shp_aligned, color=(255, 255, 255), method='poly')
show(black)
def plot_delaunay(pts):
x = pts[:, 0]
y = pts[:, 1]
cens, edg, tri, neig = triang.delaunay(x, y)
for t in tri:
t_i = [t[0], t[1], t[2], t[0]]
pylab.plot(x[t_i], y[t_i])
pylab.plot(x, y, 'o')
pylab.show()
def show_pdm(shapes, size=(480, 640)):
black = zeros(size)
    # align every shape to the first one, using the two-argument procrustes()
    shp_ref = move_to_center(shapes[0], size)
    shapes = [procrustes(shp_ref, shp) for shp in shapes]
for shp in shapes:
drawshape(black, shp, pt_sz=1)
show(black)
###########################
def go():
shapes = icaam_shapes()
show_pdm(shapes)
| mit |
jelledevleeschouwer/geomess | apps/IEEE802154/plot.py | 4 | 1038 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import sys
maxrange = 0
minrange = 0
fig1 = plt.figure()
ax = fig1.add_subplot(111, aspect = "equal")
def addnode(x,y,rm,rg, minrange, maxrange):
ax.plot([x],[y], 'ro')
circle=plt.Circle((x,y),rm,color='r', fill=False)
circleg=plt.Circle((x,y),rg,color='g', fill=False)
fig1.gca().add_artist(circle)
fig1.gca().add_artist(circleg)
if maxrange < x + rm:
maxrange = x + rm
if maxrange < y + rm:
maxrange = y + rm
if minrange > x - rm:
minrange = x - rm
if minrange > y - rm:
minrange = y - rm
return minrange,maxrange
f = open("map.csv") or sys.exit()
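# Assumed input format (inferred from the parsing below): map.csv holds one
# node per line as four comma-separated integers "x,y,r1,r2"; r1 is drawn as
# the red circle and r2 as the green circle around the node at (x, y).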
while True:
l = f.readline()
if l == "":
break
pos = l.split(',')
minrange,maxrange = addnode(int(pos[0]), int(pos[1]), int(pos[2]), int(pos[3]), minrange,maxrange)
f.close()
print("min max:",minrange," ",maxrange)
ax.set_xlim((minrange,maxrange))
ax.set_ylim((minrange,maxrange))
plt.show()
| gpl-2.0 |
rhiever/sklearn-benchmarks | model_code/random_search_preprocessing/BernoulliNB.py | 1 | 2184 | import sys
import pandas as pd
import numpy as np
from sklearn.preprocessing import Binarizer, MaxAbsScaler, MinMaxScaler
from sklearn.preprocessing import Normalizer, PolynomialFeatures, RobustScaler, StandardScaler
from sklearn.decomposition import FastICA, PCA
from sklearn.kernel_approximation import RBFSampler, Nystroem
from sklearn.cluster import FeatureAgglomeration
from sklearn.feature_selection import SelectFwe, SelectPercentile, VarianceThreshold
from sklearn.feature_selection import SelectFromModel, RFE
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.naive_bayes import BernoulliNB
from evaluate_model import evaluate_model
dataset = sys.argv[1]
num_param_combinations = int(sys.argv[2])
random_seed = int(sys.argv[3])
preprocessor_num = int(sys.argv[4])
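# Hypothetical invocation (the argument values below are illustrative only):
#   python BernoulliNB.py some_dataset.csv.gz 100 42 5
# i.e. <dataset> <num_param_combinations> <random_seed> <preprocessor_num>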
np.random.seed(random_seed)
preprocessor_list = [Binarizer, MaxAbsScaler, MinMaxScaler, Normalizer,
PolynomialFeatures, RobustScaler, StandardScaler,
FastICA, PCA, RBFSampler, Nystroem, FeatureAgglomeration,
SelectFwe, SelectPercentile, VarianceThreshold,
SelectFromModel, RFE]
chosen_preprocessor = preprocessor_list[preprocessor_num]
pipeline_components = [chosen_preprocessor, BernoulliNB]
pipeline_parameters = {}
alpha_values = np.random.uniform(low=0., high=50., size=num_param_combinations)
fit_prior_values = np.random.choice([True, False], size=num_param_combinations)
binarize_values = np.random.uniform(low=0., high=1., size=num_param_combinations)
all_param_combinations = zip(alpha_values, fit_prior_values, binarize_values)
pipeline_parameters[BernoulliNB] = [{'alpha': alpha, 'fit_prior': fit_prior, 'binarize': binarize}
for (alpha, fit_prior, binarize) in all_param_combinations]
if chosen_preprocessor is SelectFromModel:
pipeline_parameters[SelectFromModel] = [{'estimator': ExtraTreesClassifier(n_estimators=100, random_state=324089)}]
elif chosen_preprocessor is RFE:
pipeline_parameters[RFE] = [{'estimator': ExtraTreesClassifier(n_estimators=100, random_state=324089)}]
evaluate_model(dataset, pipeline_components, pipeline_parameters)
| mit |
zdvresearch/fast15-paper-extras | tape_mounts/robot_mount_logs/src/robot_components.py | 1 | 135522 | __author__ = 'maesker'
from bisect import bisect_left, bisect_right
from collections import Counter
import json, os, string, re, csv, StringIO, sys, time, datetime, gzip, glob, calendar, math, copy
import threading, multiprocessing, Queue, gc
from multiprocessing.sharedctypes import Value, Array
from correlation import corrPearson
from StateMachine import FiniteStateMachine
import python_daemon
try:
from HandyHelperTools.plotting import fast_plot
import matplotlib.pyplot as plt
import numpy
from TimeTools import get_epochs, Timer # Timer is used below in God.robot_read
except:
pass
## multi processing callback functions
def _cb_gzipdmp(prefix, result):
# note: despite the name, this helper writes plain (uncompressed) JSON to /tmp
file = os.path.join("/tmp", "%s_%s.json"%(prefix,os.getpid()))
print file
with open(file, 'w') as f:
json.dump(result, f, indent=1)
def _cb_gzipload(file):
res = None
with open(file, 'r') as f:
res = json.load(f)
return res
def calc_correlation(inque, correlated, getcrtcb, prefix, interval_secs):
success = {}
for i in sorted(interval_secs):
success[i] = []
tmptotale, tmptotals = 0,0
while True:
try:
cid = inque.get(True,10)
except Queue.Empty:
break
procs = []
crt = getcrtcb(cid, False)
if crt:
#print 'running cid', cid
s,e = 0,0
for cid2 in sorted(correlated[cid]):
crt2 = getcrtcb(cid2, False)
if crt2:
for event in crt.data['data']:
(res, diffneg, diffpos) = crt2.will_see_mount_request(event[DATA_INDEX_REQM])
if res:
if diffpos <= max(interval_secs):
s += 1
index = bisect_left(interval_secs, diffpos)
# print diff, interval_secs[index]
success[interval_secs[index]].append(diffpos)
continue
# prefers positive diffs over negative, since positive diffs are successful
# prefetches. Negative diffs are recent mounts that could be used to prevent
# false prefetches
#print cid, cid2, event, diff
if abs(diffneg) <= max(interval_secs):
s += 1
index = bisect_left(interval_secs, diffneg)
# print diff, interval_secs[index]
success[interval_secs[index]].append(diffneg)
continue
# continue with negative mount request diff.
e+=1
# print "%s-%s: e:%i,\t s+:%i, \t s-:%i, \t"%(cid,cid2,e,s,s2)
tmptotale += e
tmptotals += s
else:
print "crt %s not found."%(cid)
output = {
'errorcnt': tmptotale,
'successcnt': tmptotals,
'interval' : success
}
file = os.path.join("/tmp", "%s_%s.json.gz"%(prefix,os.getpid()))
print 'writing ',file
with gzip.open(file, 'w') as f:
json.dump(output, f, indent=1)
print 'exit'
def add_correlated(d, a, b):
for i in [a,b]:
if i not in d.keys():
d[i]=set()
d[a].add(b)
d[b].add(a)
def _tapestats(inqueue, slot, crtlist, getcrt, drvlist, getdrv, atts, prefix):
x = {slot:{}}
while True:
try:
tsstr = inqueue.get(True,10)
ts = datetime.datetime.strptime(tsstr, "%Y-%m-%d %H:%M:%S")
epochts = int(calendar.timegm(ts.utctimetuple()))
epochse = epochts + get_slot_size_seconds(slot, ts.month, ts.year)
#print slot,ts, epochts, epochse
x[slot][tsstr]={VOL_MNT_LENGTH:0, CLN_TIME_LENGTH:0}
for crt in crtlist: # not sorted
x[slot][tsstr][VOL_MNT_LENGTH] += crt.get_volume(slot, ts)
wq2 = multiprocessing.Queue()
for drv in drvlist:
x[slot][tsstr][CLN_TIME_LENGTH] += sum(drv.estimate_cleaning_time(epochts,epochse))
for att in atts:
if att not in x[slot][tsstr].keys():
x[slot][tsstr][att] = 0
for drv in drvlist:
val = drv.get_pertime(slot, ts, att)
#print drv, slot, ts, att, val
x[slot][tsstr][att] += val
except Queue.Empty:
break
fn = "%s_%s"%(prefix,slot)
_cb_gzipdmp(fn,x)
def _cb_pertime(inqueue):
while True:
try:
obj = inqueue.get(True, 10)
obj.pertime()
except Queue.Empty:
break
def _cb_drvclntime(epochts,epochse, inqueue, output, lock):
res = 0
while True:
try:
drv = inqueue.get(True, 10)
except Queue.Empty:
break
# accumulate the estimated cleaning time of each drive pulled from the queue
res += sum(drv.estimate_cleaning_time(epochts, epochse))
lock.acquire(True)
output.value = output.value + res
lock.release()
def get_epoch(ts):
t = time.strptime(ts, '%Y%m%d:%H%M%S')
epoch = calendar.timegm(t)
return epoch
def unix_time(dt):
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
return delta.total_seconds()
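# e.g. unix_time(datetime.datetime(1970, 1, 2)) == 86400.0 and
# get_epoch('19700102:000000') == 86400.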
HID_INT_MAP = {'o':0}
DRV_INT_MAP = {'o':0}
def plot_dict(d, fn=None):
keys = sorted(d.keys())
n = numpy.arange(len(keys))
ticksstep = int(math.sqrt(len(keys)))
#print keys
for k in keys:
plt.bar(k, d[k])
plt.xticks(n[0::ticksstep], keys[0::ticksstep])
if fn:
plt.savefig(fn)
else:
plt.show()
def percentile(N, P):
"""
Find the percentile of a list of values
@parameter N - A list of values. N must be sorted.
@parameter P - A float value from 0.0 to 1.0
@return - The percentile of the values.
"""
if len(N)==0:
return 0.0
n = int(round(P * len(N) + 0.5))
return N[n-1]
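# e.g. percentile(sorted(range(1, 101)), 0.9) returns 91 with this
# round-half-up indexing.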
def linebuf_write(buf, value):
if type(value)==type(0.0):
buf.write(";%.3f"%round(value,3))
elif type(value)==type(2):
buf.write(";%i"%value)
else:
buf.write(";%s"%value)
crtfilter_a = ['B' ,'P', 'Q', 'E', 'D']
crtfilter_b = ['MP', 'RT', 'ME', 'MS', 'M0', 'MA']
FULLRUN=False
TIME_BTWN_MNTS = 'tbm' # time between dismount and mount cartridge
CRT_MNT_LENGTH = 'ml' # time between mnt and dismount cartridge
VOL_MNT_LENGTH = "vol_ml" # volume mount length
CLN_TIME_LENGTH = 'clntm' # cleaning time
TOTAL_MNTS = 'tm'
TOTAL_MNT_TIME = 'tmt'
LATENCY_MNT = "latmnt" # time between mnt request and mount
LATENCY_DMNT = "latdismnt" # time between dismnt request and dismnt
LATENCY_DMV_DMCR = "lat_dmv_dmcr" # time between volume dismount and cartridge dismount request
LATENCY_HOME_AVG = "latency_mean"
HOME_TOTAL_OPS = "home_total_ops"
REMNT_30_SEC = 'rem_30_sec' # in percent
REMNT_60_SEC = 'rem_60_sec' # in percent
REMNT_120_SEC = 'rem_120_sec' # in percent
REMNT_300_SEC = 'rem_300_sec' # in percent
REMNT_600_SEC = 'rem_600_sec' # in percent
REMNT_1800_SEC = 'rem_1800_sec'
REMNT_3600_SEC = 'rem_3600_sec' # in percent
REMNT_7200_SEC = 'rem_7200_sec' # in percent
REMNT_86400_SEC = 'rem_86400_sec' # in percent
REMNT_regexpattern = re.compile("rem_([0-9]+)_sec")
HOTNESS = "hot" # hotness in hours
PERTIMESLOT = ['per_hour', 'per_day', 'per_week','per_month', 'per_year']
def get_slot_end(ts, slot):
return ts+datetime.timedelta(seconds=get_slot_size_seconds(slot,ts.month, ts.year))
def get_slot_size_seconds(slot, month, year):
hour = 3600.0
if slot=='per_hour':
return hour
elif slot=='per_day':
return hour*24
elif slot=='per_week':
return hour*24*7
elif slot == 'per_month':
# month is 1-based (datetime.month), as passed in by the callers
if month in [1, 3, 5, 7, 8, 10, 12]:
return hour*24*31
if month in [4, 6, 9, 11]:
return hour*24*30
if not year % 4:
return hour*24*29 # leap-year February
return hour*24*28
elif slot == 'per_year':
if not year%4:
return hour*24*366 # leap year
return hour*24*365
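# e.g. get_slot_size_seconds('per_day', 6, 2013) == 86400.0 and
# get_slot_size_seconds('per_year', 1, 2012) == 31622400.0 (366 days).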
GLOBAL_CRT = [TOTAL_MNTS, REMNT_30_SEC, REMNT_60_SEC, REMNT_120_SEC, REMNT_300_SEC , REMNT_600_SEC, REMNT_1800_SEC, REMNT_3600_SEC, REMNT_7200_SEC,REMNT_86400_SEC ]
GLOBAL_DRV = [TOTAL_MNTS, REMNT_30_SEC, REMNT_60_SEC, REMNT_120_SEC, REMNT_300_SEC , REMNT_600_SEC, REMNT_1800_SEC, REMNT_3600_SEC, REMNT_7200_SEC,REMNT_86400_SEC ]
SPECIAL_CRT = [CRT_MNT_LENGTH, TIME_BTWN_MNTS, LATENCY_MNT, LATENCY_DMNT, LATENCY_DMV_DMCR]
SPECIAL_DRV = [CRT_MNT_LENGTH, TIME_BTWN_MNTS, LATENCY_MNT, LATENCY_DMNT, CLN_TIME_LENGTH]
GLOBAL_HM = [LATENCY_HOME_AVG, HOME_TOTAL_OPS]
HOME_OPERATION_EJECT = 0
HOME_OPERATION_INJECT = 1
DATA_INDEX_REQM = 0 # request mount
DATA_INDEX_M = 1 # mount done
DATA_INDEX_VOLUME = 2 # volume (mount-dismounts)
DATA_INDEX_REQD = 3 # request dismount cartridge
DATA_INDEX_D = 4 # dismount cartridge
DATA_INDEX_MH = 5
DATA_INDEX_DH = 6
DATA_INDEX_DRV = 7 # drive id, cartridge id or home id,
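# A self.data['data'] entry is therefore a flat list indexed by the constants
# above: [reqm, m, [(vol_mount, vol_dismount), ...], reqd, d, mount_home,
# dismount_home, drive/cartridge id] (built in BaseStats._transform_dictentry).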
FSM_CRT_HOME = 1
FSM_CRT_LOADING = 3
FSM_CRT_VOLMNTED = 4
FSM_CRT_LOADED = 5
FSM_CRT_UNLOADING = 6
FSM_CRT_NEW = 7
FSM_CRT_IMPLICIT_CRTMNT = 10
FSM_CRT_VOLMNTDISMNTFU = 11
FSM_CRT_ERROR = 20
FSM_CRT_ERROR_FATAL = 21
FSM_CRT_D2DMV = 22
FSM_CRT_EJECTED = 23
FSM_DRV_LOADED = 30
#FSM_DRV_DISMNTREQ = 31
FSM_DRV_EMPTY = 32
FSM_DRV_ERROR = 40
FSM_DRV_ERROR_FATAL = 41
FSM_DRV_MAYBERECOVERED = 42
FSM_EVNT_INJECT = 50
FSM_EVNT_EJECT = 51
FSM_EVNT_MNTREQ = 52
FSM_EVNT_MNTCMPLT = 53
FSM_EVNT_VOLDMNT = 54
FSM_EVNT_VOLMNT = 55
FSM_EVNT_DISMNTCRTCMPLT = 56
FSM_EVNT_DISMNTCRTREQ = 57
FSM_EVNT_FATALERROR_1 = 60
FSM_EVNT_RECOVER_FAT1 = 61
FSM_EVNT_D2DMV = 62
FSM_EVNT_ROB1 = 63 # robot unable to find cartridge
FSM_EVNT_DELDRIVE = 64
class BaseStats:
def __init__(self, basedir):
self.data = {
'id':None,
'errors':[],
'inject':[],
'eject':[],
'data': [],
}
self.fsm = FiniteStateMachine()
self.basedir = basedir
if not os.path.exists(self.basedir ):
os.makedirs(self.basedir)
self.pertimeres = None
self.flushactive()
def active_is_nonempty(self):
for i in [self.active['reqm'], self.active['m'], self.active['reqd'], self.active['d']]:
if i:
return True
if max(self.active['vol'].keys()) > 0:
return True
return False
def flusherror(self):
if self.active_is_nonempty():
self.data['errors'].append(self.active)
self.flushactive()
def _transform_dictentry(self, entry):
y = []
if type(entry['vol'])==type({}):
for k,v in sorted(entry['vol'].items()):
y.append((int(k),int(v)))
if type(entry['vol']) == type([]):
for elem in sorted(entry['vol']):
for k,v in elem.items():
y.append((int(k),int(v)))
return [entry['reqm'], entry['m'], y, entry['reqd'], entry['d'], entry['mh'],entry['dh'], entry['drv']]
def dataappend(self):
if len(self.active['drv']) > 0 and \
(self.active['m'] > 0 or self.active['reqm']) and \
(self.active['d'] > 0 or self.active['reqd']):
# entry look at DATA_INDEX_xxx global variables
self.data['data'].append(self._transform_dictentry(self.active))
self.flushactive()
else:
self.flusherror()
def datainsert(self, entry):
for i in self.data['data']:
if i[DATA_INDEX_REQM] >= entry['d']:
index = self.data['data'].index(i)
self.data['data'].insert(index, self._transform_dictentry(entry))
return
self.data['data'].append(self._transform_dictentry(entry))
def flushactive(self):
self.active = {
'reqm': 0,
'm' : 0,
'vol':{0:0},
'reqd':0,
'd':0,
'mh':0,
'dh':0,
'drv':""
}
def __repr__(self):
return self.data['id']
def estimate_cleaning_time(self, start=None, end=None):
return [] # dummy; a cartridge doesn't need this
# -----------------------------
def collect_recovered_errors(self):
total = len(self.data['errors'])
def isvalid(entry):
for k in ['m','d','reqd','reqm']:
if not k in entry:
return False
if entry[k] <= 0:
return False
for k in ['mh','dh','drv']:
if not k in entry:
return False
if type(entry[k])==type(1) or len(entry[k]) < 2:
return False
self.datainsert(entry)
return True
self.data['errors'][:] = [x for x in self.data['errors'] if not isvalid(x)]
remaining = len(self.data['errors'])
return (total-remaining, remaining)
def get_successful_cycles(self):
return len(self.data['data'])
def get_failed_cycles(self):
return len(self.data['errors'])
def binsearch(self, data, entry, referenceindex, lo=0, hi=None):
if hi is None:
hi = max(0,len(data)-1 )
if abs(hi-lo)<2:
if len(data) > hi:
if data[hi][referenceindex] < entry:
return hi
return lo
pos = (hi+lo)/2
if data[pos][referenceindex]>entry:
hi = pos
else:
lo = pos
return self.binsearch(data,entry,referenceindex,lo,hi)
def robot_mount(self, epoch, drive, library):
most_likeley_entry = {}
startindex = self.binsearch(self.data['data'], epoch, DATA_INDEX_REQM)
while(startindex < len(self.data['data'])):
entry = self.data['data'][startindex]
if entry[DATA_INDEX_REQM] < epoch:
if entry[DATA_INDEX_DRV] == drive:
diff_m = entry[DATA_INDEX_M] - epoch
if abs(diff_m) <= 600 :
index = self.data['data'].index(entry)
most_likeley_entry[diff_m]=index
else:
break
startindex += 1
if len(most_likeley_entry)>0:
x = min(most_likeley_entry.keys())
#print x, most_likeley_entry[x]
entry = self.data['data'][most_likeley_entry[x]]
entry[DATA_INDEX_MH] = library
return True
else:
plausible_entries = {}
for entry in self.data['errors']:
if entry['drv'] == drive:
diff = entry['m'] - epoch
if abs(diff) <= 120:
#print "ok error entry found... ", entry
entry['mh']=library
return True
diff_mr = epoch - entry['reqm']
if diff_mr > 0 and diff_mr < 120:
entry['m']=epoch
entry['mh']=library
return True
if entry['d'] > epoch:
diff = entry['d'] - epoch
plausible_entries[diff] = entry
if len(plausible_entries)>0:
entry = plausible_entries[min(plausible_entries.keys())]
entry['m']=epoch
entry['mh']=library
return True
self.data['errors'].append({'m':epoch, 'reqm':0, 'reqd':0, 'd':0, 'vol':{0:0}, 'mh':library, 'drv':drive, 'dh':""})
#print "mount Nothing found"
def robot_dismount(self, epoch, drive, library):
most_likeley_entry = {}
startindex = self.binsearch(self.data['data'], epoch, DATA_INDEX_REQD)
while(startindex < len(self.data['data'])):
entry = self.data['data'][startindex]
#for entry in self.data['data']:
if entry[DATA_INDEX_REQD] < epoch:
if entry[DATA_INDEX_DRV] == drive:
diff_m = entry[DATA_INDEX_D] - epoch
if diff_m >= 0 or entry[DATA_INDEX_D]==0:
index = self.data['data'].index(entry)
most_likeley_entry[diff_m]=index
else:
break
startindex += 1
if len(most_likeley_entry)>0:
x = min(most_likeley_entry.keys())
#print x, most_likeley_entry[x]
entry = self.data['data'][most_likeley_entry[x]]
entry[DATA_INDEX_DH] = library
if entry[DATA_INDEX_D]==0:
entry[DATA_INDEX_D]=epoch
return True
else:
plausible_entries = {}
for entry in self.data['errors']:
if entry['drv']==drive:
if entry['d'] <= epoch:
diff = entry['d'] - epoch
if diff >= 0 and diff < 120:
#print "ok error entry found... ", entry
entry['dh']=library
return True
if entry['reqd'] <= epoch:
diff = epoch - entry['reqd']
if diff < 120:
#print "ok error entry found... ", entry
entry['d'] = epoch
entry['dh']=library
return True
if entry['m'] < epoch:
diff = epoch - entry['m']
plausible_entries[diff]=entry
if len(plausible_entries)>0:
entry = plausible_entries[min(plausible_entries.keys())]
entry['d']=epoch
entry['dh']=library
return True
self.data['errors'].append({'d':epoch, 'reqm':0, 'reqd':0, 'm':0, 'vol':{0:0}, 'dh':library, 'drv':drive, 'mh':""})
#print "dismount Nothing found"
def get_predecessor_of_entry(self, epoch, reference=DATA_INDEX_M): #
for e in self.data['data']:
if e[reference] > epoch:
index = self.data['data'].index(e) - 1
if index >= 0:
return self.data['data'][index]
def will_see_mount_request(self, epoch):
# return
# (False,None): not mounted, never willbe
# (True, x) : x diff to mount event
# : negativ x means already mounted
x = None
index = self.binsearch(self.data['data'], epoch, referenceindex=DATA_INDEX_REQM)
mindistneg = -sys.maxint
mindistpos = sys.maxint
breakat = 1800
while True:
if len(self.data['data'])>index:
entry = self.data['data'][index]
entrydiff = entry[DATA_INDEX_REQM] - epoch # negative or zero
if entry[DATA_INDEX_REQM] < epoch:
if entry[DATA_INDEX_REQD] >= epoch: # already mounted
return (True, max(-1799, entrydiff),mindistpos) # is still mounted
mindistneg = max(mindistneg, entrydiff)
else: # else switch to positive mounts
if entrydiff <= breakat:
mindistpos = entrydiff
return (True, mindistneg, mindistpos) # negative mount closer to zero
if entrydiff > breakat:
break
index += 1
else:
break
if mindistneg == -sys.maxint and mindistpos == sys.maxint:
return (False, None, None)
return (True, mindistneg, mindistpos)
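# For example, a return of (True, -120, sys.maxint) means the nearest earlier
# mount request was 120 s before 'epoch' and no later request falls inside the
# 1800 s look-ahead window; (False, None, None) means no request was found at all.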
# ---------------------
def coredump(self):
for i in self.data['data']:
print i
print self.active
print self
sys.exit(1)
def sanitycheck(self):
lm=None
restart = None
for i in self.data['data']:
if i[DATA_INDEX_REQM] > i[DATA_INDEX_M]:
print "Error ", i[DATA_INDEX_REQM], i[DATA_INDEX_M]
return False
if i[DATA_INDEX_REQD] > i[DATA_INDEX_D]:
print "Error ", i[DATA_INDEX_REQD], i[DATA_INDEX_D]
return False
if lm:
if lm[DATA_INDEX_D] > i[DATA_INDEX_M]:
print "Error, dmnt larger than following mnt",lm[DATA_INDEX_D], i[DATA_INDEX_M], self
print lm
print i
if 1:
index = self.data['data'].index(lm)
del self.data['data'][index]
index = self.data['data'].index(i)
del self.data['data'][index]
return self.sanitycheck()
else:
return False
lm = i
return True
def sortdata(self):
# despite its name, this only checks that entries are ordered; it does not reorder them
tmp = []
ref = self.data['data']
for i in ref:
index = ref.index(i)
if index > 0:
if ref[index][DATA_INDEX_M] < ref[index-1][DATA_INDEX_M]:
print "error mount timestamp"
if ref[index][DATA_INDEX_D] < ref[index-1][DATA_INDEX_D]:
print "error dismount timestamp"
if ref[index][DATA_INDEX_M] < ref[index-1][DATA_INDEX_D]:
print "error dismount timestamp m-d"
def checkerrors(self):
self.sortdata()
if not self.sanitycheck():
self.coredump()
if len(self.data['errors'])>0:
#print self.data['errors']
pass
return len(self.data['errors'])
def handle_special(self,res, data, key):
if len(data)>0:
res['%s_mean'%key] = numpy.mean(data)
res['%s_sum'%key] = sum(data)
res['%s_min'%key] = min(data)
res['%s_max'%key] = max(data)
res['%s_p05'%key] = percentile(sorted(data), 0.05)
res['%s_p10'%key] = percentile(sorted(data), 0.1)
#res['%s_p20'%key] = percentile(sorted(data), 0.2)
res['%s_p33'%key] = percentile(sorted(data), 0.33)
#res['%s_p40'%key] = percentile(sorted(data), 0.4)
res['%s_p50'%key] = percentile(sorted(data), 0.5)
res['%s_p67'%key] = percentile(sorted(data), 0.67)
#res['%s_p70'%key] = percentile(sorted(data), 0.7)
#res['%s_p80'%key] = percentile(sorted(data), 0.8)
res['%s_p90'%key] = percentile(sorted(data), 0.9)
res['%s_p95'%key] = percentile(sorted(data), 0.95)
def pertime(self, data=None):
def handle_lowlevel(ref, tmpts, curmnt, curdmnt, increment):
tmpmnt = max(curmnt, tmpts)
nextts=None
tmptsstring = tmpts.strftime("%Y-%m-%d %H:%M:%S")
if not tmptsstring in ref:
ref[tmptsstring] = {}
obj = ref[tmptsstring]
for x in [TOTAL_MNT_TIME, TOTAL_MNTS]:
if not x in obj.keys():
obj[x] = 0
if tmpmnt < tmpts:
print "wtf", tmpmnt, tmpts, increment, curdmnt
#return tmpts
return (False, tmpts)
if type(increment)==type("string"):
if increment=='month':
m = (tmpts.month)%12
y = tmpts.year
if (tmpts.month)/12:
y += 1
nextts = datetime.datetime(year=y, month=m+1, day=1)
elif increment == 'year':
nextts = datetime.datetime(year=tmpts.year+1, month=1, day=1)
else:
nextts = tmpts + increment
#print nextts, increment,tmpts
if tmpmnt <= nextts: # keep filling the current timeslot
if curdmnt <= nextts: ### case a
td = curdmnt - tmpmnt
obj[TOTAL_MNT_TIME] += td.seconds + td.days*24*60*60
obj[TOTAL_MNTS] += 1
else:
td = nextts - tmpmnt
obj[TOTAL_MNT_TIME] += td.seconds + td.days*24*60*60 ### case b
return (True, nextts)
#return handle_lowlevel(ref, nextts, nextts, curdmnt, increment)
else: # move on to the next timeslot
#return handle_lowlevel(ref, nextts, curmnt, curdmnt, increment)
return (True, nextts)
return (False, tmpts)
if self.pertimeres != None:
return self.pertimeres
res = {
'per_hour' : {},
'per_day' : {},
'per_week' : {},
'per_month' : {},
'per_year' : {}
}
file = os.path.join(self.basedir,"crt_%s_pertime.json"%self)
if not os.path.isfile(file):
if data==None:
data = self.data['data']
if len(data)>0:
init_mount = datetime.datetime.fromtimestamp(data[0][DATA_INDEX_M])
tmpts_perhour = datetime.datetime(init_mount.year, init_mount.month, init_mount.day, init_mount.hour,0)
tmpts_perday = datetime.datetime(init_mount.year, init_mount.month, init_mount.day,0,0)
tmpts_perweek = datetime.datetime(init_mount.year, init_mount.month, 1, 0, 0)
tmpts_permonth = datetime.datetime(init_mount.year, init_mount.month, 1, 0, 0)
tmpts_peryear = datetime.datetime(init_mount.year, 1, 1, 0, 0)
increment_hour = datetime.timedelta(hours=1)
increment_day = datetime.timedelta(days=1)
increment_week = datetime.timedelta(days=7)
for i in data:
mount = datetime.datetime.fromtimestamp(i[DATA_INDEX_M])
dismount = datetime.datetime.fromtimestamp(i[DATA_INDEX_D])
tmpts_perhour = datetime.datetime(mount.year, mount.month, mount.day, 0,0)
tmpts_perday = datetime.datetime(mount.year, mount.month, mount.day, 0,0)
# for (ref, tmpts, incr) in ['per_hour', ]
cont = True
while cont:
(cont,tmpts_perhour) = handle_lowlevel(res['per_hour'], tmpts_perhour, mount, dismount,increment_hour)
cont = True
while cont:
(cont,tmpts_perday) = handle_lowlevel(res['per_day'], tmpts_perday, mount, dismount,increment_day)
cont = True
while cont:
(cont,tmpts_perweek) = handle_lowlevel(res['per_week'], tmpts_perweek, mount, dismount,increment_week)
cont = True
while cont:
(cont,tmpts_permonth) = handle_lowlevel(res['per_month'], tmpts_permonth, mount, dismount,"month")
cont = True
while cont:
(cont,tmpts_peryear) = handle_lowlevel(res['per_year'], tmpts_peryear, mount, dismount,"year")
else:
print self, "no data available"
for slot in PERTIMESLOT: ## add attribute hotness
hotness = 0.0
for entryts in sorted(res[slot].keys()):
#print entryts
dt = datetime.datetime.strptime(entryts, "%Y-%m-%d %H:%M:%S")
totalslottime = get_slot_size_seconds(slot, dt.month, dt.year)
hotness = (hotness + res[slot][entryts].get(TOTAL_MNT_TIME,0)/totalslottime)/2.0
res[slot][entryts][HOTNESS] = hotness
for slot in PERTIMESLOT: ## print to csv
name = os.path.join(self.basedir, "%s_%s.csv"%(self,slot))
with open(name, 'w') as csv_file:
sortres = []
lineBuf = StringIO.StringIO()
cnt=1
for entryts in sorted(res[slot].keys()):
if lineBuf.len==0:
lineBuf.write("timestamp;index")
sortres = sorted(res[slot][entryts].keys())
for k in sortres:
lineBuf.write(";%s"%k)
lineBuf.write("\n")
lineBuf.write(";%s;%i"%(entryts,cnt))
for k in sortres:
linebuf_write(lineBuf, res[slot][entryts].get(k,0))
lineBuf.write("\n")
cnt+=1
csv_file.write(lineBuf.getvalue())
csv_file.close()
with open(file, 'w') as f:
json.dump(res, f, indent=1)
else:
print "reading file"
with open(file, 'r') as f:
res = json.load(f)
self.pertimeres = res
return res
def stats(self, atts, special):
res = {}
for a in atts:
match = REMNT_regexpattern.match(a)
if match:
slot = int(match.group(1))
lm = None
tmpres = []
for i in self.data['data']:
if lm != None:
if lm > 0:
if i[DATA_INDEX_M] - lm <= slot:
tmpres.append(1)
else:
tmpres.append(0)
else:
print 'no lm'
lm = i[DATA_INDEX_D]
res[a] = numpy.mean(tmpres)
elif a == 'id':
res['id'] = self.data['id']
elif a == TOTAL_MNTS:
res[a] = len(self.data['data'])
for a in special:
x = []
errcnt = 0
if a == CRT_MNT_LENGTH:
for i in self.data['data']:
if i[DATA_INDEX_M] > 0 and i[DATA_INDEX_D] > 0:
diff = i[DATA_INDEX_D] - i[DATA_INDEX_M]
if diff >= 0:
x.append(diff)
else:
print "Mount before dismount wtf"
errcnt += 1
#for (m,d) in i[DATA_INDEX_VOLUME].items():
# diff = int(m)-int(d)
# if diff > 0:
# x.append(diff)
else:
errcnt+=1
elif a == TIME_BTWN_MNTS:
lm = 0
for i in self.data['data']:
if lm > 1:
if i[DATA_INDEX_M] > 0:
x.append(i[DATA_INDEX_M]-lm)
lm = i[DATA_INDEX_D]
elif a == LATENCY_MNT:
for i in self.data['data']:
diff = i[DATA_INDEX_M] - i[DATA_INDEX_REQM]
if diff < 0 or diff > 6*3600:
errcnt += 1
#print "error diff:",diff, '\t', self
else:
x.append(diff)
elif a == LATENCY_DMNT:
for i in self.data['data']:
diff = i[DATA_INDEX_D] - i[DATA_INDEX_REQD]
if diff < 0 or diff > 6*3600:
errcnt+=1
#print "error diff:",diff, '\t', self
else:
x.append(diff)
elif a == LATENCY_DMV_DMCR:
for i in self.data['data']:
last_voldismnt = 0
for m,d in i[DATA_INDEX_VOLUME]:
last_voldismnt = max(last_voldismnt, d)
diff = i[DATA_INDEX_REQD] - last_voldismnt
if diff < 0 or diff > 6*3600:
errcnt+=1
#print "error diff:",diff, '\t', self
else:
x.append(diff)
# if errcnt:
# print self, a, len(x), 'error cnt', errcnt
self.handle_special(res, x, a)
return res
def get_pertime(self,slot, ts, attribute):
res = self.pertime()
uts = unicode(ts)
if slot in res.keys():
#print res[slot]
if uts in res[slot].keys():
#print res[slot][uts]
return res[slot][uts].get(attribute,0)
if type(ts) == type(" "):
dt = datetime.datetime.strptime(ts, "%Y-%m-%d %H:%M:%S")
if dt in res[slot].keys():
return res[slot][dt].get(attribute,0)
else:
print "no slot", slot
return 0
def handle_error(self, epoch, drive=None):
self.data.setdefault('minor', []).append(epoch)
def handle_mount_req(self, epoch): # either full cartridge mount or volume mount
drv = None
if not self.active['reqm']:
if int(epoch) < self.active['m'] or self.active['m'] == 0:
self.active['reqm'] = int(epoch)
drv = self.active['drv']
keys = self.active['vol'].keys()
keys.append(0)
m = max(keys)
if m > 0:
voldm = self.active['vol'][m]
if voldm <= int(epoch) or int(epoch) == self.active['reqm']: # ok, last mount dismount happened before current timestamp
self.active['vol'][int(epoch)]=0
drv = self.active['drv']
else:
print "voldm greater epoch", epoch
#self.coredump()
else:
if epoch != self.active['reqm']:
print "m less equal 0", epoch
#self.coredump()
return drv
def handle_dismount_req(self, epoch):
drv = None
if not self.active['reqd']:
self.active['reqd'] = int(epoch)
drv = self.active['drv']
else:
print "handle dismount request: why existing"
return drv
#def old_search_for_mountentry(self, epoch, reference=DATA_INDEX_M):
# for e in self.data['data']:
# if e[reference] >= epoch:
# #if e[DATA_INDEX_D] == 0:
# return self.data['data'].index(e)
# print "no entry found", self, epoch
# return None
#def old_entryexists(self, epoch, reference=DATA_INDEX_M, drive=""):
# for e in self.data['data']:
# if abs(e[reference] - epoch) <= 10:
# if e[DATA_INDEX_DRV] == drive:
# return True
#def old_errorentryexists(self, epoch, reference=DATA_INDEX_D):
# for e in self.data['errors']:
# diff = e[reference] - epoch
# if diff:
# return True
class Cartridge(BaseStats):
def __init__(self,id, basedir):
BaseStats.__init__(self, os.path.join(basedir, "cartridges"))
self.data['id']=id
self.fsm.state=FSM_CRT_ERROR
self.fsm.add_transition(FSM_EVNT_INJECT, FSM_CRT_NEW, FSM_CRT_HOME, self._cb_inject)
self.fsm.add_transition(FSM_EVNT_INJECT, FSM_CRT_ERROR, FSM_CRT_HOME, self._cb_inject)
self.fsm.add_transition(FSM_EVNT_INJECT, FSM_CRT_EJECTED, FSM_CRT_HOME, self._cb_inject)
self.fsm.add_transition(FSM_EVNT_INJECT, FSM_CRT_ERROR_FATAL, FSM_CRT_HOME, self._cb_inject)
self.fsm.add_transition(FSM_EVNT_INJECT, FSM_CRT_HOME,FSM_CRT_HOME, None)
self.fsm.add_transition(FSM_EVNT_MNTREQ, FSM_CRT_LOADING,FSM_CRT_LOADING, None) # ignore double mount req
self.fsm.add_transition(FSM_EVNT_MNTREQ, FSM_CRT_IMPLICIT_CRTMNT, FSM_CRT_LOADING, None)
self.fsm.add_transition(FSM_EVNT_MNTREQ, FSM_CRT_HOME, FSM_CRT_LOADING, self._cb_mntreq)
self.fsm.add_transition(FSM_EVNT_MNTREQ, FSM_CRT_VOLMNTDISMNTFU, FSM_CRT_LOADING , self._cb_voldmfu_to_loading)
self.fsm.add_transition(FSM_EVNT_MNTREQ, FSM_CRT_ERROR, FSM_CRT_LOADING, self._cb_recover_to_loading)
self.fsm.add_transition(FSM_EVNT_MNTREQ, FSM_CRT_UNLOADING, FSM_CRT_LOADING, self._cb_unloading_timeout)
self.fsm.add_transition(FSM_EVNT_MNTREQ, FSM_CRT_LOADED, FSM_CRT_LOADING, self._cb_loaded_to_loading)
self.fsm.add_transition(FSM_EVNT_MNTREQ, FSM_CRT_D2DMV, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_MNTREQ, FSM_CRT_VOLMNTED, FSM_CRT_LOADING, self._cb_volmnt_to_loading)
self.fsm.add_transition(FSM_EVNT_MNTREQ, FSM_CRT_ERROR_FATAL, FSM_CRT_ERROR_FATAL, self._cb_error)
self.fsm.add_transition(FSM_EVNT_MNTCMPLT, FSM_CRT_D2DMV, FSM_CRT_VOLMNTED, self._cb_d2dmv_to_loaded)
self.fsm.add_transition(FSM_EVNT_MNTCMPLT, FSM_CRT_LOADING, FSM_CRT_VOLMNTED, self._cb_mntcmplt)
self.fsm.add_transition(FSM_EVNT_MNTCMPLT, FSM_CRT_IMPLICIT_CRTMNT, FSM_CRT_VOLMNTED, self._cb_implicit_crtmnt_cmplt)
self.fsm.add_transition(FSM_EVNT_MNTCMPLT, FSM_CRT_HOME, FSM_CRT_ERROR_FATAL, self._cb_error)
self.fsm.add_transition(FSM_EVNT_MNTCMPLT, FSM_CRT_UNLOADING, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_MNTCMPLT, FSM_CRT_LOADED, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_MNTCMPLT, FSM_CRT_VOLMNTED, FSM_CRT_VOLMNTED, None)
self.fsm.add_transition(FSM_EVNT_MNTCMPLT, FSM_CRT_ERROR, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_MNTCMPLT, FSM_CRT_ERROR_FATAL, FSM_CRT_ERROR_FATAL, self._cb_error)
self.fsm.add_transition(FSM_EVNT_MNTCMPLT, FSM_CRT_VOLMNTDISMNTFU, FSM_CRT_ERROR, self._cb_error )
self.fsm.add_transition(FSM_EVNT_VOLMNT, FSM_CRT_UNLOADING, FSM_CRT_UNLOADING, None)
self.fsm.add_transition(FSM_EVNT_VOLMNT, FSM_CRT_LOADING, FSM_CRT_LOADING, None)
self.fsm.add_transition(FSM_EVNT_VOLMNT, FSM_CRT_LOADED, FSM_CRT_VOLMNTED, self._cb_volmnt)
self.fsm.add_transition(FSM_EVNT_VOLMNT, FSM_CRT_VOLMNTED, FSM_CRT_VOLMNTDISMNTFU, None)
self.fsm.add_transition(FSM_EVNT_VOLMNT, FSM_CRT_HOME, FSM_CRT_IMPLICIT_CRTMNT, self._cb_implicit_crtmnt)
self.fsm.add_transition(FSM_EVNT_VOLMNT, FSM_CRT_IMPLICIT_CRTMNT, FSM_CRT_IMPLICIT_CRTMNT,None)
self.fsm.add_transition(FSM_EVNT_VOLMNT, FSM_CRT_ERROR, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_VOLMNT, FSM_CRT_ERROR_FATAL, FSM_CRT_ERROR_FATAL, self._cb_error)
self.fsm.add_transition(FSM_EVNT_VOLMNT, FSM_CRT_EJECTED, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_VOLMNT, FSM_CRT_VOLMNTDISMNTFU,FSM_CRT_VOLMNTDISMNTFU,None )
self.fsm.add_transition(FSM_EVNT_VOLMNT, FSM_CRT_D2DMV, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_VOLDMNT, FSM_CRT_VOLMNTED, FSM_CRT_LOADED, self._cb_voldmnt)
self.fsm.add_transition(FSM_EVNT_VOLDMNT, FSM_CRT_VOLMNTDISMNTFU, FSM_CRT_VOLMNTED , None)
self.fsm.add_transition(FSM_EVNT_VOLDMNT, FSM_CRT_IMPLICIT_CRTMNT, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_VOLDMNT, FSM_CRT_D2DMV, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_VOLDMNT, FSM_CRT_LOADING, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_VOLDMNT, FSM_CRT_LOADED, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_VOLDMNT, FSM_CRT_HOME, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_VOLDMNT, FSM_CRT_ERROR, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_VOLDMNT, FSM_CRT_ERROR_FATAL, FSM_CRT_ERROR_FATAL, self._cb_error)
self.fsm.add_transition(FSM_EVNT_VOLDMNT, FSM_CRT_UNLOADING, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_DISMNTCRTREQ, FSM_CRT_D2DMV,FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_DISMNTCRTREQ, FSM_CRT_UNLOADING,FSM_CRT_UNLOADING, None)
self.fsm.add_transition(FSM_EVNT_DISMNTCRTREQ, FSM_CRT_VOLMNTDISMNTFU, FSM_CRT_UNLOADING, self._cb_dismnt_crt_req)
self.fsm.add_transition(FSM_EVNT_DISMNTCRTREQ, FSM_CRT_LOADED, FSM_CRT_UNLOADING, self._cb_dismnt_crt_req)
self.fsm.add_transition(FSM_EVNT_DISMNTCRTREQ, FSM_CRT_VOLMNTED, FSM_CRT_UNLOADING, self._cb_implicit_crtdism_while_volmnt)
self.fsm.add_transition(FSM_EVNT_DISMNTCRTREQ, FSM_CRT_IMPLICIT_CRTMNT, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_DISMNTCRTREQ, FSM_CRT_HOME, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_DISMNTCRTREQ, FSM_CRT_LOADING, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_DISMNTCRTREQ, FSM_CRT_ERROR, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_DISMNTCRTREQ, FSM_CRT_ERROR_FATAL, FSM_CRT_ERROR_FATAL, self._cb_error)
self.fsm.add_transition(FSM_EVNT_DISMNTCRTCMPLT, FSM_CRT_VOLMNTDISMNTFU, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_DISMNTCRTCMPLT, FSM_CRT_LOADING, FSM_CRT_HOME, self._cb_recover_to_home)
self.fsm.add_transition(FSM_EVNT_DISMNTCRTCMPLT, FSM_CRT_UNLOADING, FSM_CRT_HOME, self._cb_dismnt_crt_cmplt)
self.fsm.add_transition(FSM_EVNT_DISMNTCRTCMPLT, FSM_CRT_LOADED, FSM_CRT_HOME, self._cb_dismnt_crt_cmplt)
self.fsm.add_transition(FSM_EVNT_DISMNTCRTCMPLT, FSM_CRT_ERROR, FSM_CRT_HOME, self._cb_recover_to_home)
self.fsm.add_transition(FSM_EVNT_DISMNTCRTCMPLT, FSM_CRT_ERROR_FATAL, FSM_CRT_HOME, self._cb_recover_to_home)
self.fsm.add_transition(FSM_EVNT_DISMNTCRTCMPLT, FSM_CRT_VOLMNTED, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_DISMNTCRTCMPLT, FSM_CRT_HOME,FSM_CRT_HOME,None) # ignore duplicates
self.fsm.add_transition(FSM_EVNT_DISMNTCRTCMPLT, FSM_CRT_D2DMV, FSM_CRT_HOME, self._cb_recover_to_home )
self.fsm.add_transition(FSM_EVNT_DISMNTCRTCMPLT, FSM_CRT_IMPLICIT_CRTMNT, FSM_CRT_HOME, self._cb_recover_to_home )
#d2d mv
self.fsm.add_transition(FSM_EVNT_D2DMV, FSM_CRT_VOLMNTED,FSM_CRT_D2DMV, self._cb_loaded_to_d2dmv)
self.fsm.add_transition(FSM_EVNT_D2DMV, FSM_CRT_D2DMV,FSM_CRT_D2DMV, None)
self.fsm.add_transition(FSM_EVNT_D2DMV, FSM_CRT_LOADING,FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_D2DMV, FSM_CRT_UNLOADING,FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_D2DMV, FSM_CRT_ERROR_FATAL,FSM_CRT_ERROR_FATAL, self._cb_error)
self.fsm.add_transition(FSM_EVNT_D2DMV, FSM_CRT_ERROR, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_D2DMV, FSM_CRT_HOME, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_D2DMV, FSM_CRT_LOADED, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_ROB1, FSM_CRT_LOADING, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_ROB1, FSM_CRT_ERROR_FATAL, FSM_CRT_ERROR_FATAL, self._cb_error)
self.fsm.add_transition(FSM_EVNT_ROB1, FSM_CRT_ERROR, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_ROB1, FSM_CRT_HOME, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_ROB1, FSM_CRT_D2DMV, FSM_CRT_ERROR, self._cb_error)
self.fsm.add_transition(FSM_EVNT_EJECT, FSM_CRT_ERROR, FSM_CRT_EJECTED, self._cb_eject_error)
self.fsm.add_transition(FSM_EVNT_EJECT, FSM_CRT_HOME, FSM_CRT_EJECTED, self._cb_eject)
self.fsm.add_transition(FSM_EVNT_EJECT, FSM_CRT_UNLOADING, FSM_CRT_EJECTED, self._cb_eject_error)
self.fsm.add_transition(FSM_EVNT_EJECT, FSM_CRT_ERROR_FATAL, FSM_CRT_ERROR, self._cb_eject)
self.fsm.add_transition(FSM_EVNT_EJECT, FSM_CRT_VOLMNTED, FSM_CRT_ERROR, self._cb_eject_error)
self.fsm.add_transition(FSM_EVNT_EJECT, FSM_CRT_LOADED, FSM_CRT_ERROR, self._cb_eject_error)
self.fsm.add_transition(FSM_EVNT_EJECT, FSM_CRT_LOADING, FSM_CRT_ERROR, self._cb_eject_error)
self.fsm.add_transition(FSM_EVNT_EJECT, FSM_CRT_IMPLICIT_CRTMNT, FSM_CRT_ERROR, self._cb_eject_error)
self.fsm.add_transition(FSM_EVNT_EJECT, FSM_CRT_EJECTED, FSM_CRT_EJECTED, None)
self.fsm.add_transition(FSM_EVNT_FATALERROR_1, FSM_CRT_D2DMV, FSM_CRT_ERROR , self._cb_error)
self.fsm.add_transition(FSM_EVNT_FATALERROR_1, FSM_CRT_ERROR_FATAL, FSM_CRT_ERROR_FATAL, self._cb_error)
self.fsm.add_transition(FSM_EVNT_FATALERROR_1, FSM_CRT_EJECTED, FSM_CRT_EJECTED, None)
for state in [FSM_CRT_HOME,FSM_CRT_LOADING,FSM_CRT_VOLMNTED,FSM_CRT_LOADED,FSM_CRT_UNLOADING,FSM_CRT_NEW,\
FSM_CRT_IMPLICIT_CRTMNT,FSM_CRT_VOLMNTDISMNTFU,FSM_CRT_ERROR]:
self.fsm.add_transition(FSM_EVNT_FATALERROR_1, state, FSM_CRT_ERROR_FATAL, self._cb_error)
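# The normal cartridge lifecycle encoded above is:
# HOME -(MNTREQ)-> LOADING -(MNTCMPLT)-> VOLMNTED -(VOLDMNT)-> LOADED
#      -(DISMNTCRTREQ)-> UNLOADING -(DISMNTCRTCMPLT)-> HOME
# the remaining transitions model implicit mounts, drive-to-drive moves and error recovery.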
def handle_event(self, event, args):
return self.fsm.handle_event(event, args)
def _cb_volmnt(self,event, args):
self.active['vol'][int(args['epoch'])] = 0
return (True, None)
def _cb_voldmnt(self,event, args):
m = max(self.active['vol'].keys())
if m > 0:
voldm = self.active['vol'][m]
if voldm <= 0 and m <= int(args['epoch']):
self.active['vol'][m]=int(args['epoch'])
return (True, None)
return (False, None)
def _cb_inject(self,event, args):
self.data['inject'].append(args['epoch'])
self.flusherror()
return (True,None)
def _cb_error(self,event, args):
if event == FSM_EVNT_DISMNTCRTCMPLT:
self.active['d'] = args['epoch']
ret={'drive':self.active['drv'],'reqm':self.active['reqm'],'reqd':self.active['reqd'],'cid':self.data['id']}
return (True, ret)
elif event == FSM_EVNT_VOLMNT:
self.active['vol'][int(args['epoch'])] = 0
elif event == FSM_EVNT_VOLDMNT:
return self._cb_voldmnt(event, args)
elif event == FSM_EVNT_MNTCMPLT:
self.active['m'] = args['epoch']
self.active['vol'] = {int(args['epoch']):0}
self.active['drv'] = args['drive']
return (True, {'drive':args['drive']})
elif event == FSM_EVNT_DISMNTCRTREQ:
self.active['reqd'] = args['epoch']
elif event == FSM_EVNT_MNTREQ:
self.active['reqm'] = args['epoch']
return (True, None)
def _cb_recover_to_loading(self, event, args):
self.flusherror()
return self._cb_mntreq(event,args)
def _cb_recover_to_home(self,event, args):
self.active['d'] = args['epoch']
self.flusherror()
return (True, args)
# - - - -
def _cb_error_fatal(self, event, args):
self.flusherror()
return (True,None)
def _cb_eject(self, event,args):
self.data['eject']=args['epoch']
return (True, None)
def _cb_eject_error(self,event, args):
self.flusherror()
return self._cb_eject(event,args)
def _cb_voldmfu_to_loading(self,event, args):
self.flusherror()
return self._cb_mntreq(event,args)
def _cb_volmnt_to_loading(self,event, args):
self.flusherror()
return self._cb_mntreq(event,args)
def _cb_loaded_to_loading(self,event, args):
self.flusherror()
return self._cb_mntreq(event,args)
def _cb_mntcmplt(self, event,args):
self.active['m'] = args['epoch']
self.active['vol'] = {int(args['epoch']):0}
self.active['drv'] = args['drive']
return (True, {'drive':args['drive']})
def _cb_mntreq(self,event, args):
self.active['reqm'] = args['epoch']
return (True, None)
def _cb_dismnt_crt_cmplt(self,event, args):
self.active['d'] = args['epoch']
ret ={'drive':self.active['drv'], 'reqm':self.active['reqm'], 'reqd':self.active['reqd'], 'cid':self.data['id']}
self.dataappend()
return (True, ret)
def _cb_dismnt_crt_req(self,event, args):
self.active['reqd'] = args['epoch']
return (True, None)
def _cb_implicit_crtmnt(self,event, args):
self.active['reqm'] = args['epoch']
return (True, None)
def _cb_implicit_crtmnt_cmplt(self,event, args):
return self._cb_mntcmplt(event,args)
def _cb_implicit_crtdism_while_volmnt(self,event, args):
(ret, a) = self._cb_voldmnt(event,args)
if ret:
return self._cb_dismnt_crt_req(event,args)
raise BaseException("What happened")
def _cb_unloading_timeout(self,event, args):
self.flusherror()
return self._cb_mntreq(event,args)
def _cb_loaded_to_d2dmv(self,event, args):
#self.active['d'] = args['epoch']
#self.active['reqd'] = args['epoch']
ret = {
'reqm' : self.active['reqm'],
#'reqd' : self.active['reqd']
'reqd' : args['epoch']
}
#self.dataappend()
#self.active['reqm'] = args['epoch']
self._cb_voldmnt(event,args)
return (True, ret)
def _cb_d2dmv_to_loaded(self,event, args):
self.active['drv'] = args['drive']
self.active['m'] = args['epoch']
self.active['reqm'] = args['epoch']
self.active['vol'] = {args['epoch']:0}
return (True, args)
## --------------------------------
def force_last_event_flush(self, epoch):
if len(self.data['data']) > 0:
evntdata = self.data['data'].pop()
e = {
'm':evntdata[DATA_INDEX_M],
'd':evntdata[DATA_INDEX_D],
'reqm':evntdata[DATA_INDEX_REQM],
'reqd':evntdata[DATA_INDEX_REQD],
'drv':evntdata[DATA_INDEX_DRV],
'vol':{}
}
for [m,d] in evntdata[DATA_INDEX_VOLUME]:
#e['vol'].append({m:d})
e['vol'][int(m)]=int(d)
self.data['errors'].append(e)
self.handle_event(FSM_EVNT_FATALERROR_1, {'epoch':epoch})
##-------------------
def handle_enter(self, epoch):
self.data['enter'] = epoch
#self.tmp_state = {'m':0, 'd':0, 'reqM':0, 'reqD':0}
def handle_eject(self, epoch):
self.data['eject'] = epoch
#self.tmp_state = {'m':0, 'd':0, 'reqM':0, 'reqD':0}
def get_volume(self, slot, ts):
if slot == 'per_year':
nextts = datetime.datetime(year=ts.year+1, month=1, day=1, hour=0 ,minute=0)
if slot == 'per_month':
m = (ts.month)%12
y = ts.year
if (ts.month)/12:
y += 1
nextts = datetime.datetime(year=y, month=m+1, day=1)
if slot == 'per_week':
nextts = ts + datetime.timedelta(days=7)
if slot == 'per_day':
nextts = ts + datetime.timedelta(days=1)
if slot == 'per_hour':
nextts = ts + datetime.timedelta(hours=1)
res = 0
tsep = unix_time(ts)
nexttsep = unix_time(nextts)
index = self.binsearch(self.data['data'],tsep,DATA_INDEX_M)
while True:
if len(self.data['data']) > index:
e = self.data['data'][index]
for m,d in e[DATA_INDEX_VOLUME]:
if m != 0 and d != 0:
end = min(nexttsep, d)
if m >= tsep and m <= nexttsep:
res += d - m
elif m > nexttsep:
break
if e[DATA_INDEX_M]> nexttsep:
break
index += 1
else:
break
return res
#print m,d
def get_tbm(self):
res = []
lastevent = None
for event in self.data['data']:
if lastevent!=None:
res.append(event[DATA_INDEX_REQM] - lastevent[DATA_INDEX_D])
lastevent=event
return res
def get_latency(self):
res = []
for event in self.data['data']:
lm = event[DATA_INDEX_M] - event[DATA_INDEX_REQM]
ld = event[DATA_INDEX_D] - event[DATA_INDEX_REQD]
if ld < 1800:
res.append(ld)
if lm < 1800:
res.append(lm)
return res
class Drive(BaseStats):
def __init__(self, id, basedir):
BaseStats.__init__(self,os.path.join(basedir, "drives"))
self.data['id']=id
self.data['data'] =[] # (dummy, mount epoch, dummy, dismount epoch, cid)
self.data['cleaning']=[]
self.fsm.state=FSM_DRV_ERROR
self.fsm.add_transition(FSM_EVNT_DISMNTCRTCMPLT, FSM_DRV_LOADED, FSM_DRV_EMPTY, self._cb_dmntcmplt)
self.fsm.add_transition(FSM_EVNT_MNTCMPLT, FSM_DRV_EMPTY, FSM_DRV_LOADED, self._cb_mountcmplt)
# from error states
self.fsm.add_transition(FSM_EVNT_D2DMV, FSM_DRV_LOADED, FSM_DRV_ERROR, self._cb_loadedtoerror)
self.fsm.add_transition(FSM_EVNT_D2DMV, FSM_DRV_ERROR_FATAL, FSM_DRV_ERROR_FATAL, None)
self.fsm.add_transition(FSM_EVNT_D2DMV, FSM_DRV_MAYBERECOVERED, FSM_DRV_ERROR_FATAL, self._cb_error_fatal)
self.fsm.add_transition(FSM_EVNT_D2DMV, FSM_DRV_ERROR, FSM_DRV_ERROR, None)
self.fsm.add_transition(FSM_EVNT_D2DMV, FSM_DRV_EMPTY, FSM_DRV_EMPTY, None)
self.fsm.add_transition(FSM_EVNT_MNTCMPLT, FSM_DRV_ERROR, FSM_DRV_LOADED, self._cb_errortoloaded)
self.fsm.add_transition(FSM_EVNT_DISMNTCRTCMPLT, FSM_DRV_ERROR, FSM_DRV_EMPTY, self._cb_errortoempty)
self.fsm.add_transition(FSM_EVNT_MNTCMPLT, FSM_DRV_LOADED, FSM_DRV_ERROR, self._cb_loadedtoerror)
# fatal error, admin action going on
for state in [FSM_DRV_LOADED,FSM_DRV_EMPTY, FSM_DRV_ERROR, FSM_DRV_ERROR_FATAL, FSM_DRV_MAYBERECOVERED]:
self.fsm.add_transition(FSM_EVNT_FATALERROR_1, state, FSM_DRV_ERROR_FATAL, self._cb_error_fatal)
self.fsm.add_transition(FSM_EVNT_DELDRIVE, state, FSM_DRV_ERROR_FATAL,self._cb_error_fatal)
self.fsm.add_transition(FSM_EVNT_DISMNTCRTCMPLT, FSM_DRV_ERROR_FATAL, FSM_DRV_ERROR_FATAL, self._cb_errortoempty)
self.fsm.add_transition(FSM_EVNT_MNTCMPLT, FSM_DRV_ERROR_FATAL, FSM_DRV_MAYBERECOVERED, self._cb_errortoloaded)
self.fsm.add_transition(FSM_EVNT_DISMNTCRTCMPLT, FSM_DRV_MAYBERECOVERED, FSM_DRV_EMPTY, self._cb_dmntcmplt)
self.fsm.add_transition(FSM_EVNT_MNTCMPLT, FSM_DRV_MAYBERECOVERED, FSM_DRV_ERROR_FATAL, self._cb_error_fatal)
self.fsm.add_transition(FSM_EVNT_ROB1, FSM_DRV_EMPTY,FSM_DRV_EMPTY, None)
self.fsm.add_transition(FSM_EVNT_ROB1, FSM_DRV_LOADED,FSM_DRV_ERROR, self._cb_error_fatal)
self.fsm.add_transition(FSM_EVNT_ROB1, FSM_DRV_ERROR_FATAL,FSM_DRV_ERROR_FATAL, None)
self.fsm.add_transition(FSM_EVNT_DISMNTCRTCMPLT, FSM_DRV_EMPTY, FSM_DRV_EMPTY, None)
# probably initial drive start
self.fsm.add_transition(FSM_EVNT_RECOVER_FAT1, FSM_DRV_ERROR, FSM_DRV_EMPTY, self._cb_recover_fatal1)
self.fsm.add_transition(FSM_EVNT_RECOVER_FAT1, FSM_DRV_LOADED, FSM_DRV_EMPTY, self._cb_recover_fatal1)
self.fsm.add_transition(FSM_EVNT_RECOVER_FAT1, FSM_DRV_EMPTY,FSM_DRV_EMPTY, self._cb_double_admin_enable)
self.fsm.add_transition(FSM_EVNT_RECOVER_FAT1, FSM_DRV_ERROR_FATAL,FSM_DRV_EMPTY, self._cb_recover_fatal1)
self.fsm.add_transition(FSM_EVNT_RECOVER_FAT1,FSM_DRV_MAYBERECOVERED,FSM_DRV_EMPTY, self._cb_recover_fatal1)
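# Normal drive lifecycle: EMPTY -(MNTCMPLT)-> LOADED -(DISMNTCRTCMPLT)-> EMPTY;
# the remaining transitions cover error states and administrative recovery.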
def _transform_dictentry(self, entry):
return [entry['reqm'], entry['m'], 0, entry['reqd'], entry['d'], entry['mh'],entry['dh'], entry['drv']]
def handle_event(self, event, args):
return self.fsm.handle_event(event, args)
def _cb_error_fatal(self, event, args):
self.flusherror()
return (True,None)
def _cb_double_admin_enable(self, event, args):
self._cb_error_fatal(event,args)
return (True, None)
def _cb_mountcmplt(self,event,args):
self.active['m'] = args['epoch']
self.active['drv'] = args['cid']
return (True, None)
def _cb_dmntcmplt(self,event, args):
if args['cid'] != self.active['drv']:
#print "invalid dismount received"
return (False, {'cid':args['cid']})
else:
# if not 'reqd' in args.keys():
self.active['reqd'] = args['reqd']
self.active['reqm'] = args['reqm']
self.active['d'] = args['epoch']
self.dataappend()
return (True, None)
def _cb_loadedtoerror(self,event, args):
self.flusherror()
self.fsm.subsequent_events.append([FSM_EVNT_MNTCMPLT, args])
return (True, None)
def _cb_errortoloaded(self, event,args):
self.flusherror()
return self._cb_mountcmplt(event,args)
def _cb_errortoempty(self,event, args):
self.flusherror()
return (True, None)
def _cb_recover_fatal1(self,event, args):
self.flusherror()
return (True, None)
def get_latencies(self):
res = []
def _add(start, end, home, cid, op):
diff = end-start
if 0<=diff and diff <= 3600:
res.append((end, diff, home, cid, op))
for (reqm, m, dmvol, reqd, d, mh, dh, cid) in self.data['data']:
_add(reqm, m, mh, cid, HOME_OPERATION_EJECT) # home to drive
_add(reqd, d, dh, cid, HOME_OPERATION_INJECT)# drive to home
return res
def data_import(self, reqm, m, reqd, d, mh, dh, cid):
ent = (reqm,m,None,reqd,d,mh,dh,cid)
for tmp in self.data['data']:
if m > 0:
if tmp[1] > m:
index = self.data['data'].index(tmp)
self.data['data'].insert(index, ent)
return
if reqm > 0:
if tmp[0] > reqm:
index = self.data['data'].index(tmp)
self.data['data'].insert(index, ent)
return
if m == 0:
self.data['errors'].append({'reqm':reqm,'m':m,'reqd':reqd,'d':d,'mh':mh,'dh':dh,'drv':cid, 'vol':0 })
else:
self.data['data'].append(ent)
def handle_disable(self, epoch):
self.data['disabled'].append([epoch,0])
self.flusherror()
#self.state = DRIVE_STATE_DISABLED
def handle_enable(self, epoch):
x = self.data['disabled'].pop()
print x
if x[1] != 0:
print "wtf drive enable"
else:
x[1]=epoch
self.data['disabled'].append(x)
# self.state = DRIVE_STATE_IDLE  # legacy constant, not defined in this module
def handle_cleaning_crt_dismnt(self, epoch):
self.data['cleaning'].append(epoch)
def estimate_cleaning_time(self, start=None, end=None):
t = []
for cln in self.data['cleaning']:
if start < cln and cln <= end:
entry = self.get_predecessor_of_entry(cln, DATA_INDEX_D)
if entry:
t.append(cln-entry[DATA_INDEX_D])
if cln > end:
break
return t
class Home:
def __init__(self, hid, basedir):
self.basedir = os.path.join(basedir, "homes")
if not os.path.exists(self.basedir):
os.makedirs(self.basedir)
self.stats_calculated=False
self.data = {
'id' : str(hid),
'data' : [],
'perdrive' : {} # entries: driveid= (numops, avglatency)
}
def __repr__(self):
return self.data['id']
def handle_event(self, epoch, latency, drive, cid, optype):
# if optype == HOME_OPERATION_EJECT:
# self.eject_event(epoch, latency, drive, cid)
# elif optype == HOME_OPERATION_INJECT:
# self.inject_event(epoch, latency, drive, cid)
self.data['data'].append((epoch, latency, drive, cid, optype))
def get_stats(self, atts):
if not self.stats_calculated:
self.stats()
res = {}
for i in atts:
res[i] = self.data.get(i, 0)
return res
def stats(self):
_drivestats = {}
for (epoch, latency, drive, cid, optype) in self.data['data']:
obj = _drivestats.setdefault(drive, [])
obj.append(latency)
tmp = []
l = 0
for drive, val in _drivestats.items():
t = numpy.mean(val)
tmp.append(t)
l += len(val)
self.data['perdrive'][drive] = (len(val),t)
self.data[LATENCY_HOME_AVG] = numpy.mean(tmp)
self.data[HOME_TOTAL_OPS] = l
with open(os.path.join(self.basedir, str(self)), 'w') as csv_file:
lineBuf = StringIO.StringIO()
lineBuf.write("drive;%s;%s"%(HOME_TOTAL_OPS, LATENCY_HOME_AVG))
for d,(num,lat) in self.data['perdrive'].items():
linebuf_write(lineBuf, "\n%s;%s;%s"%(d,num,lat))
csv_file.write(lineBuf.getvalue())
csv_file.close()
self.stats_calculated=True
def cost_drive(self, drv=None):
if not drv:
return self.data['perdrive']
res = self.data['perdrive'].get(drv, None)
if not res:
return self.data[LATENCY_HOME_AVG]
else:
return res[1]
class God:
def __init__(self, basedir):
self.basedir = basedir
self.outputdir = os.path.join(basedir, "stats")
self.whpssdir = os.path.join(basedir, "../whpss")
self.robotdir = os.path.join(self.basedir, '../robot/robot_mounts*.gz')
#self._perdrive_outputdir = os.path.join(self.outputdir, 'drives')
#self._percartridge_outputdir = os.path.join(self.outputdir, 'cartridges')
self._global_crt_csv = os.path.join(self.outputdir ,'global_crt.csv')
self._global_drv_csv = os.path.join(self.outputdir ,'global_drv.csv')
self._global_hm_csv = os.path.join(self.outputdir ,'global_hm.csv')
self._json_dump_robot = os.path.join(basedir, "robot_classes")
self.crt = {}
self.drv = {}
self.hm = {}
self.robot_totalcnt = 0
self.robot_errorcnt_crt = 0
self.robot_errorcnt_drv = 0
for i in [self.outputdir]:
if not os.path.exists(i):
os.makedirs(i)
def _get_hm(self, hid, create=True):
if hid not in self.hm and create:
self.hm[hid] = Home(hid, self.outputdir)
return self.hm.get(hid,None)
def _get_crt(self, cid, create=True):
if cid not in self.crt and create:
self.crt[cid] = Cartridge(cid, self.outputdir)
return self.crt.get(cid,None)
def _get_drv(self, drv, create=True):
if drv not in self.drv and create:
self.drv[drv] = Drive(drv, self.outputdir)
return self.drv.get(drv,None)
def handle_event(self, evnt, arguments):
if 'cid' in arguments.keys():
cartridge_id = arguments['cid']
if cartridge_id[:2] in crtfilter_b or cartridge_id[:1] in crtfilter_a:
# filter cartridges
return
crt = self._get_crt(cartridge_id )
if evnt == None:
pass #dummy
else:
a = crt.handle_event(evnt,arguments)
if a:
for k,v in a.items():
arguments[k]=v
#else:
# print "no cid... what the fuck"
if 'drive' in arguments.keys():
if len(arguments['drive'])>0:
#if arguments['drive'][0] not in ['4', '3']:
if len(arguments['drive']) <= 6:
drv = self._get_drv(arguments['drive'])
if evnt == None:
pass #dummy
else:
ret = drv.handle_event(evnt, arguments)
if ret:
if 'cid' in ret.keys():
# error in cids last event
self.force_last_event_flush(cid=ret['cid'], epoch=arguments['epoch'])
else:
"no drive provided, or needed"
def force_last_event_flush(self, cid, epoch):
crt = self._get_crt(cid, False)
if crt:
crt.force_last_event_flush(epoch)
def robot_dismount(self, cid, epoch, drive, library):
self.robot_totalcnt+=1
crt = self._get_crt(cid, False)
if crt:
if not crt.robot_dismount(epoch, drive, library):
#print "error robot dismount"
self.robot_errorcnt_crt+=1
else:
pass
#print "Unknown crt",cid, epoch
drv = self._get_drv(drive , False)
if drv:
if cid[:3] != 'CLN':
if not drv.robot_dismount(epoch, cid, library):
self.robot_errorcnt_drv+=1
else:
drv.handle_cleaning_crt_dismnt(epoch)
def robot_mount(self,cid, epoch, drive, library):
self.robot_totalcnt+=1
crt = self._get_crt(cid, False)
if crt:
if not crt.robot_mount(epoch, drive, library):
#print "error robot mount"
self.robot_errorcnt_crt+=1
else:
pass
#print "Unknown crt",cid, epoch
drv = self._get_drv(drive , False)
if drv:
if not drv.robot_mount(epoch, cid, library):
self.robot_errorcnt_drv+=1
def collect_recovered_errors(self):
recovered = 0
remaining = 0
for cid,crt in self.crt.items():
a,b = crt.collect_recovered_errors()
recovered += a
remaining += b
for did,drv in self.drv.items():
a,b = drv.collect_recovered_errors()
recovered += a
remaining += b
print "Recovered:%i, Remaining:%i"%(recovered, remaining)
def handle_warning(self, epoch, drive, cid):
crt = self._get_crt(cid, False)
if crt:
crt.flushactive()
def jsonload(self, skipdrives=False):
jsondir = os.path.join(self.basedir, 'json')
if not skipdrives:
drvf = "%s_drv.json"%self._json_dump_robot
if os.path.isfile(drvf):
with open(drvf, 'r') as f:
#print "Reading %s"%drvf
for e in json.load(f):
obj = Drive(e['id'], self.outputdir)
obj.data = e
self.drv[e['id']]=obj
all_crt_files = sorted(glob.glob(os.path.join(jsondir, 'crt_*.json')))
for crtf in all_crt_files:
with open(crtf, 'r') as f:
#print "Reading %s"%crtf
e = json.load(f)
obj = Cartridge(e['id'], self.outputdir)
obj.data = e
self.crt[e['id']]=obj
hmf = "%s_hm.json"%self._json_dump_robot
if os.path.isfile(hmf):
with open(hmf, 'r') as f:
#print "Reading %s"%hmf
for e in json.load(f):
obj = Home(e['id'], self.outputdir)
obj.data = e
self.hm[e['id']]=obj
def jsondump(self):
def _dmp(file, data):
with open(file, 'w') as f:
json.dump(data, f, indent=1)
jsondir = os.path.join(self.basedir, "json")
if not os.path.isdir(jsondir):
os.makedirs(jsondir)
drv = []
hm = []
for obj in self.crt.values():
fn = os.path.join(jsondir,"crt_%s.json"%obj.data['id'])
_dmp(fn, obj.data)
for obj in self.drv.values():
drv.append(obj.data)
for obj in self.hm.values():
hm.append(obj.data)
_dmp(os.path.join(jsondir,"%s_drv.json"%self._json_dump_robot), drv)
_dmp(os.path.join(jsondir,"%s_hm.json"%self._json_dump_robot), hm)
def derive_homes(self):
#for cid, crt in self.crt.items():
# for op in crt.data['data']:
# if len(op[DATA_INDEX_DRV]) > 0:
# drv = self._get_drv(op[DATA_INDEX_DRV], True)
# reqm = op[DATA_INDEX_REQM]
# m = op[DATA_INDEX_M]
# reqd = op[DATA_INDEX_REQD]
# d = op[DATA_INDEX_D]
# dh = op[DATA_INDEX_DH]
# mh = op[DATA_INDEX_MH]
# drv.data_import(reqm, m, reqd, d, mh, dh, cid)
for id,drv in self.drv.items():
for (epoch, latency, home, cid, optype) in drv.get_latencies():
if home and len(home) >3:
hobj = self._get_hm(home, True)
hobj.handle_event(epoch, latency, id, cid, optype)
def stats(self):
if FULLRUN or 1:
### generate global cartridge statistics
with open(self._global_crt_csv, 'w') as csv_file:
atts = ['id']
atts.extend(GLOBAL_CRT)
special = []
special.extend(SPECIAL_CRT)
sortres = []
lineBuf = StringIO.StringIO()
cnt=1
for id in sorted(self.crt.keys()):
obj = self.crt.get(id)
res = obj.stats(atts,special)
if lineBuf.len==0:
lineBuf.write("index")
sortres = sorted(res.keys())
for k in sortres:
lineBuf.write(";%s"%k)
lineBuf.write("\n%i"%cnt)
for k in sortres:
linebuf_write(lineBuf, res.get(k,0))
#lineBuf.write("\n")
cnt+=1
csv_file.write(lineBuf.getvalue())
csv_file.close()
lineBuf.flush()
if FULLRUN or 1:
### generate global cartridge statistics
with open(self._global_hm_csv, 'w') as csv_file:
sortres = []
lineBuf = StringIO.StringIO()
cnt=1
for id in sorted(self.hm.keys()):
obj = self.hm.get(id)
res = obj.get_stats(GLOBAL_HM)
if lineBuf.len==0:
lineBuf.write("id;index")
sortres = sorted(res.keys())
for k in sortres:
lineBuf.write(";%s"%k)
lineBuf.write("\n")
lineBuf.write("%s;%i"%(id,cnt))
for k in sortres:
linebuf_write(lineBuf, res.get(k,0))
lineBuf.write("\n")
cnt+=1
csv_file.write(lineBuf.getvalue())
csv_file.close()
lineBuf.flush()
if FULLRUN or 1:
### generate global drive statistics
with open(self._global_drv_csv, 'w') as csv_file:
atts = ['id']
atts.extend(GLOBAL_DRV)
special = []
special.extend(SPECIAL_DRV)
lineBuf = StringIO.StringIO()
sortres = []
cnt=1
for id in sorted(self.drv.keys()):
obj = self.drv.get(id)
res = obj.stats(atts,special)
if lineBuf.len==0:
lineBuf.write("id;index")
sortres = sorted(res.keys())
for k in sortres:
lineBuf.write(";%s"%k)
lineBuf.write("\n")
lineBuf.write("%s;%i"%(id,cnt))
for k in sortres:
linebuf_write(lineBuf,res.get(k,0))
lineBuf.write("\n")
cnt+=1
csv_file.write(lineBuf.getvalue())
csv_file.close()
lineBuf.flush()
### generate per timeslot statistics
if FULLRUN or 0: # stopit
tup = []
tup.append((self.crt,'cartridges'))
#tup.append((self.drv,'drives'))
for (dataref,stringref) in tup:
for key in ['per_hour','per_day','per_week','per_month', 'per_year']:
for opt in [TOTAL_MNT_TIME, TOTAL_MNTS, HOTNESS]:
f = os.path.join(self.outputdir, '%s_%s_%s.csv'%(stringref, opt, key))
ids = []
with open(f, 'w') as csv_file:
d = {}
for id, obj in sorted(dataref.items()):
if id not in ids:
ids.append(id)
res = obj.pertime(obj.data['data'])
for ts,data in res[key].items():
if ts not in d.keys():
d[ts] = {}
d[ts][id] = data.get(opt,0)
lineBuf = StringIO.StringIO()
lineBuf.write("timestamp")
for id in ids:
lineBuf.write(";%s"%id)
for ts in sorted(d.keys()):
lineBuf.write('\n%s'%ts)
for id in ids:
linebuf_write(lineBuf,d[ts].get(id,0))
csv_file.write(lineBuf.getvalue())
csv_file.close()
lineBuf.flush()
if FULLRUN or 0:
for id,hobj in self.hm.items():
hobj.stats()
def robot_read(self):
def _handle_file(filelist, obj):
for filename in filelist:
with gzip.open(filename, 'r') as source_file:
for line in source_file:
match = re_line.search(line)
if match:
g = match.groups()
epoch = get_epoch(g[0])
action = g[1]
cartridge_id = g[2]
library_pos = g[3] # not used right now.
rawdrive = string.split(g[5], ',')
if len(rawdrive)>3:
drive = "%i%02i%i%02i"%(int(rawdrive[0]),int(rawdrive[1]),int(rawdrive[2]),int(rawdrive[3]))
else:
drive = rawdrive
if action == "MOUNT":
obj.robot_mount(cartridge_id, epoch, drive, library_pos)
continue
if action == "DISMOUNT": # be aware of cleaning cartridges
obj.robot_dismount(cartridge_id, epoch, drive, library_pos)
continue
# elif action == "ENTER":
# self.handle_enter(cartridge_id, epoch)
# elif action == "EJECT":
# self.handle_eject(cartridge_id, epoch)
# elif action == 'ACSMV':
# self.handle_move(epoch, library_pos, drive)
if action == 'ACSCR':
if line.__contains__('STATUS_VOLUME_NOT_FOUND'):
continue
print 'unparsed' ,line,
re_line = re.compile(".*([0-9]{8}:[0-9]{6}).* (ACSMV|ACSCR|AUDIT|EJECT|ENTER|MOUNT|DISMOUNT) ([0-9a-zA-Z]{6}) Home ([0-9,]*) ([a-zA-Z\s]+) ([0-9,]*) .*")
re_line_not_found = re.compile(".*([0-9]{8}:[0-9]{6}).* (ACSCR|AUDIT) ([0-9a-zA-Z]{6}).* (STATUS_VOLUME_NOT_FOUND) .*")
numthrads = 2
threadfiles = {}
for i in range(numthrads):
threadfiles[i]= []
all_log_files = sorted(glob.glob(self.robotdir))
#for i in all_log_files:
# index = all_log_files.index(i)%numthrads
# threadfiles[index].append(i)
#threads = []
#for i in range(numthrads):
# t1 = threading.Thread(target=_handle_file, args=(threadfiles[i],self))
# threads.append(t1)
# t1.start()
#for i in threads:
# i.run()
# alive = 0
if 1:
for filename in all_log_files:
print filename
with Timer("Finished: %s:" % (filename)):
with gzip.open(filename, 'r') as source_file:
for line in source_file:
# alive +=1
match = re_line.search(line)
# if not alive%1000:
# print line,
# alive=0
if match:
g = match.groups()
epoch = get_epoch(g[0])
action = g[1]
cartridge_id = g[2]
library_pos = g[3] # not used right now.
rawdrive = string.split(g[5], ',')
if len(rawdrive)>3:
drive = "%i%02i%i%02i"%(int(rawdrive[0]),int(rawdrive[1]),int(rawdrive[2]),int(rawdrive[3]))
else:
drive = rawdrive
if action == "MOUNT":
self.robot_mount(cartridge_id, epoch, drive, library_pos)
continue
if action == "DISMOUNT": # be aware of cleaning cartridges
self.robot_dismount(cartridge_id, epoch, drive, library_pos)
continue
# elif action == "ENTER":
# self.handle_enter(cartridge_id, epoch)
# elif action == "EJECT":
# self.handle_eject(cartridge_id, epoch)
# elif action == 'ACSMV':
# self.handle_move(epoch, library_pos, drive)
if action == 'ACSCR':
if line.__contains__('STATUS_VOLUME_NOT_FOUND'):
continue
print 'unparsed' ,line,
print 'total', self.robot_totalcnt, 'error', self.robot_errorcnt_crt, self.robot_errorcnt_drv
def whpss_read(self):
re_generic_crt = re.compile(".* cartridge\s*=\s*\"([0-9A-Z]+)\".*")
re_generic_drv = re.compile(".* drive = \"([0-9]+)\".*")
# 05/26 00:15:46 ***
re_time = re.compile("([0-9]{2})/([0-9]{2}) ([0-9]{2}):([0-9]{2}):([0-9]{2}) ([A-Z]+) .*")
re_filename_date = re.compile("([0-9]{4})([0-9]{2})([0-9]{2})_([0-9]{2})([0-9]{2})([0-9]{2}).gz")
re_crtanddrv = re.compile(".* cartridge = \"([0-9A-Z]+)\", drive = \"([0-9]+)\".*")
# 03/12 20:53:36 RQST PVRS0004 Entering pvr_Mount, cartridge = "CC0968", drive = "0"
re_pvr_Mount = re.compile(".* pvr_Mount,.*cartridge = \"([0-9A-Z]+)\".*")
re_pvl_Mount = re.compile(".* \"pvl_MountAdd\", .* arg = \"([0-9A-Z]+)\".*")
# 03/07 17:51:11 DBUG PVRS0379 STK Request: acs_mount: seq= 23557, cart= WB2125
re_acs_mount = re.compile(".* cart= ([0-9A-Z]+)")
# 05/26 00:15:46 RQST PVLS0002 Exiting, function = "pvl_MountCompleted", jobid = "11644979", drive = "101101", arg = "WB3134"
re_pvl_MountCompleted = re.compile(".* jobid = \"([0-9]+)\".* drive = \"([0-9]+)\", arg = \"([0-9A-Z]+)\".*")
re_pvl_MountCompleted2 = re.compile(".* jobid = \"([0-9]+)\".* arg = \"([0-9A-Z]+)\"")
re_warning = re.compile(".* jobid = \"([0-9]+)\".* arg = \"([0-9A-Z]+)\"")
re_warn_d1 = re.compile(".* Error reading device ([0-9]+).* vol ([A-Z0-9]+).*")
re_warn_drive = re.compile(".* Drive disabled, drive = \"(([0-9]+))\".*")
re_warn_c = re.compile(".*Could not mount volume\: ([A-Z0-9]+)\:.*")
re_evnt_drive_enabl = re.compile(".* Drv= ([0-9]+), Enabled.*")
re_alrm_a = re.compile(".*Dismount ([A-Z0-9]+) pends.*")
re_pvr_DismountCart = re.compile(".*cartridge = \"([0-9A-Z]+)\".*")
re_pvl_DismountVol = re.compile(".*arg\s*=\s*\"([0-9A-Z]+)\".*")
# 03/01 08:28:11 RQST PVRS0012 Entering pvr_Inject, cartridge = "RC2773", drive = "0"
re_pvr_Inject = re.compile(". cartridge = \"([0-9A-Z]+)\"")
# 03/01 08:23:37 EVNT PVRS0043 Ejecting cartridge="P54892", manufacturer="IBM LTO3-1", lot="Jul09", began service Tue Aug 25 12:44:27 2009, last maintained Thu Jan 1 00:00:00 1970, last mounted Tue Feb 26 22:10:33 2013, total mounts=12, mounts since last maintained=12
re_pvr_Eject = re.compile(".* cartridge = \"([0-9A-Z]+)\".*")
# 01/02 18:34:59 MINR PVRS0141 Robot unable to find cartridge, cartridge = "CB5564", drive = "1,3,1,3", drive = "0"
re_minr_lost_cartridge = re.compile(".* MINR .* Robot unable to find .* cartridge = \"([0-9A-Z]+)\", drive = \"([,0-9]+)\", .*")
re_minr_lost_cartridge2 = re.compile(".* MINR .* Robot unable to find .* cartridge = \"([0-9A-Z]+)\", drive = \"0\".*")
re_minr_drv_stuck = re.compile('.* drive in use or locked by .* drive = \"([0-9,]+)\", drive .*')
# nunn lines
re_nunn_importerr = re.compile(".*Import of cartridge '([A-Z0-9]+)' failed.*")
last_epoch=0
log_creation_date = None
files = sorted(glob.glob(os.path.join(self.whpssdir,'whpss_log_*.gz')))
for f in files:
m = re.search(re_filename_date, os.path.basename(f))
if not m:
print ("ERROR, cannot process invalid file name: %s" % (f))
return
x = m.groups()
if not log_creation_date:
log_creation_date = datetime.datetime(int(x[0]), int(x[1]),int(x[2]),int(x[3]),int(x[4]),int(x[5]))
print "--------------------------------------------------------------------------------------------------"
print "Reading file %s.\n"%f
with gzip.open(f, 'r') as source:
for line in source.readlines():
try:
time_match = re_time.search(line)
if time_match:
x = time_match.groups()
log_entry_date = datetime.datetime(log_creation_date.year, int(x[0]), int(x[1]),int(x[2]),int(x[3]),int(x[4]))
epoch = int(calendar.timegm(log_entry_date.utctimetuple()))
if epoch >= last_epoch:
# chronological order
last_epoch = epoch
else:
print "last epoch", last_epoch
print 'current epoch', epoch, x
if int(x[0])==1 and int(x[1])==1:
log_creation_date = datetime.datetime(log_creation_date.year+1, int(x[0]), int(x[1]),int(x[2]),int(x[3]),int(x[4]))
log_entry_date = datetime.datetime(log_creation_date.year, int(x[0]), int(x[1]),int(x[2]),int(x[3]),int(x[4]))
epoch = int(calendar.timegm(log_entry_date.utctimetuple()))
print 'fixed epoch', epoch
#print "Hard abort due to chronologic error in line: \n\t%s" % (line)
#sys.exit(1)
#raw_input("Is this ok?")
if line.__contains__(" DBUG "):
continue
if line.__contains__(' RQST '):
if line.__contains__('Entering'):
if line.__contains__("pvl_MountAdd"): # ok #
match = re_pvl_Mount.search(line)
if match:
cartridge_id = match.groups()[0][:6]
if cartridge_id[:2] not in crtfilter_b and cartridge_id[:1] not in crtfilter_a:
self.handle_event(FSM_EVNT_VOLMNT, {'cid':cartridge_id, 'epoch':epoch})
continue
if line.__contains__('pvr_Mount'):
match2 = re_pvr_Mount.search(line)
if match2:
cartridge_id = match2.groups()[0][:6]
if cartridge_id[:2] not in crtfilter_b and cartridge_id[:1] not in crtfilter_a:
self.handle_event(FSM_EVNT_MNTREQ, {'cid':cartridge_id, 'epoch':epoch})
continue
if line.__contains__("pvr_DismountCart"):
match = re_pvr_DismountCart.search(line)
if match:
cartridge_id = match.groups()[0][:6]
if cartridge_id[:2] not in crtfilter_b and cartridge_id[:1] not in crtfilter_a:
self.handle_event(FSM_EVNT_DISMNTCRTREQ, {'cid':cartridge_id, 'epoch':epoch})
continue
if line.__contains__("pvl_DismountVolume"):
match = re_pvl_DismountVol.search(line)
if match:
cartridge_id = match.groups()[0][:6]
if cartridge_id[:2] not in crtfilter_b and cartridge_id[:1] not in crtfilter_a:
self.handle_event(FSM_EVNT_VOLDMNT, {'cid':cartridge_id, 'epoch':epoch})
continue
if line.__contains__("pvr_Inject"):
# a new cartridge is added, add it to list of injected_cartridges
match = re_pvr_Inject.search(line)
if match:
self.handle_event(FSM_EVNT_INJECT, {'epoch': epoch, 'cid':match.groups()[0][:6]})
continue
if line.__contains__("pvr_Eject"):
match = re_pvr_Eject.search(line)
if match:
self.handle_event(FSM_EVNT_EJECT, {'cid':match.groups()[0][:6], 'epoch':epoch})
continue
if line.__contains__("Exiting"):
# can happen multiple times within 'cartridge_mount_process'; the last pvl_MountCompleted seen is assumed to be successful.
if line.__contains__("pvl_MountCompleted"):
match = re_pvl_MountCompleted.search(line)
if match:
job_id = match.groups()[0]
drive = match.groups()[1]
cartridge_id = match.groups()[2][:6]
if cartridge_id[:2] not in crtfilter_b and cartridge_id[:1] not in crtfilter_a:
self.handle_event(FSM_EVNT_MNTCMPLT, {'cid':cartridge_id, 'epoch':epoch, 'drive':drive})
continue
match2 = re_pvl_MountCompleted2.search(line)
if match2:
job_id = match2.groups()[0]
cartridge_id = match2.groups()[1][:6]
#print "bad mount: maybe ejected from library. check CB5564 2.jan 19.06.17 entry, jobid:", job_id, cartridge_id
self.handle_event(FSM_EVNT_FATALERROR_1, {'epoch':epoch, 'cid':cartridge_id})
continue
if line.__contains__("pvr_DismountCart"):
match = re_pvr_DismountCart.search(line)
if match:
cartridge_id = match.groups()[0][:6]
if cartridge_id[:2] not in crtfilter_b and cartridge_id[:1] not in crtfilter_a:
self.handle_event(FSM_EVNT_DISMNTCRTCMPLT, {'cid':cartridge_id, 'epoch':epoch})
continue
if line.__contains__("pvl_DeleteDrive"):
m = re_generic_drv.match(line)
if m:
self.handle_event(FSM_EVNT_DELDRIVE, {'drive':m.groups()[0], 'epoch':epoch})
continue
if line.__contains__("Entering"):
if line.__contains__("pvl_MountCompleted") or \
line.__contains__("pvl_DeleteDrive"):
continue
if line.__contains__("Exiting"):
if line.__contains__('pvl_MountAdd') or \
line.__contains__('pvr_Mount') or \
line.__contains__('pvl_DismountVolume') or \
line.__contains__('pvr_Eject') or \
line.__contains__('pvr_Inject') :
continue
docont = False
for x in ['pvl_RequestSetAttrs', 'pvl_RequestGetAttrs','pvl_MountNew',
'pvr_CartridgeGetAttrs', 'pvr_CartridgeSetAttrs', 'pvr_MountComplete',
'pvl_MountCommit',"pvl_QueueGetAttrs","pvl_QueueSetAttrs",'pvl_DriveSetAttrs',
'pvl_VolumeGetAttrs', 'pvl_VolumeSetAttrs', 'pvl_PVLSetAttrs',
"pvr_ServerSetAttrs",'pvl_ServerSetAttrs', 'pvl_DismountJobId',
'pvl_AllocateVol', 'pvl_DeallocateVol', 'pvl_Import', 'pvl_Export',
'pvl_CheckInCompleted', 'pvl_DriveGetAttrs',
'pvl_CreateDrive','pvl_Terminate', 'pvl_Move',
'pvl_CancelAllJobs']:
if line.__contains__(x):
docont = True
break
if docont:
continue
if line.__contains__(' MINR '):
m = re_minr_lost_cartridge.search(line)
if m:
cartridge_id = m.groups()[0][:6]
drive = m.groups()[1]
rawdrive = string.split(drive, ',')
if len(rawdrive)>3:
drive = "%i%02i%i%02i"%(int(rawdrive[0]),int(rawdrive[1]),int(rawdrive[2]),int(rawdrive[3]))
self.handle_event(FSM_EVNT_ROB1, {'epoch':epoch, 'cid':cartridge_id, 'drive':drive})
continue
m = re_minr_lost_cartridge2.match(line)
if m:
cartridge_id = m.groups()[0][:6]
self.handle_event(FSM_EVNT_ROB1, {'epoch':epoch, 'cid':cartridge_id})
continue
m = re_minr_drv_stuck.match(line)
if m:
drive = m.groups()[0]
rawdrive = string.split(drive, ',')
if len(rawdrive)>3:
drive = "%i%02i%i%02i"%(int(rawdrive[0]),int(rawdrive[1]),int(rawdrive[2]),int(rawdrive[3]))
self.handle_event(FSM_EVNT_FATALERROR_1, {'epoch':epoch, 'drive':drive})
continue
if line.__contains__("Intervention necessary"):
m = re_generic_crt.match(line)
if m:
cartridge_id = m.groups()[0]
self.handle_event(FSM_EVNT_FATALERROR_1, {'epoch':epoch, 'cid':cartridge_id})
continue
if line.__contains__('Dismount failed due to STK IN_TRANSIT status'):
m = re_generic_crt.match(line)
if m:
self.handle_event(FSM_EVNT_FATALERROR_1, {'epoch':epoch, 'cid':m.groups()[0]})
continue
if line.__contains__("Could not mount volume"):
m =re_warn_c.match(line)
if m:
cr = m.groups()[0][:6]
self.handle_event(FSM_EVNT_FATALERROR_1,{'epoch':epoch, 'cid':cr})
continue
if line.__contains__("Drive Disabled"):
m = re_pvl_MountCompleted.match(line) # to extract vars
if m:
j, drv, crt = m.groups()
self.handle_event(FSM_EVNT_FATALERROR_1, {'epoch':epoch, 'cid':crt[:6], 'drive':drv})
continue
if line.__contains__("Not owner") or \
line.__contains__(" Rewind of device 40") or \
line.__contains__("Stage failed, all retries exhausted") or \
line.__contains__("Request for locked or disabled device") or \
line.__contains__('Unexpected error in LTO Library') or \
line.__contains__('LTO I/O failure') or \
line.__contains__('"sendIOD"') or \
line.__contains__('hpss_RPCGetReply') or \
line.__contains__("gk_Cleanup") or \
line.__contains__("gk_Close failed") or \
line.__contains__('Invalid parameters passed to LTO Library') or \
line.__contains__('Open of device 4') or \
line.__contains__('pos failed on dev 4') or \
line.__contains__('Retrying stage from level') or \
line.__contains__('Cartridge not found in LTO library') or \
line.__contains__("Read of label on") or \
line.__contains__('Can not find the PVL') or \
line.__contains__('all retries exhausted') or \
line.__contains__('locked by non-HPSS') or \
line.__contains__('SCSI ') or \
line.__contains__('hpss_RPCSendRequest') or \
line.__contains__('Forward space file failed') or \
line.__contains__('Verification of label on dev') or \
line.__contains__('Open of device') or \
line.__contains__('No space left on device') or \
line.__contains__('Metadata manager error') or \
line.__contains__('Connection refused') or \
line.__contains__('Connection timed out') or \
line.__contains__('ACSLM spawned process') or \
line.__contains__('Cannot Establish Connection') or \
line.__contains__('VV metadata') or \
line.__contains__('Open of delog') or \
line.__contains__('database deadlock condition') or \
line.__contains__('to execute stateme') or \
line.__contains__('LOCKING the DRIVE') or \
line.__contains__('Returned, function') or\
line.__contains__('storage service start') or \
line.__contains__('Invalid session') or \
line.__contains__('repair to server') or \
line.__contains__('MM error'):
continue # minor
if line.__contains__(' WARN '):
# drive = "102100", arg = "XB063500"
#m = re_warning.match(line)
#if m:
#self.handle_warning(epoch, m.groups()[0], m.groups()[1][:6])
m2 = re_warn_drive.match(line)
if m2:
drive = m2.groups()[0]
self.handle_event(FSM_EVNT_FATALERROR_1, {'drive':drive,'epoch':epoch})
continue
m3 = re_warn_d1.match(line)
if m3:
cartridge_id = m3.groups()[1][:6]
drive = m3.groups()[0]
#self.handle_event(FSM_EVNT_FATALERROR_1, {'drive':drive, 'cid':cartridge_id, 'epoch':epoch})
continue
if line.__contains__("will retry in another drive,"):
m = re_crtanddrv.match(line)
if m:
crt = m.groups()[0]
drv = m.groups()[1]
self.handle_event(FSM_EVNT_D2DMV, {'drive':drv, 'epoch':epoch, 'cid':crt})
continue
if line.__contains__('"read_label"') or \
line.__contains__('are disabled,') or \
line.__contains__("LOCKING the DRIVE will exit the dismount loop") or \
line.__contains__(' no response from robot') or \
line.__contains__('rtm_GetRequestEntries') or \
line.__contains__('NOT exist in DriveTable') or \
line.__contains__('cartridge = "MP') or \
line.__contains__('label written') or \
line.__contains__('Client Cancels All Jobs') or \
line.__contains__('Job recovered, di') or \
line.__contains__('hardware defined in HPSS does not exist') or \
line.__contains__('Dismount reason') or \
line.__contains__('Job not found in queue') or \
line.__contains__(' PVR) are disabled, arg = "MA') or \
line.__contains__('Cache Overflow') or \
line.__contains__('Cartridge has not been checked in') or \
line.__contains__('No drives of this type in') or \
line.__contains__('not found in LTO') or \
line.__contains__('Not enough drives of this type') or \
line.__contains__('Drive Notify failed') or \
line.__contains__('= "eject_cart"') or \
line.__contains__('information request failed') or \
line.__contains__(' STATUS_') or \
line.__contains__('Address types'):
continue #warn
if line.__contains__(' Dismount reason') and line.__contains__('drive = "4'):
continue
if line.__contains__(' EVNT '):
m1 = re_evnt_drive_enabl .match(line)
if m1:
drive = m1.groups()[0]
self.handle_event(FSM_EVNT_RECOVER_FAT1, {'drive':drive, 'epoch':epoch})
continue
if line.__contains__("Client logged ") or \
line.__contains__('Total Drive Count') or \
line.__contains__(' logfiles ') or \
line.__contains__('Storage map state') or \
line.__contains__("CONAN") or \
line.__contains__("erver 'Mover") or \
line.__contains__("'STK PVR'") or \
line.__contains__("Connection table full") or \
line.__contains__("Open files on connection shutdown") or \
line.__contains__("Repack Completed SClassId") or \
line.__contains__('robot is offline, drive = "0') or \
line.__contains__("Deferred state change") or \
line.__contains__("End of media on ") or \
line.__contains__('Reclaim completed for storage') or \
line.__contains__("Request w/o client") or \
line.__contains__("Exporting cartridge") or \
line.__contains__("Export of ") or \
(line.__contains__(", Disabled") and line.__contains__("dmin drive change") )or\
line.__contains__("av_Initialize") or \
line.__contains__("Mount failed, no drives") or \
line.__contains__("Import of cartridge ") or \
line.__contains__("STK volume ejects are done asynchronously") or \
line.__contains__("could not be mounted, Condition") or \
line.__contains__('Job not found in queue') or \
line.__contains__("Core Server shutting") or \
line.__contains__('All disk storage maps') or \
line.__contains__('SSMS0115') or \
line.__contains__('Core Server Shutdown Complete') or \
line.__contains__('Running with restricted') or \
line.__contains__('No initialization is necessary') or \
line.__contains__('Reissuing ') or \
line.__contains__(' in PVR') or \
line.__contains__('mm_ReadPVR') or \
line.__contains__('Ejecting cartridge=') or \
line.__contains__('Core Server startup') or \
line.__contains__('Starting server') or \
line.__contains__('been shutdown') or \
line.__contains__('Delog complete') or \
line.__contains__('Startup of server') or \
line.__contains__('core_SignalThread') or \
line.__contains__('has been renamed') or \
line.__contains__('abel written') or \
line.__contains__('CHECK_DISK_') or \
line.__contains__('Core Server Admin'):
continue #evnt
if line.__contains__(" ALRM "):
if line.__contains__(" Write request failed") or \
line.__contains__(" Read request failed") or \
line.__contains__('Data copy operation failed') or \
line.__contains__("Cannot lock VV cache record") or \
line.__contains__("Connection timed out") or\
line.__contains__("Not owner") or \
line.__contains__('No such file or ') or \
line.__contains__("HPSS system failure") or \
line.__contains__(" request descriptor table") or \
line.__contains__('Error creating credentials') or \
line.__contains__('File too large') or \
line.__contains__('hpss_FilesetGetAttributes') or \
line.__contains__('request threads busy') or \
line.__contains__('DB connection has been busy') or \
line.__contains__('Failed to get RTM records') or \
line.__contains__("Retrying read from level") or \
line.__contains__(" Rewind of device") or \
line.__contains__('PVR reports mounting a cartridge in a drive which') or \
line.__contains__('Request queue full') or \
line.__contains__('No space left on device') or \
line.__contains__('Internal software error') or \
line.__contains__('Unable to obtain the Fileset') or \
line.__contains__(' CAP priorit') or \
line.__contains__('Restricted User list') or \
line.__contains__('sending RPC reply') or \
line.__contains__('Error sending data') or \
line.__contains__('Deferred state change') or \
line.__contains__('rtm_Reconnect') or \
line.__contains__(' SAN3P ') or \
line.__contains__('Resource locked') or \
line.__contains__('missing mover error ') :
continue #alrm
if line.__contains__('Cartridge reported IN_TRANSIT'):
m = re_alrm_a.match(line)
if m:
self.handle_event(FSM_EVNT_FATALERROR_1, {'epoch':epoch, 'cid':m.groups()[0]})
continue
if line.__contains__(" NUNN "):
m = re_nunn_importerr.match(line)
if m:
self.handle_event(FSM_EVNT_FATALERROR_1, {'epoch':epoch, 'cid':m.groups()[0]})
continue
if line.__contains__('Error no owner found') or \
line.__contains__('on write()') or \
line.__contains__('SS and BFS '):
continue
if line.__contains__(" MAJR "):
if line.__contains__("Gatekeeper Server") or \
line.__contains__("RPC reply") or \
line.__contains__('hpss_ConnMgrGrabConn') or \
line.__contains__('died on host') or \
line.__contains__('not initialize socket') or \
line.__contains__('Error receiving data') or \
line.__contains__('rror obtaining transmit'):
continue
if line.__contains__("ECFS "):
if line.__contains__("CORE") or \
line.__contains__('MPS'):
continue
if line.__contains__('MARS '):
if line.__contains__('CORE') or \
line.__contains__('MPS') :
continue
if line.__contains__('ROOT' ):
if line.__contains__(' MPS') or \
line.__contains__(' CORE'):
continue
if line.__contains__(' HPSS '):
continue
if line.__contains__('Checking out cartridge') or \
line.__contains__('Shutdown of server') or \
line.__contains__('Tape aggregation') or \
line.__contains__('itfile') or \
line.__contains__('PVR reestablished') or \
line.__contains__('eer uuid') or \
line.__contains__('hpss_RPC') or \
line.__contains__('RPC runtime error') or \
line.__contains__('pvr_PVRSetAttrs') or \
line.__contains__("Gatekeeper") or \
line.__contains__("GateKeeper") or \
line.__contains__('Authentication') or \
line.__contains__('Bad connection handle') or \
line.__contains__("PVR 'STK PVR") or \
line.__contains__(' log files ') or \
line.__contains__(' Mover ') or \
line.__contains__('passive side of') or \
line.__contains__('einitialization ') or \
line.__contains__('hpss_prod') or \
line.__contains__('136.156.') or \
line.__contains__(' TRAC ') or \
line.__contains__('-mvr1') or \
line.__contains__('USERSPACE1') or \
line.__contains__('pvr_Check') or \
line.__contains__('hdrv01'):
continue
if line.__contains__('Failure'):
if line.__contains__('querying') or \
line.__contains__:
continue
print "unparsed line", line,
else:
pass
#if len(line)> 16:
# print "time did not match", line
except:
print "unknown", line
raise
sys.exit()
#print " file done"
#sys.exit()
def correlation(self):
#with Timer("Correlation-Finished:"):
def _exec_(csvfile, p):
c = corrPearson()
print datetime.datetime.now()
print csvfile
c.read(csvfile)
#while True:
#p = q.get()
if p in string.ascii_uppercase:
# p = 'Y'
# for p in reversed(string.ascii_uppercase):
# print p
fields = c.filter_fields(['%s.+'%p], [])
if len(fields) > 1:
print "Run for argument ", p
res = c.full_correlation_matrix(fields, "correlation_%s"%p)
c.jsondump("%s_proj_%s.json"%(csvfile, p),res)
#sorted_res = {}
#for x in res.keys():
# for y,v in res[x].items():
# if not v in sorted_res.keys():
# sorted_res[v[0]]=[]
# sorted_res[v[0]].append((x,y))
#for i in sorted(sorted_res.keys()):
# print i, sorted_res[i]
# else:
# break
a = 'cartridges_tmt_per_hour.csv'
csvfile = os.path.join(self.outputdir,a)
#for p in ['Z']:
for p in reversed(string.ascii_uppercase):
_exec_(csvfile,p)
gc.collect()
print "done"
def highestlevelstats(self):
res = {}
res3d = []
for drv in sorted(self.drv.keys()):
curmax = max(DRV_INT_MAP.values())
DRV_INT_MAP[drv] = curmax+1
for hid in sorted(self.hm.keys()):
curmax = max(HID_INT_MAP.values())
HID_INT_MAP[hid] = curmax+1
for hid, obj in self.hm.items():
c = obj.cost_drive()
for k,v in c.items():
if v[1]< 600:
res3d.append([HID_INT_MAP[hid],DRV_INT_MAP[k],v[1], v[0]])
latency = round(v[1] ,0)
ent = res.setdefault(latency, 0)
res[latency] = ent+1
filtered = {}
for i in range(0,300):
v = res.setdefault(i, 0)
filtered[i]=v
figp = os.path.join(self.outputdir, "drive_home_latency.png")
plot_dict(filtered, figp)
costhd = os.path.join(self.outputdir, "home_drive_costs.csv")
with open(costhd , 'w') as mesh_file:
lineBuf = StringIO.StringIO()
lineBuf.write("home;drive;latency;observations")
for [x,y,z,n] in res3d:
lineBuf.write("\n%i;%i;%.1f;%.1f"%(x,y,z,n))
mesh_file.write(lineBuf.getvalue())
mesh_file.close()
#vis = fast_plot.Visualizer("Home", "Drive", "AvgLatency")
#vis.new_group_data(res3d)
#vis.show()
def checkerrors(self):
operations = 0
successful_crt_cycles = 0.0
failed_crt_cycles = 0.0
for crt, obj in self.crt.items():
successful_crt_cycles += obj.get_successful_cycles()
failed_crt_cycles += obj.get_failed_cycles()
#err = obj.checkerrors()
#if err >0:
# print crt, err#obj, err
#sys.exit(1)
print 'Failed ',failed_crt_cycles
print 'Successful:' , successful_crt_cycles
print 'Failed Percent', failed_crt_cycles/((failed_crt_cycles+successful_crt_cycles)/100)
sys.exit()
for d,obj in self.drv.items():
err = obj.checkerrors()
if err >0:
print err, d #, obj
#sys.exit(1)
o = len(obj.data['data'])
c = len(obj.data['cleaning'])
#print d, o, c
operations += o+c
print operations
def paperoutput(self):
numrmounts = 0
numvmounts = 0
tapemnts = {}
for cid, obj in self.crt.items():
r,v,e = 0,0,0
for m in obj.data['data']:
r += 1
v += len(m[DATA_INDEX_VOLUME])
for m in obj.data['errors']:
if m['m']>0:
e += 1
numvmounts += v
numrmounts += r
tapemnts[cid] = (r,v,e)
self._jsondmp(tapemnts, 'crtmnts.json', self.basedir)
print "Tapemount stats done"
# general data
# number of tapes:
numtapes = len(self.crt.keys())
numdrives = len(self.drv.keys())
numhomes = len(self.hm.keys())
print '#Robot mounts', numrmounts
print '#Volume mounts', numvmounts
print '#Tapes', numtapes
print '#Drives', numdrives
print '#Homes', numhomes
def _jsondmp(self, obj, file, basedir=None):
x = file
if basedir:
x = os.path.join(basedir, file)
with open(x, 'w') as f:
json.dump(obj, f, indent=1)
def parallel_correlation_analysis(self):
print " entering parallel correlation analysis "
interval_secs = [-1800, -1200, -600, -300, -120, -60, 0, 60, 120, 300, 600,1200, 1800]
pat = re.compile(".*cartridges_tmt_per_hour\.csv_proj_([A-Z]+)\.json")
marstotal = {}
for i in interval_secs:
marstotal[i] = []
marstotal['possum']=0
marstotal['negsum']=0
marstotal['totalerror']=0
lineBuf = StringIO.StringIO()
lineBuf.write("Project")
for i in sorted(interval_secs):
lineBuf.write(";%i"%i)
lineBuf.write(";TotalFail;NegativeSum;PositiveSum;Correlation0.8Pairs;AllPairs")
infiles = glob.glob(os.path.join(self.outputdir, 'cartridges_tmt_per_hour.csv_proj_*.json'))
for infile in reversed(infiles):
scnt=0
cnt = 0
gc.collect()
print "Starting with file ", infile
totalsuccess,totalfail = 0,0
correlated = {}
m = pat.match(infile)
project = m.groups()[0]
lineBuf.write("\n%s"%project)
with open(infile, 'r') as f:
entries = json.load(f)
t = len(entries)
for (x,y,p,s) in entries:
#print "Entry ", cnt, ' of ', t
if p >= 0.8 :
add_correlated(correlated, x,y)
scnt += 1
cnt += 1
print "finished generating correlations, found %s, failed:%i"%(scnt,cnt)
#globalsuccess = Counter()
#for i in sorted(interval_secs):
# globalsuccess[i] = []
#lock = multiprocessing.Lock()
prefix = "correlation_result_%s"%project
corrkeys = multiprocessing.Queue()
for cid in sorted(correlated.keys()):
corrkeys.put(cid)
procs = []
for i in range(multiprocessing.cpu_count()):
proc = multiprocessing.Process(target=calc_correlation, args=(corrkeys, correlated, self._get_crt, prefix, interval_secs))
proc.daemon=False
procs.append(proc)
proc.start()
for p in procs:
#p = procs.pop()
p.join()
print "waiting for process ", p
possum, negsum = 0,0
r2 = {}
for i in interval_secs:
r2[i]=[]
files = glob.glob(os.path.join("/tmp", "%s_*.json.gz"%(prefix)))
for file in files:
print 'reading',file
with gzip.open(file, 'r') as f:
result = json.load(f)
for interv, entries in result['interval'].items():
#print interv, entries
i = int(interv)
r2[i].extend(entries)
if i > 0:
possum += len(entries)
else:
negsum += len(entries)
totalfail += result['errorcnt']
if project in ['R','S', 'U','V','W', 'X', 'Y']:
marstotal['totalerror'] += totalfail
marstotal['possum'] += possum
marstotal['negsum'] += negsum
for i in sorted(interval_secs):
marstotal[i].extend(r2[i])
for i in sorted(interval_secs):
lineBuf.write(";%s"%len(r2[i]))
lineBuf.write(";%i;%i;%i"%(totalfail,negsum,possum))
lineBuf.write('\nMARS')
for i in sorted(interval_secs):
lineBuf.write(";%s"%len(marstotal[i]))
lineBuf.write(";%i;%i;%i;%i;%i"%(marstotal['totalerror'],marstotal['negsum'],marstotal['possum'],scnt,cnt))
with open(os.path.join(self.outputdir, "crt_correlation.csv"), 'w') as csv_file:
csv_file.write(lineBuf.getvalue())
csv_file.close()
lineBuf.flush()
def debug(self):
pat = re.compile(".*cartridges_tmt_per_hour\.csv_proj_([A-Z]+)\.json")
total=0
slot='per_day'
lock = multiprocessing.Lock()
if FULLRUN or 1: # tape system stats
#res = self.drv.values()[0].pertime()
res = set()
atts = set()
#tsset = set()
slotted_ts = {}
crtlist = []
drvlist=[]
wq1 = multiprocessing.Queue()
for id in self.drv.keys():
wq1.put(id)
drvlist.append(id)
for id in self.crt.keys(): # not sorted
wq1.put(id)
crtlist.append(id)
procs = []
for i in range(multiprocessing.cpu_count()):
proc = multiprocessing.Process(target=_cb_pertime, args=(wq1,))
proc.daemon=False
procs.append(proc)
proc.start()
for p in procs:
p.join()
print "waiting for process ", p
print datetime.datetime.now(), "starting"
for id, drv in self.drv.items():
tmp = drv.pertime()
for k,v in tmp.items():
print datetime.datetime.now(), id, "drv.pertime ", k
if not k in slotted_ts.keys():
slotted_ts[k]=[]
slot = slotted_ts.get(k)
for ts in v.keys():
if not ts in slot:
slot.append(ts)
res.update([k])
if len(atts)==0:
atts.update(v[v.keys()[0]].keys())
#tsset.update(v.keys())
x = Counter()
prefix = 'tapestats'
print "first loop done ", datetime.datetime.now()
gc.collect()
for slot in res:
x[slot]={}
print datetime.datetime.now(), "slot " , slot
inqueue = multiprocessing.Queue()
for tsstr in sorted(slotted_ts[slot]):
inqueue.put(tsstr)
procs = []
#for i in range(1):
for i in range(multiprocessing.cpu_count()/2):
proc = multiprocessing.Process(target=_tapestats, args=(inqueue, slot, crtlist, self._get_crt, drvlist, self._get_drv, atts, prefix))
proc.daemon=False
procs.append(proc)
proc.start()
for p in procs:
p.join()
print "waiting for process ", p
files = glob.glob("/tmp/%s_*.json"%prefix)
for file in files:
print "reading ", file
res = _cb_gzipload(file)
for slot,elems in res.items():
if not slot in x.keys():
x[slot]={}
xs = x[slot]
for ts, tmpatt in elems.items():
if ts not in xs.keys():
xs[ts] = {}
xsts = xs[ts]
for a,v in tmpatt.items():
xsts[a] = v
for slot in sorted(x.keys()):
atts.add(VOL_MNT_LENGTH)
atts.add(CLN_TIME_LENGTH)
f = os.path.join(self.outputdir,"tapesystem_%s.csv"%slot)
print f
with open(f, 'w') as csv_file:
lineBuf = StringIO.StringIO()
lineBuf.write("timestamp")
for att in atts:
lineBuf.write(";%s"%att)
for ts in sorted(x[slot].keys()):
lineBuf.write("\n%s"%ts)
for att in sorted(atts):
linebuf_write(lineBuf,x[slot][ts].get(att, 0) )
csv_file.write(lineBuf.getvalue())
csv_file.close()
lineBuf.flush()
def debug2(self):
res = {}
files = glob.glob(os.path.join(self.outputdir, "drives","crt_*_pertime.json"))
for file in files:
#print file
with open(file, 'r') as f:
tmp = json.load(f)
for slot, slotobj in tmp.items():
if slot != 'per_hour':
continue
if not slot in res.keys():
res[slot] = {}
for ts, args in slotobj.items():
if not ts in res[slot].keys():
res[slot][ts] = {}
for k,v in args.items():
if not k in res[slot][ts].keys():
res[slot][ts][k]=0
res[slot][ts][k] += v
#print slot,ts,k,v
for slot in res.keys():
if slot != 'per_hour':
continue
for tsstr in sorted(res[slot].keys()):
ts = datetime.datetime.strptime(tsstr, "%Y-%m-%d %H:%M:%S")
epochts = int(calendar.timegm(ts.utctimetuple()))
epochse = epochts + get_slot_size_seconds(slot, ts.month, ts.year)
# print slot,ts
res[slot][tsstr][VOL_MNT_LENGTH]=0
res[slot][tsstr][CLN_TIME_LENGTH]=0
for crt in self.crt.values(): # not sorted
res[slot][tsstr][VOL_MNT_LENGTH] += crt.get_volume(slot, ts)
for drv in self.drv.values():
res[slot][tsstr][CLN_TIME_LENGTH] += sum(drv.estimate_cleaning_time(epochts,epochse))
self._jsondmp(res, 'taperesults.json', self.outputdir)
atts = [CLN_TIME_LENGTH,VOL_MNT_LENGTH,TOTAL_MNT_TIME,TOTAL_MNTS]
for slot,slotobj in res.items():
if slot != 'per_hour':
continue
file = os.path.join(self.outputdir, "tapesystem2_%s.csv"%slot)
with open(file, 'w') as csv_file:
lineBuf = StringIO.StringIO()
lineBuf.write("timestamp")
for att in atts:
lineBuf.write(";%s"%att)
for ts in sorted(slotobj.keys()):
lineBuf.write("\n%s"%ts)
for att in atts:
linebuf_write(lineBuf,slotobj[ts].get(att, 0) )
csv_file.write(lineBuf.getvalue())
csv_file.close()
lineBuf.flush()
def debug3(self):
# cdf time between mounts
res = []
for cid, crt in self.crt.items():
res.extend(crt.get_tbm())
file = os.path.join(self.outputdir, "timebetweenmounts_.csv")
r = sorted(res)
with open(file, 'w') as csv_file:
lineBuf = StringIO.StringIO()
lineBuf.write("percentile;timebetweenmounts")
for p in [0.01,0.05,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99]:
pt = percentile(r,p)
lineBuf.write("\n%s;%s"%(p,pt))
csv_file.write(lineBuf.getvalue())
csv_file.close()
lineBuf.flush()
res = []
for cid, crt in self.crt.items():
res.extend(crt.get_latency())
file = os.path.join(self.outputdir, "latency.csv")
r = sorted(res)
with open(file, 'w') as csv_file:
lineBuf = StringIO.StringIO()
lineBuf.write("percentile;latency")
for p in [0.01,0.05,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.99]:
pt = percentile(r,p)
lineBuf.write("\n%s;%s"%(p,pt))
csv_file.write(lineBuf.getvalue())
csv_file.close()
lineBuf.flush()
# usage:
# 1. aggregate results, using the robot analyse py "aggregate" function
# 2. run this script with either
# a) a full run (takes very long; exact duration unknown)
# b) "-w" to load the whpss data and
# "-r" to integrate robot logs
# "-z" to check whether the robot logs fixed some broken whpss entries
# "-y" to derive "home" classes
# 3. "-c" to compute the correlations
# "-s" to collect general statistics
# "-p" for some stats generated specifically for the fast paper (not all of them are generated here!)
# (see the example invocations below)
if __name__ == '__main__':
if len(sys.argv)>1:
if '-d' in sys.argv:
print "daemonizing"
python_daemon.createDaemon()
sys.stdout.close() #we close /dev/null
sys.stderr.close()
os.close(2) # and associated fd's
os.close(1)
# now we open a new stdout
# * notice that underlying fd is 1
# * bufsize is 1 because we want stdout line buffered (it's my log file)
sys.stdout = open('/tmp/bla','w',1) # redirect stdout
os.dup2(1,2) # fd 2 is now a duplicate of fd 1
sys.stderr = os.fdopen(2,'a',0) # redirect stderr
# from now on sys.stderr appends to fd 2
# * bufsize is 0, I saw this somewhere, I guess no bufferization at all is better for stderr
# now some tests... we want to know if it's bufferized or not
print "stdout"
print >> sys.stderr, "stderr"
os.system("echo stdout-echo") # this is unix only...
os.system("echo stderr-echo > /dev/stderr")
# cat /tmp/bla and check that it's ok; to kill use: pkill -f bla.py
dodump = False
if '-f' in sys.argv:
FULLRUN = True
x = os.path.join(os.getcwd(),sys.argv[1])
god = God(x)
if '-c' in sys.argv or FULLRUN:
# with Timer('Correlation (-c)'):
god.correlation()
#dodump=True
sys.exit()
elif '--correlationanalysis' in sys.argv:
#god.debug()
god.jsonload(True)
god.parallel_correlation_analysis()
sys.exit()
else:
# with Timer('jsonload'):
god.jsonload()
if '--debug' in sys.argv:
god.debug()
sys.exit()
if '--debug2' in sys.argv:
god.debug2()
sys.exit()
if '--debug3' in sys.argv:
god.debug3()
sys.exit()
# with Timer('WHPSS read (-w)'):
if '-w' in sys.argv or FULLRUN:
god.whpss_read()
# with Timer('WHPSSdone, Immediately json dump'):
god.jsondump()
#with Timer("Robot read: (-r)"):
if '-r' in sys.argv or FULLRUN:
god.robot_read()
# with Timer('Robotdone, Immediately json dump'):
god.jsondump()
pass
# with Timer("Check for fixed errors: (-z)"):
if '-z' in sys.argv or FULLRUN:
god.collect_recovered_errors()
dodump=True
# with Timer("derive homes (-y)"):
if '-y' in sys.argv or FULLRUN:
god.derive_homes()
dodump = True
# with Timer("Cnt errors (-e)"):
if '-e' in sys.argv:
god.checkerrors()
dodump=True
#with Timer('stats: (-s)'):
if '-s' in sys.argv or FULLRUN:
god.stats()
#dodump=True
#with Timer("Highest Level Stats (-hh)"):
if '-hh' in sys.argv or FULLRUN:
god.highestlevelstats()
#with Timer ("Generate Paper output (-p)"):
if '-p' in sys.argv or FULLRUN:
god.paperoutput()
if dodump:
# with Timer('json dump'):
god.jsondump()
| mit |
altairpearl/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 73 | 2074 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=n_samples)  # per-sample noise
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]  # integer division keeps the index an int
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, color='lightgreen', linewidth=2,
label='Elastic net coefficients')
plt.plot(lasso.coef_, color='gold', linewidth=2,
label='Lasso coefficients')
plt.plot(coef, '--', color='navy', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
ywang007/odo | odo/backends/sql.py | 1 | 22308 | from __future__ import absolute_import, division, print_function
from operator import attrgetter
import os
import re
import subprocess
from itertools import chain
from collections import Iterator
from datetime import datetime, date
from distutils.spawn import find_executable
import pandas as pd
import sqlalchemy as sa
from sqlalchemy import inspect
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import event
from sqlalchemy.schema import CreateSchema
from multipledispatch import MDNotImplementedError
import datashape
from datashape import DataShape, Record, Option, var, dshape
from datashape.predicates import isdimension, isrecord, isscalar
from datashape import discover
from datashape.dispatch import dispatch
from toolz import (partition_all, keyfilter, memoize, valfilter,
identity, concat, curry, merge)
from toolz.curried import pluck, map
from ..compatibility import unicode
from ..utils import keywords, ignoring, iter_except
from ..convert import convert, ooc_types
from ..append import append
from ..resource import resource
from ..chunks import Chunks
from .csv import CSV
base = (int, float, datetime, date, bool, str)
# http://docs.sqlalchemy.org/en/latest/core/types.html
types = {
'int64': sa.types.BigInteger,
'int32': sa.types.Integer,
'int': sa.types.Integer,
'int16': sa.types.SmallInteger,
'float32': sa.types.Float(precision=24), # sqlalchemy uses mantissa
'float64': sa.types.Float(precision=53), # for precision
'float': sa.types.Float(precision=53),
'real': sa.types.Float(precision=53),
'string': sa.types.Text,
'date': sa.types.Date,
'time': sa.types.Time,
'datetime': sa.types.DateTime,
'bool': sa.types.Boolean,
"timedelta[unit='D']": sa.types.Interval(second_precision=0,
day_precision=9),
"timedelta[unit='h']": sa.types.Interval(second_precision=0,
day_precision=0),
"timedelta[unit='m']": sa.types.Interval(second_precision=0,
day_precision=0),
"timedelta[unit='s']": sa.types.Interval(second_precision=0,
day_precision=0),
"timedelta[unit='ms']": sa.types.Interval(second_precision=3,
day_precision=0),
"timedelta[unit='us']": sa.types.Interval(second_precision=6,
day_precision=0),
"timedelta[unit='ns']": sa.types.Interval(second_precision=9,
day_precision=0),
# ??: sa.types.LargeBinary,
# Decimal: sa.types.Numeric,
# ??: sa.types.PickleType,
# unicode: sa.types.Unicode,
# unicode: sa.types.UnicodeText,
# str: sa.types.Text, # ??
}
revtypes = dict(map(reversed, types.items()))
revtypes.update({
sa.types.DATETIME: 'datetime',
sa.types.TIMESTAMP: 'datetime',
sa.types.FLOAT: 'float64',
sa.types.DATE: 'date',
sa.types.BIGINT: 'int64',
sa.types.INTEGER: 'int',
sa.types.NUMERIC: 'float64', # TODO: extend datashape to decimal
sa.types.BIGINT: 'int64',
sa.types.NullType: 'string',
sa.types.Float: 'float64',
})
# interval types are special cased in discover_typeengine so remove them from
# revtypes
revtypes = valfilter(lambda x: not isinstance(x, sa.types.Interval), revtypes)
units_of_power = {
0: 's',
3: 'ms',
6: 'us',
9: 'ns'
}
# these aren't loaded by sqlalchemy by default
sa.dialects.registry.load('oracle')
sa.dialects.registry.load('postgresql')
def batch(sel, chunksize=10000):
"""Execute `sel`, streaming row at a time and fetching from the database in
batches of size `chunksize`.
Parameters
----------
sel : sa.sql.Selectable
Selectable to execute
chunksize : int, optional, default 10000
Number of rows to fetch from the database
"""
def rowterator(sel, chunksize=chunksize):
with sel.bind.connect() as conn:
result = conn.execute(sel)
yield result.keys()
for rows in iter_except(curry(result.fetchmany, size=chunksize),
sa.exc.ResourceClosedError):
if rows:
yield rows
else:
return
terator = rowterator(sel)
return next(terator), concat(terator)
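# A minimal usage sketch for batch() (illustrative; the engine URL, the 'users'
# table and process() are invented for this example, not part of this module):
#
#   engine = sa.create_engine('sqlite:///demo.db')
#   users = sa.Table('users', sa.MetaData(engine), autoload=True)
#   columns, rows = batch(sa.select([users]), chunksize=500)
#   for row in rows:   # rows is a lazy iterator, fetched 500 rows at a time
#       process(row)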
@discover.register(sa.dialects.postgresql.base.INTERVAL)
def discover_postgresql_interval(t):
return discover(sa.types.Interval(day_precision=0,
second_precision=t.precision))
@discover.register(sa.dialects.oracle.base.INTERVAL)
def discover_oracle_interval(t):
return discover(t.adapt(sa.types.Interval))
@discover.register(sa.sql.type_api.TypeEngine)
def discover_typeengine(typ):
if isinstance(typ, sa.types.Interval):
if typ.second_precision is None and typ.day_precision is None:
return datashape.TimeDelta(unit='us')
elif typ.second_precision == 0 and typ.day_precision == 0:
return datashape.TimeDelta(unit='s')
if typ.second_precision in units_of_power and not typ.day_precision:
units = units_of_power[typ.second_precision]
elif typ.day_precision > 0:
units = 'D'
else:
raise ValueError('Cannot infer INTERVAL type with parameters'
'second_precision=%d, day_precision=%d' %
(typ.second_precision, typ.day_precision))
return datashape.TimeDelta(unit=units)
if typ in revtypes:
return dshape(revtypes[typ])[0]
if type(typ) in revtypes:
return dshape(revtypes[type(typ)])[0]
if isinstance(typ, (sa.String, sa.Unicode)):
return datashape.String(typ.length, typ.collation)
else:
for k, v in revtypes.items():
if isinstance(k, type) and (isinstance(typ, k) or
hasattr(typ, 'impl') and
isinstance(typ.impl, k)):
return v
if k == typ:
return v
raise NotImplementedError("No SQL-datashape match for type %s" % typ)
@discover.register(sa.Column)
def discover_sqlalchemy_column(col):
optionify = Option if col.nullable else identity
return Record([[col.name, optionify(discover(col.type))]])
@discover.register(sa.sql.FromClause)
def discover_sqlalchemy_selectable(t):
records = list(sum([discover(c).parameters[0] for c in t.columns], ()))
return var * Record(records)
@memoize
def metadata_of_engine(engine, schema=None):
return sa.MetaData(engine, schema=schema)
def create_engine(uri, *args, **kwargs):
if ':memory:' in uri:
return sa.create_engine(uri, *args, **kwargs)
else:
return memoized_create_engine(uri, *args, **kwargs)
memoized_create_engine = memoize(sa.create_engine)
@dispatch(sa.engine.base.Engine, str)
def discover(engine, tablename):
metadata = metadata_of_engine(engine)
if tablename not in metadata.tables:
try:
metadata.reflect(engine,
views=metadata.bind.dialect.supports_views)
except NotImplementedError:
metadata.reflect(engine)
table = metadata.tables[tablename]
return discover(table)
@dispatch(sa.engine.base.Engine)
def discover(engine):
metadata = metadata_of_engine(engine)
return discover(metadata)
@dispatch(sa.MetaData)
def discover(metadata):
try:
metadata.reflect(views=metadata.bind.dialect.supports_views)
except NotImplementedError:
metadata.reflect()
pairs = []
for table in sorted(metadata.tables.values(), key=attrgetter('name')):
name = table.name
try:
pairs.append([name, discover(table)])
except sa.exc.CompileError as e:
print("Can not discover type of table %s.\n" % name +
"SQLAlchemy provided this error message:\n\t%s" % e.message +
"\nSkipping.")
except NotImplementedError as e:
print("Blaze does not understand a SQLAlchemy type.\n"
"Blaze provided the following error:\n\t%s" % "\n\t".join(e.args) +
"\nSkipping.")
return DataShape(Record(pairs))
@discover.register(sa.engine.RowProxy)
def discover_row_proxy(rp):
return Record(list(zip(rp.keys(), map(discover, rp.values()))))
def dshape_to_table(name, ds, metadata=None):
"""
Create a SQLAlchemy table from a datashape and a name
>>> dshape_to_table('bank', '{name: string, amount: int}') # doctest: +NORMALIZE_WHITESPACE
Table('bank', MetaData(bind=None),
Column('name', Text(), table=<bank>, nullable=False),
Column('amount', Integer(), table=<bank>, nullable=False),
schema=None)
"""
if isinstance(ds, str):
ds = dshape(ds)
if metadata is None:
metadata = sa.MetaData()
cols = dshape_to_alchemy(ds)
t = sa.Table(name, metadata, *cols, schema=metadata.schema)
return attach_schema(t, t.schema)
@dispatch(object, str)
def create_from_datashape(o, ds, **kwargs):
return create_from_datashape(o, dshape(ds), **kwargs)
@dispatch(sa.engine.base.Engine, DataShape)
def create_from_datashape(engine, ds, schema=None, **kwargs):
assert isrecord(ds), 'datashape must be Record type, got %s' % ds
metadata = metadata_of_engine(engine, schema=schema)
for name, sub_ds in ds[0].dict.items():
t = dshape_to_table(name, sub_ds, metadata=metadata)
t.create()
return engine
def dshape_to_alchemy(dshape):
"""
>>> dshape_to_alchemy('int')
<class 'sqlalchemy.sql.sqltypes.Integer'>
>>> dshape_to_alchemy('string')
<class 'sqlalchemy.sql.sqltypes.Text'>
>>> dshape_to_alchemy('{name: string, amount: int}')
[Column('name', Text(), table=None, nullable=False), Column('amount', Integer(), table=None, nullable=False)]
>>> dshape_to_alchemy('{name: ?string, amount: ?int}')
[Column('name', Text(), table=None), Column('amount', Integer(), table=None)]
"""
if isinstance(dshape, str):
dshape = datashape.dshape(dshape)
if isinstance(dshape, Option):
return dshape_to_alchemy(dshape.ty)
if str(dshape) in types:
return types[str(dshape)]
if isinstance(dshape, datashape.Record):
return [sa.Column(name,
dshape_to_alchemy(typ),
nullable=isinstance(typ[0], Option))
for name, typ in dshape.parameters[0]]
if isinstance(dshape, datashape.DataShape):
if isdimension(dshape[0]):
return dshape_to_alchemy(dshape[1])
else:
return dshape_to_alchemy(dshape[0])
if isinstance(dshape, datashape.String):
fixlen = dshape[0].fixlen
if fixlen is None:
return sa.types.Text
string_types = dict(U=sa.types.Unicode, A=sa.types.String)
assert dshape.encoding is not None
return string_types[dshape.encoding[0]](length=fixlen)
if isinstance(dshape, datashape.DateTime):
if dshape.tz:
return sa.types.DateTime(timezone=True)
else:
return sa.types.DateTime(timezone=False)
raise NotImplementedError("No SQLAlchemy dtype match for datashape: %s"
% dshape)
@convert.register(Iterator, sa.Table, cost=300.0)
def sql_to_iterator(t, **kwargs):
_, rows = batch(sa.select([t]))
return map(tuple, rows)
@convert.register(Iterator, sa.sql.Select, cost=300.0)
def select_to_iterator(sel, dshape=None, **kwargs):
func = pluck(0) if dshape and isscalar(dshape.measure) else map(tuple)
_, rows = batch(sel)
return func(rows)
@convert.register(base, sa.sql.Select, cost=300.0)
def select_to_base(sel, dshape=None, **kwargs):
assert not dshape or isscalar(dshape), \
'dshape should be None or scalar, got %s' % dshape
with sel.bind.connect() as conn:
return conn.execute(sel).scalar()
@append.register(sa.Table, Iterator)
def append_iterator_to_table(t, rows, dshape=None, **kwargs):
assert not isinstance(t, type)
rows = iter(rows)
# We see if the sequence is of tuples or dicts
# If tuples then we coerce them to dicts
try:
row = next(rows)
except StopIteration:
return
rows = chain([row], rows)
if isinstance(row, (tuple, list)):
if dshape and isinstance(dshape.measure, datashape.Record):
names = dshape.measure.names
if set(names) != set(discover(t).measure.names):
raise ValueError("Column names of incoming data don't match "
"column names of existing SQL table\n"
"Names in SQL table: %s\n"
"Names from incoming data: %s\n" %
(discover(t).measure.names, names))
else:
names = discover(t).measure.names
rows = (dict(zip(names, row)) for row in rows)
engine = t.bind
with engine.connect() as conn:
for chunk in partition_all(1000, rows): # TODO: 1000 is hardcoded
conn.execute(t.insert(), chunk)
return t
@append.register(sa.Table, Chunks)
def append_anything_to_sql_Table(t, c, **kwargs):
for item in c:
append(t, item, **kwargs)
return t
@append.register(sa.Table, object)
def append_anything_to_sql_Table(t, o, **kwargs):
return append(t, convert(Iterator, o, **kwargs), **kwargs)
@append.register(sa.Table, sa.Table)
def append_table_to_sql_Table(t, o, **kwargs):
# This condition is an ugly kludge and should be removed once
# https://github.com/dropbox/PyHive/issues/15 is resolved
if t.bind.name == o.bind.name == 'hive':
with t.bind.connect() as conn:
conn.execute('INSERT INTO TABLE %s SELECT * FROM %s' %
(t.name, o.name))
return t
s = sa.select([o])
return append(t, s, **kwargs)
@append.register(sa.Table, sa.sql.Select)
def append_select_statement_to_sql_Table(t, o, **kwargs):
if not o.bind == t.bind:
return append(t, convert(Iterator, o, **kwargs), **kwargs)
assert o.bind.has_table(t.name, t.schema), \
'tables must come from the same database'
query = t.insert().from_select(o.columns.keys(), o)
with o.bind.connect() as conn:
conn.execute(query)
return t
def should_create_schema(ddl, target, bind, tables=None, state=None,
checkfirst=None, **kwargs):
return ddl.element not in inspect(target.bind).get_schema_names()
def attach_schema(obj, schema):
if schema is not None:
ddl = CreateSchema(schema, quote=True)
event.listen(obj,
'before_create',
ddl.execute_if(callable_=should_create_schema,
dialect='postgresql'))
return obj
def fullname(table, compiler):
preparer = compiler.dialect.identifier_preparer
fullname = preparer.quote_identifier(table.name)
schema = table.schema
if schema is not None:
fullname = '%s.%s' % (preparer.quote_schema(schema), fullname)
return fullname
@resource.register(r'(.*sql.*|oracle|redshift)(\+\w+)?://.+')
def resource_sql(uri, *args, **kwargs):
kwargs2 = keyfilter(keywords(sa.create_engine).__contains__, kwargs)
engine = create_engine(uri, **kwargs2)
ds = kwargs.get('dshape')
schema = kwargs.get('schema')
# we were also given a table name
if args and isinstance(args[0], (str, unicode)):
table_name, args = args[0], args[1:]
metadata = metadata_of_engine(engine, schema=schema)
with ignoring(sa.exc.NoSuchTableError):
return attach_schema(sa.Table(table_name, metadata, autoload=True,
autoload_with=engine, schema=schema),
schema)
if ds:
t = dshape_to_table(table_name, ds, metadata=metadata)
t.create()
return t
else:
raise ValueError("Table does not exist and no dshape provided")
# We were not given a table name
if ds:
create_from_datashape(engine, ds, schema=schema)
return engine
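# Illustrative sketch of a direct call equivalent to what resource() dispatches
# to for SQL URIs (database file, table and column names are invented here):
#
#   t = resource_sql('sqlite:///demo.db', 'accounts',
#                    dshape='var * {name: string, balance: int64}')
#
# If the table already exists its schema is reflected instead; if it is missing
# and no dshape is given, a ValueError is raised.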
@resource.register('impala://.+')
def resource_impala(uri, *args, **kwargs):
try:
import impala.sqlalchemy
except ImportError:
raise ImportError("Please install or update `impyla` library")
return resource_sql(uri, *args, **kwargs)
@resource.register('monetdb://.+')
def resource_monet(uri, *args, **kwargs):
try:
import monetdb
except ImportError:
raise ImportError("Please install the `sqlalchemy_monetdb` library")
return resource_sql(uri, *args, **kwargs)
@resource.register('hive://.+')
def resource_hive(uri, *args, **kwargs):
try:
import pyhive
except ImportError:
raise ImportError("Please install the `PyHive` library.")
pattern = 'hive://((?P<user>[a-zA-Z_]\w*)@)?(?P<host>[\w.]+)(:(?P<port>\d*))?(/(?P<database>\w*))?'
d = re.search(pattern, uri.split('::')[0]).groupdict()
defaults = {'port': '10000',
'user': 'hdfs',
'database': 'default'}
for k, v in d.items():
if not v:
d[k] = defaults[k]
if d['user']:
d['user'] += '@'
uri2 = 'hive://%(user)s%(host)s:%(port)s/%(database)s' % d
if '::' in uri:
uri2 += '::' + uri.split('::')[1]
return resource_sql(uri2, *args, **kwargs)
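# For example (hypothetical host), 'hive://warehouse.example.com/logs' is
# normalized above to 'hive://hdfs@warehouse.example.com:10000/logs' before
# being handed to resource_sql.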
ooc_types.add(sa.Table)
@dispatch(sa.Table)
def drop(table):
table.drop(table.bind, checkfirst=True)
@convert.register(pd.DataFrame, (sa.sql.Select, sa.sql.Selectable), cost=200.0)
def select_or_selectable_to_frame(el, **kwargs):
columns, rows = batch(el)
row = next(rows, None)
if row is None:
return pd.DataFrame(columns=columns)
return pd.DataFrame(list(chain([tuple(row)], map(tuple, rows))),
columns=columns)
class CopyToCSV(sa.sql.expression.Executable, sa.sql.ClauseElement):
def __init__(self, element, path, delimiter=',', quotechar='"',
lineterminator=r'\n', escapechar='\\', header=True,
na_value=''):
self.element = element
self.path = path
self.delimiter = delimiter
self.quotechar = quotechar
self.lineterminator = lineterminator
# mysql cannot write headers
self.header = header and element.bind.dialect.name != 'mysql'
self.escapechar = escapechar
self.na_value = na_value
@compiles(CopyToCSV, 'postgresql')
def compile_copy_to_csv_postgres(element, compiler, **kwargs):
selectable = element.element
istable = isinstance(selectable, sa.Table)
template = """COPY %s TO '{path}'
WITH CSV {header}
DELIMITER '{delimiter}'
QUOTE '{quotechar}'
NULL '{na_value}'
ESCAPE '{escapechar}'
""" % ('{query}' if istable else '({query})')
processed = (fullname(selectable, compiler)
if istable else compiler.process(selectable))
assert processed, ('got empty string from processing element of type %r' %
type(selectable).__name__)
return template.format(query=processed,
path=element.path,
header='HEADER' if element.header else '',
delimiter=element.delimiter,
quotechar=element.quotechar,
na_value=element.na_value,
escapechar=element.escapechar)
@compiles(CopyToCSV, 'mysql')
def compile_copy_to_csv_mysql(element, compiler, **kwargs):
selectable = element.element
if isinstance(selectable, sa.Table):
processed = 'SELECT * FROM %(table)s' % dict(table=selectable.name)
else:
processed = compiler.process(selectable)
assert processed, ('got empty string from processing element of type %r' %
type(selectable).__name__)
template = """{query} INTO OUTFILE '{path}'
FIELDS TERMINATED BY '{delimiter}'
OPTIONALLY ENCLOSED BY '{quotechar}'
ESCAPED BY '{escapechar}'
LINES TERMINATED BY '{lineterminator}'"""
return template.format(query=processed,
path=element.path,
delimiter=element.delimiter,
lineterminator=element.lineterminator,
escapechar=element.escapechar.encode('unicode-escape').decode(),
quotechar=element.quotechar)
@compiles(CopyToCSV, 'sqlite')
def compile_copy_to_csv_sqlite(element, compiler, **kwargs):
if not find_executable('sqlite3'):
raise MDNotImplementedError("Could not find sqlite executable")
selectable = element.element
sql = (compiler.process(sa.select([selectable])
if isinstance(selectable, sa.Table)
else selectable) + ';')
sql = re.sub(r'\s{2,}', ' ', re.sub(r'\s*\n\s*', ' ', sql)).encode()
cmd = ['sqlite3', '-csv',
'-%sheader' % ('no' if not element.header else ''),
'-separator', element.delimiter,
selectable.bind.url.database]
with open(element.path, mode='at') as f:
subprocess.Popen(cmd, stdout=f, stdin=subprocess.PIPE).communicate(sql)
# This will be a no-op since we're doing the write during the compile
return ''
@append.register(CSV, sa.sql.Selectable)
def append_table_to_csv(csv, selectable, dshape=None, **kwargs):
kwargs = keyfilter(keywords(CopyToCSV).__contains__,
merge(csv.dialect, kwargs))
stmt = CopyToCSV(selectable, os.path.abspath(csv.path), **kwargs)
with selectable.bind.begin() as conn:
conn.execute(stmt)
csv.has_header = stmt.header
return csv
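# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module).  It shows how the CopyToCSV clause element defined above could be
# compiled and executed against a PostgreSQL engine.  The connection URI,
# table name and output path are hypothetical placeholders, and the sketch
# assumes the module-level ``sa`` (SQLAlchemy) import and a reachable server.
def _example_copy_table_to_csv():
    engine = sa.create_engine('postgresql://user:pass@localhost/db')  # hypothetical URI
    metadata = sa.MetaData(bind=engine)
    accounts = sa.Table('accounts', metadata, autoload=True)  # hypothetical table
    # The @compiles handler for 'postgresql' above renders this as COPY ... TO
    stmt = CopyToCSV(accounts, '/tmp/accounts.csv', delimiter=',', header=True)
    with engine.begin() as conn:
        conn.execute(stmt)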
try:
from .hdfs import HDFS
except ImportError:
pass
else:
@append.register(HDFS(CSV), sa.sql.Selectable)
def append_selectable_to_hdfs_csv(*args, **kwargs):
raise MDNotImplementedError()
| bsd-3-clause |
LiaoPan/scikit-learn | sklearn/utils/random.py | 234 | 10510 | # Author: Hamzeh Alsalhi <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
        If not given, the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
    >>> #This is equivalent to np.random.permutation(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
    if pop_size == 0:
raise ValueError("a must be non-empty")
    if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
        if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
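# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module).  Unlike the module-level ``np.random.choice``, the backport above
# takes a ``random_state`` argument, so the draw below is reproducible.  The
# probabilities are arbitrary example values.
def _example_choice():
    p = [0.1, 0.2, 0.3, 0.2, 0.2]
    first = choice(5, size=3, replace=False, p=p, random_state=0)
    second = choice(5, size=3, replace=False, p=p, random_state=0)
    # Same seed, same weighted sample without replacement.
    assert np.array_equal(first, second)
    return first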
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if np.sum(class_prob_j) != 1.0:
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
# Normalize probabilites for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
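# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module).  ``random_choice_csc`` draws one sparse column per output; the
# class lists and probabilities below are arbitrary example values chosen so
# that each probability vector sums exactly to one.
def _example_random_choice_csc():
    classes = [np.array([0, 1, 2]), np.array([0, 3])]
    probabilities = [np.array([0.5, 0.25, 0.25]), np.array([0.5, 0.5])]
    mat = random_choice_csc(6, classes, class_probability=probabilities,
                            random_state=0)
    # A scipy CSC matrix of shape (6, 2); the zero class is stored implicitly.
    return mat.toarray()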
| bsd-3-clause |
pnedunuri/scikit-learn | sklearn/cross_validation.py | 47 | 67782 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['KFold',
'LabelKFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'LabelShuffleSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
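# ---------------------------------------------------------------------------
# Hedged illustration (added for this write-up; not part of the original
# module): a minimal custom CV iterator built on _PartitionIterator.
# Subclasses only need to implement ``_iter_test_indices`` (or
# ``_iter_test_masks``); the base class turns every test set into the
# matching (train, test) index pair.  The "every third sample" rule below is
# an arbitrary example, e.g. ``list(_EveryThirdSampleOut(7))`` yields three
# splits.
class _EveryThirdSampleOut(_PartitionIterator):
    def _iter_test_indices(self):
        for offset in range(3):
            # Samples offset, offset + 3, offset + 6, ... form the test set.
            yield np.arange(offset, self.n, 3)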
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling by default).
    Each fold is then used as a validation set once while the k - 1 remaining
    folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LabelKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping labels.
The same label will not appear in two different folds (the number of
distinct labels has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct labels is approximately the same in each fold.
Parameters
----------
labels : array-like with shape (n_samples, )
Contains a label for each sample.
The folds are built so that the same label does not appear in two
different folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import LabelKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> labels = np.array([0, 0, 2, 2])
>>> label_kfold = LabelKFold(labels, n_folds=2)
>>> len(label_kfold)
2
>>> print(label_kfold)
sklearn.cross_validation.LabelKFold(n_labels=4, n_folds=2)
>>> for train_index, test_index in label_kfold:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def __init__(self, labels, n_folds=3, shuffle=False, random_state=None):
super(LabelKFold, self).__init__(len(labels), n_folds, shuffle,
random_state)
unique_labels, labels = np.unique(labels, return_inverse=True)
n_labels = len(unique_labels)
if n_folds > n_labels:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of labels: {1}.").format(n_folds,
n_labels))
        # Weight labels by their number of occurrences
n_samples_per_label = np.bincount(labels)
# Distribute the most frequent labels first
indices = np.argsort(n_samples_per_label)[::-1]
n_samples_per_label = n_samples_per_label[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(n_folds)
# Mapping from label index to fold index
label_to_fold = np.zeros(len(unique_labels))
# Distribute samples by adding the largest weight to the lightest fold
for label_index, weight in enumerate(n_samples_per_label):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
label_to_fold[indices[label_index]] = lightest_fold
self.idxs = label_to_fold[labels]
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
for i in range(self.n_folds):
yield (self.idxs == i)
def __repr__(self):
return '{0}.{1}(n_labels={2}, n_folds={3})'.format(
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size ``trunc(n_samples / n_folds)``; the last one holds
    the remaining samples.
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
    ``p`` different values of the labels, while the latter uses samples
    that are all assigned the same label.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of samples to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
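# ---------------------------------------------------------------------------
# Hedged illustration (added for this write-up; not part of the original
# module) of how the helper above resolves float fractions and absolute
# counts into (n_train, n_test); the numbers are arbitrary examples.
def _example_validate_shuffle_split():
    # 25% of 10 samples -> ceil(2.5) = 3 test samples, the rest for training.
    assert _validate_shuffle_split(10, 0.25, None) == (7, 3)
    # An absolute test count combined with a float train fraction.
    assert _validate_shuffle_split(10, 2, 0.5) == (5, 2)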
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
            raise ValueError('The train_size = %d should be greater than or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
            raise ValueError('The test_size = %d should be greater than or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
# dividers of the number of elements per class), we may end
# up here with less samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
                # Complete the sets by randomly assigning the missing indices
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
class LabelShuffleSplit(ShuffleSplit):
'''Shuffle-Labels-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided label. This label information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LabelShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique labels,
whereas LabelShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique labels.
For example, a less computationally intensive alternative to
``LeavePLabelOut(labels, p=10)`` would be
``LabelShuffleSplit(labels, test_size=10, n_iter=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to labels, and
not to samples, as in ShuffleSplit.
Parameters
----------
labels : array, [n_samples]
Labels of samples
n_iter : int (default 5)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the test split. If
int, represents the absolute number of test labels. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the train split. If
int, represents the absolute number of train labels. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
'''
def __init__(self, labels, n_iter=5, test_size=0.2, train_size=None,
random_state=None):
classes, label_indices = np.unique(labels, return_inverse=True)
super(LabelShuffleSplit, self).__init__(
len(classes),
n_iter=n_iter,
test_size=test_size,
train_size=train_size,
random_state=random_state)
self.labels = labels
self.classes = classes
self.label_indices = label_indices
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.labels,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _iter_indices(self):
for label_train, label_test in super(LabelShuffleSplit,
self)._iter_indices():
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(self.label_indices, label_train))
test = np.flatnonzero(np.in1d(self.label_indices, label_test))
yield train, test
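# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module): LabelShuffleSplit holds out a random fraction of *labels*, not of
# samples, at every iteration.  The label array is an arbitrary example.
def _example_label_shuffle_split():
    labels = np.array([1, 1, 2, 2, 3, 3, 4, 4])
    cv = LabelShuffleSplit(labels, n_iter=4, test_size=0.5, random_state=0)
    for train_index, test_index in cv:
        # No label ever appears on both sides of a split.
        assert not set(labels[train_index]) & set(labels[test_index])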
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        binary or multiclass, :class:`StratifiedKFold` is used. In all other
        cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
preds = [p for p, _ in preds_blocks]
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_locs = np.empty(len(locs), dtype=int)
inv_locs[locs] = np.arange(len(locs))
# Check for sparse predictions
if sp.issparse(preds[0]):
preds = sp.vstack(preds, format=preds[0].format)
else:
preds = np.concatenate(preds)
return preds[inv_locs]
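# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module): obtaining one out-of-fold prediction per sample.  The dataset and
# estimator are example choices, not requirements of the function.
def _example_cross_val_predict():
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    iris = load_iris()
    preds = cross_val_predict(LogisticRegression(), iris.data, iris.target,
                              cv=5)
    # Exactly one prediction per input sample, in the original sample order.
    assert preds.shape == (iris.data.shape[0],)
    return preds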
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        binary or multiclass, :class:`StratifiedKFold` is used. In all other
        cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
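# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module): scoring an estimator with 5-fold cross-validation and a named
# scorer.  The dataset, estimator and scoring string are example choices.
def _example_cross_val_score():
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    scores = cross_val_score(SVC(kernel='linear'), iris.data, iris.target,
                             scoring='accuracy', cv=5)
    # One accuracy value per fold; summarise as mean +/- std.
    return scores.mean(), scores.std()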
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
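# ---------------------------------------------------------------------------
# Hedged illustration (added for this write-up; not part of the original
# module) of the precomputed-kernel branch of _safe_split: for a "pairwise"
# estimator the test rows are sliced against the *training* columns.  The
# tiny stand-in estimator below exists only to set the ``_pairwise`` flag.
def _example_safe_split_precomputed():
    class _PairwiseStub(object):
        _pairwise = True
    K = np.arange(16, dtype=float).reshape(4, 4)  # stand-in kernel matrix
    train, test = np.array([0, 1, 2]), np.array([3])
    K_train, _ = _safe_split(_PairwiseStub(), K, None, train)
    K_test, _ = _safe_split(_PairwiseStub(), K, None, test, train)
    # Square block for training, rectangular test-vs-train block for testing.
    assert K_train.shape == (3, 3) and K_test.shape == (1, 3)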
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        binary or multiclass, :class:`StratifiedKFold` is used. In all other
        cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
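Examples
--------
A minimal illustrative sketch (the toy ``X`` and ``y`` below are made up):
>>> import numpy as np
>>> from sklearn.cross_validation import check_cv
>>> X = np.zeros((6, 2))
>>> y = np.array([0, 0, 1, 1, 0, 1])
>>> cv = check_cv(3, X, y, classifier=True)      # StratifiedKFold(y, 3)
>>> cv = check_cv(None, X, y, classifier=False)  # falls back to KFold(6, 3)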
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation: samples are only shuffled within
groups of samples that share the same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
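Examples
--------
An illustrative sketch (any small classification dataset and estimator
work the same way; the iris data is only a placeholder):
>>> from sklearn.cross_validation import permutation_test_score
>>> from sklearn.datasets import load_iris
>>> from sklearn.svm import SVC
>>> iris = load_iris()
>>> score, permutation_scores, pvalue = permutation_test_score(
...     SVC(kernel='linear'), iris.data, iris.target, cv=5,
...     n_permutations=30, random_state=0)
>>> # pvalue == (np.sum(permutation_scores >= score) + 1.) / (n_permutations + 1)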
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.",
DeprecationWarning)
allow_nd = options.pop('allow_nd', None)
allow_lists = options.pop('allow_lists', None)
stratify = options.pop('stratify', None)
if allow_lists is not None:
warnings.warn("The allow_lists option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if allow_nd is not None:
warnings.warn("The allow_nd option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if allow_lists is False or allow_nd is False:
arrays = [check_array(x, 'csr', allow_nd=allow_nd,
force_all_finite=False, ensure_2d=False)
if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
| bsd-3-clause |
alexblaessle/PyFRAP | pyfrp/modules/pyfrp_gmsh_geometry.py | 2 | 149622 | #=====================================================================================================================================
#Copyright
#=====================================================================================================================================
#Copyright (C) 2014 Alexander Blaessle, Patrick Mueller and the Friedrich Miescher Laboratory of the Max Planck Society
#This software is distributed under the terms of the GNU General Public License.
#This file is part of PyFRAP.
#PyFRAP is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#===========================================================================================================================================================================
#Module Description
#===========================================================================================================================================================================
"""PyFRAP module for creating/extracting gmsh geometries for PyFRAP toolbox. Module mainly has the following classes:
* A ``domain`` class, acting as a canvas.
* A ``vertex`` class, substituting gmsh's *Point*.
* A ``edge`` class, parenting all different kind of edges.
* A ``line`` class, substituting gmsh's *Line*.
* A ``arc`` class, substituting gmsh's *Circle*.
* A ``bSpline`` class, substituting gmsh's *bSpline*.
* A ``lineLoop`` class, substituting gmsh's *Line Loop*.
* A ``ruledSurface`` class, substituting gmsh's *Ruled Surface*.
* A ``surfaceLoop`` class, substituting gmsh's *Surface Loop*.
* A ``volume`` class, substituting gmsh's *Volume*.
* A ``field`` class, parenting all different kind of fields.
* A ``attractorField`` class, substituting gmsh's *Attractor* field.
* A ``boundaryLayerField`` class, substituting gmsh's *Boundary Layer* field.
* A ``thresholdField`` class, substituting gmsh's *Threshold* field.
* A ``minField`` class, substituting gmsh's *Min* field.
This module, together with pyfrp.pyfrp_gmsh_IO_module and pyfrp.pyfrp_gmsh_module, works partially as a Python gmsh wrapper; it is, however, incomplete.
If you want to know more about gmsh, go to http://gmsh.info/doc/texinfo/gmsh.html .
"""
#===========================================================================================================================================================================
#Importing necessary modules
#===========================================================================================================================================================================
#Numpy/Scipy
import numpy as np
#String
import string
#PyFRAP modules
import pyfrp_plot_module
from pyfrp_term_module import *
import pyfrp_misc_module
import pyfrp_gmsh_IO_module
import pyfrp_idx_module
import pyfrp_geometry_module
import pyfrp_IO_module
import pyfrp_vtk_module
#Matplotlib
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import copy as cpy
#===========================================================================================================================================================================
#Class definitions
#===========================================================================================================================================================================
class domain:
"""Domain class storing embryo geometry entities.
Args:
edges (list): List of edges.
vertices (list): List of vertices.
arcs (list): List of arcs.
lines (list): List of lines.
bSplines (list): List of bSplines.
lineLoops (list): List of lineLoops.
surfaceLoops (list): List of surfaceLoops.
ruledSurfaces (list): List of ruledSurfaces.
volumes (list): List of volumes.
fields (list): List of fields.
annXOffset (float): Offset of annotations in x-direction.
annYOffset (float): Offset of annotations in y-direction.
annZOffset (float): Offset of annotations in z-direction.
"""
def __init__(self):
#Lists to keep track of all geometrical entities.
self.edges=[]
self.vertices=[]
self.arcs=[]
self.lines=[]
self.bSplines=[]
self.lineLoops=[]
self.ruledSurfaces=[]
self.surfaceLoops=[]
self.volumes=[]
self.fields=[]
self.bkgdField=None
#Some settings for plotting
self.annXOffset=3.
self.annYOffset=3.
self.annZOffset=3.
def addVertex(self,x,Id=None,volSize=None,checkExist=False):
"""Adds new :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.vertex` instance
at point ``x`` and appends it to ``vertices`` list.
.. note:: ``volSize`` does not have any effect on the geometry itself but is simply
stored in the vertex object for further usage.
If ``checkExist=True``, checks if a vertex at same location already exists. If so, will return
that vertex instead.
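For example (an illustrative sketch; the coordinates and mesh size are arbitrary):
>>> d=pyfrp_gmsh_geometry.domain()
>>> v1=d.addVertex(np.array([0.,0.,0.]),volSize=20.)
>>> v2=d.addVertex(np.array([0.,0.,0.]),checkExist=True) # returns the already existing vertex v1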
Args:
x (numpy.ndarray): Coordinate of vertex.
Keyword Args:
Id (int): ID of vertex.
volSize (float): Element size at vertex.
checkExist (bool): Checks if a vertex at same location already exists.
Returns:
pyfrp.modules.pyfrp_gmsh_geometry.vertex: New vertex instance.
"""
if checkExist:
for v in self.vertices:
if (v.x==x).all():
return v
newId=self.getNewId(self.vertices,Id)
v=vertex(self,x,newId,volSize=volSize)
self.vertices.append(v)
return v
def addLine(self,v1,v2,Id=None):
"""Adds new :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.line` instance
between vertices ``v1`` and ``v2`` and appends it to ``edges`` and ``lines`` lists.
Args:
v1 (pyfrp.modules.pyfrp_gmsh_geometry.vertex): Start vertex.
v2 (pyfrp.modules.pyfrp_gmsh_geometry.vertex): End vertex.
Keyword Args:
Id (int): ID of line.
Returns:
pyfrp.modules.pyfrp_gmsh_geometry.line: New line instance.
"""
newId=self.getNewId(self.edges,Id)
e=line(self,v1,v2,newId)
self.lines.append(e)
self.edges.append(e)
return e
def addArc(self,vstart,vcenter,vend,Id=None):
"""Adds new :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.arc` instance
from ``vstart`` via ``vcenter`` to ``vend`` and appends it to ``edges`` and ``arcs`` lists.
Args:
vstart (pyfrp.modules.pyfrp_gmsh_geometry.vertex): Start vertex.
vcenter (pyfrp.modules.pyfrp_gmsh_geometry.vertex): Center vertex.
vend (pyfrp.modules.pyfrp_gmsh_geometry.vertex): End vertex.
Keyword Args:
Id (int): ID of arc.
Returns:
pyfrp.modules.pyfrp_gmsh_geometry.arc: New arc instance.
"""
newId=self.getNewId(self.edges,Id)
a=arc(self,vstart,vcenter,vend,newId)
self.arcs.append(a)
self.edges.append(a)
return a
def addBSpline(self,vertices,Id=None):
"""Adds new :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.line` instance
at point ``x`` and appends it to ``edges`` and ``lines`` list.
Args:
vertices (list): List of vertex objects.
Keyword Args:
Id (int): ID of spline.
Returns:
pyfrp.modules.pyfrp_gmsh_geometry.bSpline: New spline instance.
"""
newId=self.getNewId(self.edges,Id)
e=bSpline(self,vertices,newId)
self.bSplines.append(e)
self.edges.append(e)
return e
def addCircleByParameters(self,center,radius,z,volSize,plane="z",genLoop=False,genSurface=False,checkExist=True):
"""Adds circle to domain by given center and radius.
Will create 5 new :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.vertex` objects
``[vcenter,v1,v2,v3,v4]`` and four new `pyfrp.modules.pyfrp_gmsh_geometry.arc` objects
``[a1,a2,a3,a4]`` and builds the circle from them.
Circle will be at ``z=z`` and vertices will have mesh size ``volSize``.
For example:
>>> d=pyfrp_gmsh_geometry.domain()
>>> d.addCircleByParameters([256,256],100,50,30.)
>>> d.addCircleByParameters([256,256],100,50,30.,plane="x")
>>> d.addCircleByParameters([256,256],100,50,30.,plane="y")
>>> d.draw()
will generate:
.. image:: ../imgs/pyfrp_gmsh_geometry/addCircleByParameters.png
.. note:: Plane can be given as ``"x","y","z"``. See also :py:func:`pyfrp.modules.pyfrp_geometry_module.flipCoordinate`.
Args:
center (numpy.ndarray): Center of circle.
radius (float): Radius of the circle.
z (float): Height at which circle is placed.
volSize (float): Mesh size of vertices.
Keyword Args:
plane (str): Plane in which circle is placed.
genLoop (bool): Create lineLoop.
genSurface (bool): Create ruledSurface.
checkExist (bool): Checks if a vertex at same location already exists.
Returns:
tuple: Tuple containing:
* vertices (list): List of vertices.
* arcs (list): List of arcs.
* loop (pyfrp.modules.pyfrp_gmsh_geometry.lineLoop): Line loop.
* surface (pyfrp.modules.pyfrp_gmsh_geometry.ruledSurface): Ruled Surface.
"""
# Define coordinates
xcenter=pyfrp_geometry_module.flipCoordinate([center[0],center[1],z],plane,origAxis="z")
x1=pyfrp_geometry_module.flipCoordinate([center[0]+radius,center[1],z],plane,origAxis="z")
x2=pyfrp_geometry_module.flipCoordinate([center[0],center[1]+radius,z],plane,origAxis="z")
x3=pyfrp_geometry_module.flipCoordinate([center[0]-radius,center[1],z],plane,origAxis="z")
x4=pyfrp_geometry_module.flipCoordinate([center[0],center[1]-radius,z],plane,origAxis="z")
# Add vertices
vcenter=self.addVertex(xcenter,volSize=volSize)
v1=self.addVertex(x1,volSize=volSize,checkExist=checkExist)
v2=self.addVertex(x2,volSize=volSize,checkExist=checkExist)
v3=self.addVertex(x3,volSize=volSize,checkExist=checkExist)
v4=self.addVertex(x4,volSize=volSize,checkExist=checkExist)
# Add Arcs
a1=self.addArc(v1,vcenter,v2)
a2=self.addArc(v2,vcenter,v3)
a3=self.addArc(v3,vcenter,v4)
a4=self.addArc(v4,vcenter,v1)
if genLoop or genSurface:
loop=self.addLineLoop(edgeIDs=[a1.Id,a2.Id,a3.Id,a4.Id])
else:
loop=None
if genSurface:
surface=self.addRuledSurface(lineLoopID=loop.Id)
else:
surface=None
return [vcenter,v1,v2,v3,v4],[a1,a2,a3,a4],loop,surface
def addPolygonByParameters(self,coords,volSize,z=0.,plane="z",genLoop=False,genSurface=False):
"""Adds polygon to domain by given vertex coordinates.
Will create a list of new :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.vertex` objects
and a list of new `pyfrp.modules.pyfrp_gmsh_geometry.line` objects
connecting the vertices.
.. note:: Plane can be given as ``"x","y","z"``. See also :py:func:`pyfrp.modules.pyfrp_geometry_module.flipCoordinate`.
.. note:: Vertices can be given either as a
* list of coordinate triples ``[[x1,y1,z1],[x2,y2,z2],...]``.
* list of x-y-coordinates and a given z-coordinate ``[[x1,y1,z],[x2,y2,z],...]``.
For example:
>>> d=pyfrp_gmsh_geometry.domain()
>>> d.addPolygonByParameters([[100,100,100],[200,200,100],[200,100,100]],30.)
>>> d.addPolygonByParameters([[100,100,100],[200,200,100],[200,100,100]],30.,plane="x")
>>> d.addPolygonByParameters([[100,100,100],[200,200,100],[200,100,100]],30.,plane="y")
>>> d.draw()
will generate:
.. image:: ../imgs/pyfrp_gmsh_geometry/addPolygonByParameters.png
.. note:: Vertices are created in the order of the coordinates and connected in the same order.
Args:
coords (list): List of coordinates.
volSize (float): Mesh size of vertices.
Keyword Args:
plane (str): Plane in which polygon is placed.
z (float): Height at which polygon is placed.
genLoop (bool): Create lineLoop.
genSurface (bool): Create ruledSurface.
Returns:
tuple: Tuple containing:
* vertices (list): List of vertices.
* lines (list): List of connecting lines.
* loop (pyfrp.modules.pyfrp_gmsh_geometry.lineLoop): Line loop.
* surface (pyfrp.modules.pyfrp_gmsh_geometry.ruledSurface): Ruled Surface.
"""
# Define coordinates
xs=[]
for c in coords:
if len(c)==3:
xs.append(pyfrp_geometry_module.flipCoordinate([c[0],c[1],c[2]],plane,origAxis="z"))
else:
xs.append(pyfrp_geometry_module.flipCoordinate([c[0],c[1],z],plane,origAxis="z"))
# Add vertices
vertices=[]
for x in xs:
vertices.append(self.addVertex(x,volSize=volSize))
# Add Lines
lines=[]
for i in range(len(vertices)):
lines.append(self.addLine(vertices[i],vertices[pyfrp_misc_module.modIdx(i+1,vertices)]))
# Add LineLoop
if genLoop or genSurface:
loop=self.addLineLoop(edgeIDs=pyfrp_misc_module.objAttrToList(lines,'Id'))
else:
loop=None
# Add surface
if genSurface:
surface=self.addRuledSurface(lineLoopID=loop.Id)
else:
surface=None
return vertices,lines,loop,surface
def addRectangleByParameters(self,offset,sidelengthX,sidelengthY,z,volSize,plane="z"):
"""Adds rectangle to domain by given offset and sidelengths.
Will create a list of four :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.vertex` objects
and a list of four `pyfrp.modules.pyfrp_gmsh_geometry.line` objects
connecting the vertices.
.. note:: Plane can be given as ``"x","y","z"``. See also :py:func:`pyfrp.modules.pyfrp_geometry_module.flipCoordinate`.
.. note:: The ``offset`` is defined as the bottom left corner.
For example:
>>> d=pyfrp_gmsh_geometry.domain()
>>> d.addRectangleByParameters([256,256],100,200,50,30.)
>>> d.addRectangleByParameters([256,256],100,200,50,30.,plane="x")
>>> d.addRectangleByParameters([256,256],100,200,50,30.,plane="y")
>>> d.draw()
will generate:
.. image:: ../imgs/pyfrp_gmsh_geometry/addRectangleByParameters.png
Args:
offset (numpy.ndarray): Offset of rectangle.
sidelengthX (float): Sidelength in x-direction.
sidelengthY (float): Sidelength in y-direction.
z (float): Height at which rectangle is placed.
volSize (float): Mesh size of vertices.
Keyword Args:
plane (str): Plane in which rectangle is placed.
genLoop (bool): Create lineLoop.
genSurface (bool): Create ruledSurface.
Returns:
tuple: Tuple containing:
* vertices (list): List of vertices.
* lines (list): List of connecting lines.
* loop (pyfrp.modules.pyfrp_gmsh_geometry.lineLoop): Line loop.
* surface (pyfrp.modules.pyfrp_gmsh_geometry.ruledSurface): Ruled Surface.
"""
coords=[[offset[0],offset[1],z],[offset[0]+sidelengthX,offset[1],z],
[offset[0]+sidelengthX,offset[1]+sidelengthY,z],[offset[0],offset[1]+sidelengthY,z]]
return self.addPolygonByParameters(coords,volSize,plane=plane)
def addSquareByParameters(self,offset,sidelength,z,volSize,plane="z"):
"""Adds square to domain by given offset and sidelength.
Will create a list of four :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.vertex` objects
and a list of four `pyfrp.modules.pyfrp_gmsh_geometry.line` objects
connecting the vertices.
.. note:: Plane can be given as ``"x","y","z"``. See also :py:func:`pyfrp.modules.pyfrp_geometry_module.flipCoordinate`.
.. note:: The ``offset`` is defined as the bottom left corner.
For example:
>>> d=pyfrp_gmsh_geometry.domain()
>>> d.addSquareByParameters([256,256],100,50,30.)
>>> d.addSquareByParameters([256,256],100,50,30.,plane="x")
>>> d.addSquareByParameters([256,256],100,50,30.,plane="y")
>>> d.draw()
will generate:
.. image:: ../imgs/pyfrp_gmsh_geometry/addSquareByParameters.png
Args:
offset (numpy.ndarray): Offset of square.
sidelength (float): Sidelength of square.
z (float): Height at which square is placed.
volSize (float): Mesh size of vertices.
Keyword Args:
plane (str): Plane in which square is placed.
genLoop (bool): Create lineLoop.
genSurface (bool): Create ruledSurface.
Returns:
tuple: Tuple containing:
* vertices (list): List of vertices.
* lines (list): List of connecting lines.
* loop (pyfrp.modules.pyfrp_gmsh_geometry.lineLoop): Line loop.
* surface (pyfrp.modules.pyfrp_gmsh_geometry.ruledSurface): Ruled Surface.
"""
return self.addRectangleByParameters(offset,sidelength,sidelength,z,volSize,plane=plane)
def addPrismByParameters(self,coords,volSize,height=1.,z=0.,plane="z",genLoops=True,genSurfaces=True,genSurfaceLoop=True,genVol=True):
r"""Adds prism to domain by given vertex coordinates.
Will create:
* 2 new polygons, see :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.addPolygonByParameters`.
* n :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.line` objects connecting the two polyogns.
If selected, will create:
* n+2 :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.lineLoop` objects around the 6 surfaces.
* n+2 corresponding :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.ruledSurface` objects.
* 1 :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.surfaceLoop`.
* 1 corresponding :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.volume`.
.. note:: Plane can be given as ``"x","y","z"``. See also :py:func:`pyfrp.modules.pyfrp_geometry_module.flipCoordinate`.
.. note:: Vertices can be given either as a
* list of coordinate triples ``[[x1,y1,z1],[x2,y2,z2],...]``. Then the list of vertices needs to be of length :math:`2n`, where
:math:`n` is the number of corners of the top and lower polygons. Otherwise :py:func:`addPrismByParameters` will crash.
* list of x-y-coordinates, a given z-coordinate and height. This will place the vertices at ``[[x1,y1,z],[x2,y2,z],...]`` and
``[[x1,y1,z+height],[x2,y2,z+height],...]``.
For example:
>>> d=pyfrp_gmsh_geometry.domain()
>>> d.addPrismByParameters([[256,256],[200,220],[200,200],[210,210],[220,200]],30.,z=50.,height=40.,plane="z",genLoops=True,genSurfaces=True,genVol=True)
>>> d.draw()
will generate:
.. image:: ../imgs/pyfrp_gmsh_geometry/addPrismByParameters.png
.. note:: Vertices are created in the order of the coordinates and connected in the same order.
Args:
coords (list): List of coordinates.
volSize (float): Mesh size of vertices.
Keyword Args:
plane (str): Plane in which prism is placed.
z (float): Height at which first polygon is placed.
height (float): Height of prism.
genLoops (bool): Generate line loops.
genSurfaces (bool): Generate surfaces.
genSurfaceLoop (bool): Generate surface loop.
genVol (bool): Generate corresponding volume.
Returns:
tuple: Tuple containing:
* vertices (list): List of vertices.
* lines (list): List of lines.
* loops (list): List of loops.
* surfaces (list): List of surfaces.
* surfaceLoop (pyfrp.modules.pyfrp_gmsh_geometry.surfaceLoop): Generated surface loop.
* vol (pyfrp.modules.pyfrp_gmsh_geometry.volume): Generated volume.
"""
# Create upper and lower polygons
if len(coords[0])==3:
if np.mod(len(coords),2)!=0:
printError("addPrismByParameters: You gave a list of 3-dimensional vertex coordinates. However,the number of coordinates is odd, will not be able to continue.")
return
vertices,lines,ltemp,stemp = self.addPolygonByParameters(coords,volSize,z=0.,plane="z")
vertices1=vertices[:len(vertices)/2]
vertices2=vertices[len(vertices)/2:]
lines1=lines[:len(lines)/2]
lines2=lines[len(lines)/2:]
else:
vertices1,lines1,ltemp,stemp = self.addPolygonByParameters(coords,volSize,z=z,plane="z")
vertices2,lines2,ltemp,stemp = self.addPolygonByParameters(coords,volSize,z=z+height,plane="z")
# Connect them with lines
lines3=[]
for i in range(len(vertices1)):
lines3.append(self.addLine(vertices1[i],vertices2[i]))
# Add loops
loops=[]
if genLoops:
# Loops of upper and lower polygon
loops.append(self.addLineLoop(edgeIDs=pyfrp_misc_module.objAttrToList(lines1,"Id")))
loops.append(self.addLineLoop(edgeIDs=pyfrp_misc_module.objAttrToList(lines2,"Id")))
# Loops of side faces
for i in range(len(lines1)):
loops.append(self.addLineLoop(edgeIDs=[-lines1[i].Id,lines3[i].Id,lines2[i].Id,-lines3[pyfrp_misc_module.modIdx(i+1,lines1)].Id]))
# Add surfaces
surfaces=[]
if genSurfaces:
for loop in loops:
surfaces.append(self.addRuledSurface(lineLoopID=loop.Id))
# Make surface loop
if genSurfaceLoop:
surfaceLoop=self.addSurfaceLoop(surfaceIDs=pyfrp_misc_module.objAttrToList(surfaces,'Id'))
else:
surfaceLoop=None
# Make volume
if genVol:
vol=self.addVolume(surfaceLoopID=surfaceLoop.Id)
else:
vol=None
return [vertices1,vertices2],[lines1,lines2,lines3],loops,surfaces,surfaceLoop,vol
def addCuboidByParameters(self,offset,sidelengthX,sidelengthY,height,volSize,plane="z",genLoops=True,genSurfaces=True,genSurfaceLoop=True,genVol=True):
"""Adds Cuboid to domain by given offset, sidelengths in x- and y-direction and height.
Will define vertices and then call :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.addPrismByParameters`.
.. note:: Plane can be given as ``"x","y","z"``. See also :py:func:`pyfrp.modules.pyfrp_geometry_module.flipCoordinate`.
For example:
>>> d=pyfrp_gmsh_geometry.domain()
>>> d.addCuboidByParameters([256,256,50],100,200,40,30.,genLoops=True,genSurfaces=True,genVol=True)
>>> d.draw()
will generate:
.. image:: ../imgs/pyfrp_gmsh_geometry/addCuboidByParameters.png
Args:
offset (numpy.ndarray): Offset of cuboid.
sidelengthX (float): Sidelength in x-direction.
sidelengthY (float): Sidelength in y-direction.
height (float): Height of cuboid.
volSize (float): Mesh size of vertices.
Keyword Args:
plane (str): Plane in which prism is placed.
genLoops (bool): Generate line loops.
genSurfaces (bool): Generate surfaces.
genSurfaceLoop (bool): Generate surface loop.
genVol (bool): Generate corresponding volume.
Returns:
tuple: Tuple containing:
* vertices (list): List of vertices.
* lines (list): List of lines.
* loops (list): List of loops.
* surfaces (list): List of surfaces.
* surfaceLoop (pyfrp.modules.pyfrp_gmsh_geometry.surfaceLoop): Generated surface loop.
* vol (pyfrp.modules.pyfrp_gmsh_geometry.volume): Generated volume.
"""
# Define coordinates
coords=[[offset[0],offset[1]],[offset[0]+sidelengthX,offset[1]],
[offset[0]+sidelengthX,offset[1]+sidelengthY],[offset[0],offset[1]+sidelengthY]]
return self.addPrismByParameters(coords,volSize,height=height,z=offset[2],plane="z",genLoops=genLoops,genSurfaces=genSurfaces,genSurfaceLoop=genSurfaceLoop,genVol=genVol)
def addBallByParameters(self,center,radius,z,volSize,genLoops=True,genSurfaces=True,genSurfaceLoop=True,genVol=True,checkExist=True):
"""Adds ball to domain by given center and radius.
Will create:
* 3 new circles, see :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.addCircleByParameters`.
If selected, will create:
* 8 :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.lineLoop` objects around the 8 surfaces.
* 8 corresponding :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.ruledSurface` objects.
* 1 :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.surfaceLoop`.
* 1 corresponding :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.volume`.
For example:
>>> center=[256,50]
>>> radius=100
>>> Z=0
>>> volSize=20
>>> d=pyfrp_gmsh_geometry.domain()
>>> d.addBallByParameters(center,radius,Z,volSize,genLoops=True,genSurfaces=True,genVol=True,checkExist=True)
>>> d.draw()
would return:
.. image:: ../imgs/pyfrp_gmsh_geometry/addBallByParameters.png
Args:
center (numpy.ndarray): Center of ball.
radius (float): Radius of the ball.
z (float): Height at which ball is placed.
volSize (float): Mesh size of vertices.
Keyword Args:
genLoops (bool): Generate line loops.
genSurfaces (bool): Generate surfaces.
genSurfaceLoop (bool): Generate surface loop.
genVol (bool): Generate volume.
checkExist (bool): Checks if a vertex at same location already exists.
Returns:
tuple: Tuple containing:
* vertices (list): List of vertices.
* arcs (list): List of arcs.
* loops (list): List of loops.
* surfaces (list): List of surfaces.
* surfaceLoop (pyfrp.modules.pyfrp_gmsh_geometry.surfaceLoop): Generated surface loop.
* vol (pyfrp.modules.pyfrp_gmsh_geometry.volume): Generated volume.
"""
# Add 3 circles
v1,a1,ll1,s1=self.addCircleByParameters(center,radius,z,volSize,genLoop=False)
v2,a2,ll2,s2=self.addCircleByParameters([z,center[0]],radius,center[1],volSize,genLoop=False,plane='x')
v3,a3,ll3,s3=self.addCircleByParameters([center[1],z],radius,center[0],volSize,genLoop=False,plane='y')
vertices=v1+v2+v3
arcs=a1+a2+a3
# Define line loops
ll=[]
ll.append([a1[0],a2[0],a3[0]])
ll.append([a1[1],a3[0],a2[3]])
ll.append([a1[2],a2[3],a3[1]])
ll.append([a1[3],a3[1],a2[0]])
ll.append([a1[0],a2[1],a3[3]])
ll.append([a1[1],a3[3],a2[2]])
ll.append([a1[2],a2[2],a3[2]])
ll.append([a1[3],a3[2],a2[1]])
# Generate line loops
lineLoops=[]
if genLoops:
for l in ll:
lnew=self.addLineLoop(edgeIDs=pyfrp_misc_module.objAttrToList(l,'Id'))
lnew.fix()
lineLoops.append(lnew)
# Generate surfaces
surfaces=[]
if genSurfaces:
for l in lineLoops:
surfaces.append(self.addRuledSurface(lineLoopID=l.Id))
# Make surface loop
if genSurfaceLoop:
surfaceLoop=self.addSurfaceLoop(surfaceIDs=pyfrp_misc_module.objAttrToList(surfaces,'Id'))
else:
surfaceLoop=None
# Make volume
if genVol:
vol=self.addVolume(surfaceLoopID=surfaceLoop.Id)
else:
vol=None
return vertices,arcs,lineLoops,surfaces,surfaceLoop,vol
def addCylinderByParameters(self,center,radius,z,height,volSize,plane="z",genLoops=True,genSurfaces=True,genSurfaceLoop=True,genVol=True,checkExist=True):
"""Adds cylinder to domain by given center and radius and height.
Will create:
* 2 new circles at ``z=z`` and ``z=z+height``, see :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.addCircleByParameters`.
* 4 :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.line` objects connecting the two circles.
If selected, will create:
* 6 :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.lineLoop` objects around the 6 surfaces.
* 6 corresponding :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.ruledSurface` objects.
* 1 :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.surfaceLoop`.
* 1 corresponding :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.volume`.
For example:
>>> d=pyfrp_gmsh_geometry.domain()
>>> d.addCylinderByParameters([256,256],100,50,100,30.,plane="z",genLoops=True,genSurfaces=True,genVol=True)
>>> d.draw()
would return:
.. image:: ../imgs/pyfrp_gmsh_geometry/addCylinderByParameters.png
.. note:: Plane can be given as ``"x","y","z"``. See also :py:func:`pyfrp.modules.pyfrp_geometry_module.flipCoordinate`.
Args:
center (numpy.ndarray): Center of cylinder.
radius (float): Radius of the cylinder.
z (float): Height at which cylinder is placed.
height (float): Height of cylinder.
volSize (float): Mesh size of vertices.
Keyword Args:
plane (str): Plane in which cylinder is placed.
genLoops (bool): Generate line loops.
genSurfaces (bool): Generate surfaces.
genSurfaceLoop (bool): Generate surface loop.
genVol (bool): Generate volume.
checkExist (bool): Checks if a vertex at same location already exists.
Returns:
tuple: Tuple containing:
* vertices (list): List of vertices.
* arcs (list): List of arcs.
* lines (list): List of lines.
* loops (list): List of loops.
* surfaces (list): List of surfaces.
* surfaceLoop (pyfrp.modules.pyfrp_gmsh_geometry.surfaceLoop): Generated surface loop.
* vol (pyfrp.modules.pyfrp_gmsh_geometry.volume): Generated volume.
"""
# Check input
if genVol and not genSurfaces:
printError("Cannot create volume when there are no surfaces.")
if genSurfaces and not genLoops:
printError("Cannot create surfaces when there are no loops.")
# Create circles
vertices1,arcs1,ltemp,stemp=self.addCircleByParameters(center,radius,z,volSize,plane=plane)
vertices2,arcs2,ltemp,stemp=self.addCircleByParameters(center,radius,z+height,volSize,plane=plane)
# Create connecting lines
lines=[]
lines.append(self.addLine(vertices1[1],vertices2[1]))
lines.append(self.addLine(vertices1[2],vertices2[2]))
lines.append(self.addLine(vertices1[3],vertices2[3]))
lines.append(self.addLine(vertices1[4],vertices2[4]))
# Generate loops
loops=[]
if genLoops:
loops.append(self.addLineLoop(edgeIDs=[arcs1[0].Id,arcs1[1].Id,arcs1[2].Id,arcs1[3].Id]))
loops.append(self.addLineLoop(edgeIDs=[arcs2[0].Id,arcs2[1].Id,arcs2[2].Id,arcs2[3].Id]))
loops.append(self.addLineLoop(edgeIDs=[-lines[0].Id,arcs1[0].Id,lines[1].Id,-arcs2[0].Id]))
loops.append(self.addLineLoop(edgeIDs=[-lines[1].Id,arcs1[1].Id,lines[2].Id,-arcs2[1].Id]))
loops.append(self.addLineLoop(edgeIDs=[-lines[2].Id,arcs1[2].Id,lines[3].Id,-arcs2[2].Id]))
loops.append(self.addLineLoop(edgeIDs=[-lines[3].Id,arcs1[3].Id,lines[0].Id,-arcs2[3].Id]))
# Generate surfaces
surfaces=[]
surfaceIds=[]
if genSurfaces:
for loop in loops:
surfaces.append(self.addRuledSurface(lineLoopID=loop.Id))
surfaceIds.append(surfaces[-1].Id)
# Generate surface loop and volume
if genSurfaceLoop:
surfaceLoop=self.addSurfaceLoop(surfaceIDs=surfaceIds)
else:
surfaceLoop=None
if genVol:
vol=self.addVolume(surfaceLoopID=surfaceLoop.Id)
else:
vol=None
return [vertices1,vertices2],[arcs1,arcs2],lines,loops,surfaces,surfaceLoop,vol
def insertVertex(self,obj,copy=False,strict=True,debug=False):
"""Inserts vertex into domain.
See also :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.insertElement`.
Args:
obj (pyfrp.modules.pyfrp_gmsh_geometry.vertex): A vertex.
Keyword Args:
copy (bool): Inserts copy of object.
strict (bool): Don't allow IDs to be assigned to multiple elements.
debug (bool): Print debugging output.
Returns:
list: Updated edges list.
"""
if self.getVertexByX(obj.x)[0]!=False:
if debug:
printWarning("Vertex with x=" +str(obj.x) + " already exists.")
return self.insertElement("vertices",obj,copy=copy,strict=strict,debug=debug)
def insertEdge(self,obj,copy=False,strict=True,debug=False):
"""Inserts edge into domain.
See also :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.insertElement`.
Args:
obj (pyfrp.modules.pyfrp_gmsh_geometry.edge): A edge.
Keyword Args:
copy (bool): Inserts copy of object.
strict (bool): Don't allow IDs to be assigned to multiple elements.
debug (bool): Print debugging output.
Returns:
list: Updated edges list.
"""
LOld=len(self.edges)
l=self.insertElement("edges",obj,copy=copy,strict=strict,debug=debug)
b=(LOld<len(self.edges))
if b:
if obj.typ==0:
self.lines.append(obj)
if obj.typ==1:
self.arcs.append(obj)
if obj.typ==2:
self.bSplines.append(obj)
return l
def insertLineLoop(self,obj,copy=False,strict=True,debug=False):
"""Inserts line loop into domain.
See also :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.insertElement`.
Args:
obj (pyfrp.modules.pyfrp_gmsh_geometry.lineLoop): A line loop.
Keyword Args:
copy (bool): Inserts copy of object.
strict (bool): Don't allow IDs to be assigned to multiple elements.
debug (bool): Print debugging output.
Returns:
list: Updated lineLoops list.
"""
return self.insertElement("lineLoops",obj,copy=copy,strict=strict,debug=debug)
def insertRuledSurface(self,obj,copy=False,strict=True,debug=False):
"""Inserts ruled surface into domain.
See also :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.insertElement`.
Args:
obj (pyfrp.modules.pyfrp_gmsh_geometry.ruledSurface): A ruled surface.
Keyword Args:
copy (bool): Inserts copy of object.
strict (bool): Don't allow IDs to be assigned to multiple elements.
debug (bool): Print debugging output.
Returns:
list: Updated ruledSurfaces list.
"""
return self.insertElement("ruledSurfaces",obj,copy=copy,strict=strict,debug=debug)
def insertSurfaceLoop(self,obj,copy=False,strict=True,debug=False):
"""Inserts surface loop into domain.
See also :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.insertElement`.
Args:
obj (pyfrp.modules.pyfrp_gmsh_geometry.surfaceLoop): A surface loop.
Keyword Args:
copy (bool): Inserts copy of object.
strict (bool): Don't allow IDs to be assigned to multiple elements.
debug (bool): Print debugging output.
Returns:
list: Updated surfaceLoops list.
"""
return self.insertElement("ruledSurfaces",obj,copy=copy,strict=strict,debug=debug)
def insertVolume(self,obj,copy=False,strict=True,debug=False):
"""Inserts volume into domain.
See also :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.insertElement`.
Args:
obj (pyfrp.modules.pyfrp_gmsh_geometry.volume): A volume.
Keyword Args:
copy (bool): Inserts copy of object.
strict (bool): Don't allow IDs to be assigned to multiple elements.
debug (bool): Print debugging output.
Returns:
list: Updates volumes list.
"""
return self.insertElement("volumes",obj,copy=copy,strict=strict,debug=debug)
def insertField(self,obj,copy=False,strict=True,debug=False):
"""Inserts field into domain.
See also :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.insertElement`.
Args:
obj (pyfrp.modules.pyfrp_gmsh_geometry.field): A field.
Keyword Args:
copy (bool): Inserts copy of object.
strict (bool): Don't allow IDs to be assigned to multiple elements.
debug (bool): Print debugging output.
Returns:
list: Updates fields list.
"""
return self.insertElement("fields",obj,copy=copy,strict=strict,debug=debug)
def insertElement(self,element,obj,copy=False,strict=True,debug=False):
"""Inserts gmshElement into domain.
Checks if there is already a element with ID.
.. note:: If ``copy=True``, will generate copy of element. This might mess
with some connection between elements. Thus ``copy=False`` as default.
Possible values for ``element`` are:
* vertices
* lines
* arcs
* lineLoops
* bSplines
* ruledSurfaces
* surfaceLoops
* volumes
* fields
* auto
.. note:: ``element='auto'`` will automatically detect the type of element and insert it at the right
point.
Will automatically set ``self`` as element's domain.
.. note:: If ``strict=True``, will not allow double IDs.
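For example (an illustrative sketch, moving a vertex from one domain into another):
>>> d1=pyfrp_gmsh_geometry.domain()
>>> d2=pyfrp_gmsh_geometry.domain()
>>> v=d1.addVertex(np.array([10.,20.,0.]),volSize=15.)
>>> vertices=d2.insertElement("auto",v)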
Args:
element (str): Name of element list where object belongs.
obj (pyfrp.modules.pyfrp_gmsh_geometry.gmshElement): Element to insert.
Keyword Args:
copy (bool): Inserts copy of object.
strict (bool): Don't allow IDs to be assigned to multiple elements.
debug (bool): Print debugging output.
Returns:
list: Updated respective element list.
"""
if element=='auto':
element = obj.getTypeListName()
if element in ['edges','bSplines','arcs','lines']:
return self.insertEdge(obj,strict=strict,copy=copy,debug=debug)
if element=='vertices':
return self.insertVertex(obj,strict=strict,copy=copy,debug=debug)
if self.checkIdExists(obj.Id,getattr(self,element),debug=debug):
if debug:
printWarning(obj.getType() + " with Id=" +str(obj.getID()) + " already exits.")
if strict:
return getattr(self,element)
if copy:
e=obj.getCopy()
else:
e=obj
e.domain=self
getattr(self,element).append(e)
return getattr(self,element)
def getRuledSurfacesByNormal(self,direction,onlyAbs=True):
"""Returns all surfaces in domain that have given normal vector.
The direction can be given in multiple ways:
* A ``numpy.ndarray``: The method will look for all surfaces with same normal vector than array.
* A ``str``: The method will first check if ``direction='all'`` is given. If so, return all surfaces.
Otherwise the method will decode the string ("x"/"y","z") using
:py:func:`pyfrp.modules.pyfrp_geometry_module.decodeEuclideanBase`, then proceed the same way as
with the ``numpy.ndarray``.
* A ``list`` of the previous options: Will find all surface matching each of them.
.. note:: If ``onlyAbs=True``, will only look for matches in terms of absolute value. If a list of directions is
given, one can also specify a list of ``onlyAbs`` values (see the example below).
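For example (an illustrative sketch, assuming ``d`` holds axis-aligned surfaces, e.g. created via :py:func:`addCuboidByParameters`):
>>> sfsZ=d.getRuledSurfacesByNormal("z")
>>> sfsXY=d.getRuledSurfacesByNormal(["x","y"],onlyAbs=[True,False])
>>> allSurfaces=d.getRuledSurfacesByNormal('all')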
Args:
direction (numpy.ndarray): Direction to be matched.
Keyword Args:
onlyAbs (bool): Only try to match in terms of absolute value.
Returns:
list: List of matching surfaces.
"""
# Check if we need to return all of them
if direction=='all':
return list(self.ruledSurfaces)
# Result list
sfs=[]
# If list, call recursively
if isinstance(direction,list):
if not isinstance(onlyAbs,list):
onlyAbs=len(direction)*[onlyAbs]
for i,d in enumerate(direction):
sfs=sfs+self.getRuledSurfacesByNormal(d,onlyAbs=onlyAbs[i])
return sfs
# If given as string, decode
if isinstance(direction,str):
d=pyfrp_geometry_module.decodeEuclideanBase(direction)
else:
d=direction
# Look for matching surfaces
for sf in self.ruledSurfaces:
if onlyAbs:
if pyfrp_misc_module.compareVectors(abs(sf.getNormal().astype('float')),d.astype('float')):
sfs.append(sf)
else:
if pyfrp_misc_module.compareVectors(sf.getNormal().astype('float'),d.astype('float')):
sfs.append(sf)
return sfs
def checkIdExists(self,Id,objList,debug=False):
"""Checks if any object in ``objList`` already has ID ``Id``.
Args:
Id (int): ID to be checked.
objList (list): List of objects, for example ``edges``.
Keyword Args:
debug (bool): Print debugging output.
Returns:
bool: True if any object has ID ``Id``.
"""
IdList=pyfrp_misc_module.objAttrToList(objList,'Id')
if Id in IdList:
if debug:
printWarning("Object with Id " + str(Id) + " already exists.")
return True
return False
def getNewId(self,objList,Id=None):
"""Returns free ID for object type.
Args:
objList (list): List of objects, for example ``edges``.
Keyword Args:
Id (int): ID to be checked.
Returns:
int: New free ID.
"""
if Id==None:
newId=self.incrementID(objList)
else:
if self.checkIdExists(Id,objList):
newId=self.incrementID(objList)
else:
newId=Id
return newId
def incrementID(self,objList):
"""Returns ID that is by one larger for a specific
object type.
Args:
objList (list): List of objects, for example ``edges``.
Returns:
int: Incremented ID.
"""
if len(objList)==0:
newId=1
else:
IdList=pyfrp_misc_module.objAttrToList(objList,'Id')
newId=max(IdList)+1
return newId
def getEdgeById(self,ID):
"""Returns edge with ID ``ID``.
Returns ``(False,False)`` if edge cannot be found.
Args:
ID (int): ID of edge.
Returns:
tuple: Tuple containing:
* e (pyfrp.modules.pyfrp_gmsh_geometry.edge): Edge.
* i (int): Position in ``edges`` list.
"""
for i,e in enumerate(self.edges):
if e.Id==ID:
return e,i
return False,False
def getEdgeByVertices(self,v1,v2):
"""Returns edge between vertex ``v1`` and ``v2``.
Returns ``(False,False)`` if edge cannot be found.
Args:
v1 (pyfrp.modules.pyfrp_gmsh_geometry.vertex): Vertex 1.
v2 (pyfrp.modules.pyfrp_gmsh_geometry.vertex): Vertex 2.
Returns:
tuple: Tuple containing:
* e (pyfrp.modules.pyfrp_gmsh_geometry.edge): Edge.
* i (int): Position in ``edges`` list.
"""
for i,e in enumerate(self.edges):
vertices=[e.getFirstVertex(1),e.getLastVertex(1)]
if v1 in vertices and v2 in vertices:
return e,i
return False,False
def getLineLoopById(self,ID):
"""Returns lineLoop with ID ``ID``.
Returns ``(False,False)`` if lineLoop cannot be found.
Args:
ID (int): ID of lineLoop.
Returns:
tuple: Tuple containing:
* l (pyfrp.modules.pyfrp_gmsh_geometry.lineLoop): lineLoop.
* i (int): Position in ``lineLoops`` list.
"""
for i,l in enumerate(self.lineLoops):
if l.Id==ID:
return l,i
return False,False
def getRuledSurfaceById(self,ID):
"""Returns ruledSurface with ID ``ID``.
Returns ``(False,False)`` if ruledSurface cannot be found.
Args:
ID (int): ID of ruledSurface.
Returns:
tuple: Tuple containing:
* l (pyfrp.modules.pyfrp_gmsh_geometry.ruledSurface): ruledSurface.
* i (int): Position in ``ruledSurfaces`` list.
"""
for i,l in enumerate(self.ruledSurfaces):
if l.Id==ID:
return l,i
return False,False
def getSurfaceLoopById(self,ID):
"""Returns surfaceLoop with ID ``ID``.
Returns ``(False,False)`` if surfaceLoop cannot be found.
Args:
ID (int): ID of surfaceLoop.
Returns:
tuple: Tuple containing:
* l (pyfrp.modules.pyfrp_gmsh_geometry.surfaceLoop): surfaceLoop.
* i (int): Position in ``surfaceLoops`` list.
"""
for i,l in enumerate(self.surfaceLoops):
if l.Id==ID:
return l,i
return False,False
def getVolumeById(self,ID):
"""Returns volume with ID ``ID``.
Returns ``(False,False)`` if volume cannot be found.
Args:
ID (int): ID of volume.
Returns:
tuple: Tuple containing:
* l (pyfrp.modules.pyfrp_gmsh_geometry.volume): volume.
* i (int): Position in ``volumes`` list.
"""
for i,l in enumerate(self.volumes):
if l.Id==ID:
return l,i
return False,False
def getFieldById(self,ID):
"""Returns field with ID ``ID``.
Returns ``(False,False)`` if field cannot be found.
Args:
ID (int): ID of field.
Returns:
tuple: Tuple containing:
* f (pyfrp.modules.pyfrp_gmsh_geometry.field): Field.
* i (int): Position in ``fields`` list.
"""
for i,f in enumerate(self.fields):
if f.Id==ID:
return f,i
return False,False
def getVertexById(self,ID):
"""Returns vertex with ID ``ID``.
Returns ``(False,False)`` if vertex cannot be found.
Args:
ID (int): ID of vertex.
Returns:
tuple: Tuple containing:
* v (pyfrp.modules.pyfrp_gmsh_geometry.vertex): Vertex.
* i (int): Position in ``vertices`` list.
"""
for i,v in enumerate(self.vertices):
if v.Id==ID:
return v,i
return False,False
def getVertexByX(self,x):
"""Returns vertex at coordinate ``x``.
Returns ``(False,False)`` if vertex cannot be found.
Args:
x (numpy.ndarry): Coordinate of vertex.
Returns:
tuple: Tuple containing:
* v (pyfrp.modules.pyfrp_gmsh_geometry.vertex): Vertex.
* i (int): Position in ``vertices`` list.
"""
for i,v in enumerate(self.vertices):
if (np.array(x)==v.x).sum()==len(v.x):
return v,i
return False,False
def draw(self,ax=None,color='k',ann=None,drawSurfaces=False,surfaceColor='b',alpha=0.2,backend='mpl',asSphere=True,size=5,annElements=[True,True,True],linewidth=1):
"""Draws complete domain.
There are two different backends for drawing, namely
* Matplotlib (``backend='mpl'``)
* VTK (``backend='vtk'``)
Matplotlib is easier to handle, but slower. VTK is faster for complex
geometries.
.. note:: If ``backend=mpl``, ``ax`` should be a ``matplotlib.axes``, if ``backend='vtk'``,
``ax`` should be a ``vtk.vtkRenderer`` object.
.. note:: If no axes is given, will create new one,
see also :py:func:`pyfrp.modules.pyfrp_plot_module.makeGeometryPlot`
or :py:func:`pyfrp.modules.pyfrp_vtk_module.makeVTKCanvas`.
.. warning:: Annotations are not properly working with ``backend='vtk'``.
With ``annElements`` the user has the possibility to only annotate given elements. For example
``annElements=[False,True,False]`` only annotates edges.
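For example (an illustrative sketch; ``d`` is any previously populated domain):
>>> ax=d.draw(color='k',ann=True,annElements=[False,True,False])
>>> renderer=d.draw(backend='vtk',asSphere=True,size=5)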
Keyword Args:
ax (matplotlib.axes): Matplotlib axes to be plotted in.
color (str): Color of domain.
ann (bool): Show annotations.
asSphere (bool): Draws vertex as sphere (only in vtk mode).
size (float): Size of vertex (only in vtk mode).
annElements (list): Only annotate some element types.
linewidth (float): Line width.
Returns:
matplotlib.axes: Updated axes.
"""
if ann==None:
ann=False
for v in self.vertices:
ax=v.draw(ax=ax,color=color,ann=ann*annElements[0],backend=backend,size=size,asSphere=asSphere,render=False)
for e in self.edges:
ax=e.draw(ax=ax,color=color,ann=ann*annElements[1],backend=backend,render=False,linewidth=linewidth)
if drawSurfaces:
for s in self.ruledSurfaces:
ax=s.draw(ax=ax,color=surfaceColor,alpha=alpha,backend=backend,ann=ann*annElements[2])
if backend=="vtk":
ax=pyfrp_vtk_module.renderVTK(ax,start=False)
return ax
def getExtend(self):
"""Returns extend of domain in all 3 dimensions.
Returns:
tuple: Tuple containing:
* minx (float): Minimal x-coordinate.
* maxx (float): Maximal x-coordinate.
* miny (float): Minimal y-coordinate.
* maxy (float): Maximal y-coordinate.
* minz (float): Minimal z-coordinate.
* maxz (float): Maximal z-coordinate.
"""
x=[]
y=[]
z=[]
for v in self.vertices:
x.append(v.x[0])
y.append(v.x[1])
z.append(v.x[2])
return min(x), max(x), min(y),max(y), min(z),max(z)
def verticesCoordsToList(self):
"""Returns list of coordinates from all vertrices.
Returns:
list: List of (x,y,z) coordinates.
"""
l=[]
for v in self.vertices:
l.append(v.x)
return l
def setGlobalVolSize(self,volSize):
"""Sets volSize for all nodes in geometry.
"""
for v in self.vertices:
v.volSize=volSize
def addLineLoop(self,Id=None,edgeIDs=[]):
"""Adds new :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.lineLoop` instance
with given edgeIDs.
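For example (an illustrative sketch, building a loop and a surface from the edges of a rectangle; the coordinates are arbitrary):
>>> d=pyfrp_gmsh_geometry.domain()
>>> vertices,lines,loop,surface=d.addRectangleByParameters([0,0],100,50,0.,20.)
>>> loop=d.addLineLoop(edgeIDs=pyfrp_misc_module.objAttrToList(lines,'Id'))
>>> surface=d.addRuledSurface(lineLoopID=loop.Id)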
Keyword Args:
edgeIDs (list): List of edge IDs included in line loop.
Returns:
pyfrp.modules.pyfrp_gmsh_geometry.lineLoop: New lineLoop instance.
"""
newId=self.getNewId(self.lineLoops,Id)
l=lineLoop(self,edgeIDs,newId)
self.lineLoops.append(l)
return l
def addAllSurfacesToLoop(self):
"""Adds all surfaces in domain to a single surfaceLoop.
Returns:
pyfrp.modules.pyfrp_gmsh_geometry.surfaceLoop: New surfaceLoop instance.
"""
surfaceIDs=pyfrp_misc_module.objAttrToList(self.ruledSurfaces,'Id')
return self.addSurfaceLoop(surfaceIDs=surfaceIDs)
def addEnclosingVolume(self):
"""Adds volume enclosing all surfaces.
See also :py:func:`addAllSurfacesToLoop`.
Returns:
pyfrp.modules.pyfrp_gmsh_geometry.volume: New volume instance.
"""
s=self.addAllSurfacesToLoop()
return self.addVolume(surfaceLoopID=s.Id)
def addSurfaceLoop(self,Id=None,surfaceIDs=[]):
"""Adds new :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.surfaceLoop` instance
with given surfaceIDs.
Keyword Args:
surfaceIDs (list): List of surface IDs included in surface loop.
Returns:
pyfrp.modules.pyfrp_gmsh_geometry.surfaceLoop: New surfaceLoop instance.
"""
newId=self.getNewId(self.surfaceLoops,Id)
l=surfaceLoop(self,surfaceIDs,newId)
self.surfaceLoops.append(l)
return l
def addRuledSurface(self,Id=None,lineLoopID=None):
"""Adds new :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.ruledSurface` instance
with given lineLoop.
Keyword Args:
lineLoopID (ID): ID of line loop.
Returns:
pyfrp.modules.pyfrp_gmsh_geometry.ruledSurface: New ruledSurface instance.
"""
newId=self.getNewId(self.ruledSurfaces,Id)
l=ruledSurface(self,lineLoopID,newId)
self.ruledSurfaces.append(l)
return l
def addVolume(self,Id=None,surfaceLoopID=None):
"""Adds new :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.volume` instance
with given surfaceLoop.
Keyword Args:
surfaceLoopID (ID): ID of surface loop.
Returns:
pyfrp.modules.pyfrp_gmsh_geometry.volume: New volume instance.
"""
newId=self.getNewId(self.volumes,Id)
l=volume(self,surfaceLoopID,newId)
self.volumes.append(l)
return l
def addBoxField(self,Id=None,volSizeIn=10.,volSizeOut=20.,xRange=[],yRange=[],zRange=[]):
"""Adds new :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.boxField` instance.
Keyword Args:
Id (int): ID of field.
volSizeIn (float): Mesh element volume inside box.
volSizeOut (float): Mesh element volume outside box.
xRange (list): Range of box field in x-direction given as ``[minVal,maxVal]``.
yRange (list): Range of box field in y-direction given as ``[minVal,maxVal]``.
zRange (list): Range of box field in z-direction given as ``[minVal,maxVal]``.
Returns:
pyfrp.modules.pyfrp_gmsh_geometry.boxField: New boxField instance.
"""
newId=self.getNewId(self.fields,Id)
l=boxField(self,newId,volSizeIn=volSizeIn,volSizeOut=volSizeOut,xRange=xRange,yRange=yRange,zRange=zRange)
self.fields.append(l)
return l
def addThresholdField(self,Id=None,IField=None,LcMin=5.,LcMax=20.,DistMin=30.,DistMax=60.):
"""Adds new :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.thresholdField` instance.
.. image:: ../imgs/pyfrp_gmsh_geometry/thresholdField.png
Keyword Args:
Id (int): ID of field.
IField (int): ID of the field (e.g. an attractor field) that the threshold field evaluates.
LcMin (float): Minimum volSize of threshold field.
LcMax (float): Maximum volSize of threshold field.
DistMin (float): Distance up to which the element size is LcMin.
DistMax (float): Distance beyond which the element size is LcMax.
Returns:
pyfrp.modules.pyfrp_gmsh_geometry.thresholdField: New thresholdField instance.
"""
newId=self.getNewId(self.fields,Id)
l=thresholdField(self,newId,IField=IField,LcMin=LcMin,LcMax=LcMax,DistMin=DistMin,DistMax=DistMax)
self.fields.append(l)
return l
def addAttractorField(self,Id=None,NodesList=[]):
"""Adds new :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.attractorField` instance.
Keyword Args:
Id (int): ID of field.
NodesList (list): List of IDs of the Nodes that attractor field centers around.
Returns:
pyfrp.modules.pyfrp_gmsh_geometry.attractorField: New attractorField instance.
"""
newId=self.getNewId(self.fields,Id)
l=attractorField(self,newId,NodesList=NodesList)
self.fields.append(l)
return l
def addMinField(self,Id=None,FieldsList=[]):
"""Adds new :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.minField` instance.
Keyword Args:
Id (int): ID of field.
FieldsList (list): List of IDs of the fields included in the minimum field.
Returns:
pyfrp.modules.pyfrp_gmsh_geometry.minField: New minField instance.
"""
newId=self.getNewId(self.fields,Id)
l=minField(self,newId,FieldsList=FieldsList)
self.fields.append(l)
return l
def addBoundaryLayerField(self,Id=None,AnisoMax=10000000000,hwall_n=1.,hwall_t=1,ratio=1.1,thickness=10.,hfar=1.,IntersectMetrics=1,Quads=0.):
"""Adds new :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.boundaryLayerField` instance.
Keyword Args:
Id (int): ID of field.
AnisoMax (float): Threshold angle for creating a mesh fan in the boundary layer.
IntersectMetrics (int): Intersect metrics of all faces.
Quads (int): Generate recombined elements in the boundary layer.
hfar (float): Element size far from the wall.
hwall_n (float): Mesh Size Normal to the The Wall.
hwall_t (float): Mesh Size Tangent to the Wall.
ratio (float): Size Ratio Between Two Successive Layers.
thickness (float): Maximal thickness of the boundary layer.
List (list): List of field IDs.
Returns:
pyfrp.modules.pyfrp_gmsh_geometry.boundaryLayerField: New boundaryLayerField instance.
"""
newId=self.getNewId(self.fields,Id)
l=boundaryLayerField(self,newId,AnisoMax=AnisoMax,hwall_n=hwall_n,hwall_t=hwall_t,ratio=ratio,thickness=thickness,hfar=hfar,IntersectMetrics=IntersectMetrics,Quads=Quads)
self.fields.append(l)
return l
def setAnnOffset(self,offset):
"""Sets annotation offset for plotting.
Args:
offset (numpy.ndarray): New offset.
"""
self.annXOffset=offset[0]
self.annYOffset=offset[1]
self.annZOffset=offset[2]
def writeToFile(self,fn):
"""Writes domain to file.
Args:
fn (str): File path to write to.
"""
with open(fn,'wb') as f:
self.writeElements("vertices",f)
self.writeElements("lines",f)
self.writeElements("arcs",f)
self.writeElements("bSplines",f)
self.writeElements("lineLoops",f)
self.writeElements("ruledSurfaces",f)
self.writeElements("surfaceLoops",f)
self.writeElements("volumes",f)
self.writeElements("fields",f)
def writeElements(self,element,f):
"""Writes all entities of a specific element type to file.
Possible elements are:
* vertices
* lines
* arcs
* lineLoops
* bSplines
* ruledSurfaces
* surfaceLoops
* volumes
* fields
Args:
element (str): Element type to write.
f (file): File to write to.
"""
pyfrp_gmsh_IO_module.writeComment(f,element)
for v in getattr(self,element):
f=v.writeToFile(f)
f.write("\n")
def incrementAllIDs(self,offset):
"""Adds offset to all entity IDs.
Args:
offset (int): Offset to be added.
"""
self.incrementIDs(offset,"vertices")
self.incrementIDs(offset,"lines")
self.incrementIDs(offset,"arcs")
self.incrementIDs(offset,"bSplines")
self.incrementIDs(offset,"lineLoops")
self.incrementIDs(offset,"ruledSurfaces")
self.incrementIDs(offset,"surfaceLoops")
self.incrementIDs(offset,"volumes")
def incrementIDs(self,offset,element):
"""Adds offset to all entity IDs.
Possible elements are:
* vertices
* lines
* arcs
* lineLoops
* ruledSurfaces
* surfaceLoops
* volumes
* fields
Args:
offset (int): Offset to be added.
element (str): Element type to increment.
"""
for e in getattr(self,element):
e.Id=e.Id+offset
def setDomainGlobally(self):
"""Makes sure that ``self`` is domain for all
elements.
"""
self.setDomainForElementType("vertices")
self.setDomainForElementType("lines")
self.setDomainForElementType("arcs")
self.setDomainForElementType("bSplines")
self.setDomainForElementType("lineLoops")
self.setDomainForElementType("ruledSurfaces")
self.setDomainForElementType("surfaceLoops")
self.setDomainForElementType("volumes")
self.setDomainForElementType("fields")
def setDomainForElementType(self,element):
"""Makes sure that ``self`` is domain for all
elements of given type.
Possible elements are:
* vertices
* lines
* arcs
* lineLoops
* ruledSurfaces
* surfaceLoops
* volumes
* fields
Args:
element (str): Element type to set the domain for.
"""
for e in getattr(self,element):
e.domain=self
def getMaxID(self,element):
"""Returns maximum ID for a specific element.
Possible elements are:
* vertices
* lines
* arcs
* lineLoops
* ruledSurfaces
* surfaceLoops
* volumes
Args:
element (str): Element type.
Returns:
int: Maximum ID.
"""
IDs=[]
for e in getattr(self,element):
IDs.append(e.Id)
try:
return max(IDs)
except ValueError:
return 0
def getAllMaxID(self):
"""Returns maximum ID over all elements.
Returns:
int: Maximum ID.
"""
IDs=[]
IDs.append(self.getMaxID("vertices"))
IDs.append(self.getMaxID("lines"))
IDs.append(self.getMaxID("arcs"))
IDs.append(self.getMaxID("lineLoops"))
IDs.append(self.getMaxID("ruledSurfaces"))
IDs.append(self.getMaxID("surfaceLoops"))
IDs.append(self.getMaxID("volumes"))
return max(IDs)
def getAllFieldsOfType(self,typ):
"""Returns all fields of domain with specific typ.
Returns:
list: List of :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.field` objects.
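Example (a sketch; ``d`` is assumed to be an existing domain that already contains fields):
>>> minFields=d.getAllFieldsOfType("min")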
"""
fs=[]
for f in self.fields:
if f.typ==typ:
fs.append(f)
return fs
def getBkgdField(self):
"""Returns background field of domain.
Returns:
pyfrp.modules.pyfrp_gmsh_geometry.field: Background field.
"""
return self.bkgdField
def hasBkgdField(self):
"""Checks if domain already has a background field.
Returns:
bool: True if background field already exists.
"""
return self.bkgdField!=None
def genMinBkgd(self,FieldsList=[]):
"""Generates minimum field as background field.
If domain already has minimum field, will take it and set it
as background field. If domain has multiple minimum fields, will take
the first one that appears in ``fields`` list.
Keyword Args:
FieldsList (list): List of field IDs included in minField.
Returns:
pyfrp.modules.pyfrp_gmsh_geometry.minField: Minimum field.
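Example (a sketch; ``d`` is assumed to be an existing domain that already contains fields):
>>> bkgd=d.genMinBkgd()
>>> bkgd==d.getBkgdField()
True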
"""
#Generate minField if not existent
minFields=self.getAllFieldsOfType("min")
if len(minFields)==0:
if len(FieldsList)==0:
FieldsList=pyfrp_misc_module.objAttrToList(self.fields,'Id')
minField=self.addMinField(FieldsList=FieldsList)
else:
if self.hasBkgdField():
if self.getBkgdField() in minFields:
minField=self.getBkgdField()
else:
minField=minFields[0]
minField.setAsBkgdField()
if not self.hasBkgdField():
minField.setAsBkgdField()
return self.getBkgdField()
def getAllObjectsWithProp(self,objName,attr,val):
"""Filters all objects of type objName given attribute value.
Possible objects names are:
* vertices
* lines
* arcs
* lineLoops
* ruledSurfaces
* surfaceLoops
* volumes
* fields
.. note:: ``val`` can have any datatype.
Args:
objName (str): Name of object list.
attr (str): Name of attribute.
val (str): Value of attribute.
Returns:
list: List of objects that fulfill requirement.
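Example (a sketch; grabs all line-type edges of a hypothetical domain ``d``, using that lines have ``typ=0``):
>>> lines=d.getAllObjectsWithProp("edges","typ",0)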
"""
objects=getattr(self,objName)
filteredObjects=pyfrp_misc_module.getAllObjWithAttrVal(objects,attr,val)
return filteredObjects
def simplifySurfaces(self,iterations=3,triangIterations=2,addPoints=False,fixSurfaces=True,debug=False):
"""Tries to simplify surfaces inside the domain.
Does this by:
* For ``iterations`` iterations, do:
* Find all surfaces with the same normal vector.
* Try to fuse this surfaces, see also :py:func:`pyfrp.modules.pyfrp_geometry_module.ruledSurface.fuse`.
* Clean up edges via :py:func:`pyfrp.modules.pyfrp_geometry_module.domain.cleanUpUnusedEdges`.
* Fixing loops via :py:func:`pyfrp.modules.pyfrp_geometry_module.domain.fixAllLoops`.
* Fixing surfaces via :py:func:`pyfrp.modules.pyfrp_geometry_module.domain.fixAllSurfaces`.
Keyword Args:
iterations (int): Number of iterations used for simplification.
triangIterations (int): Number of iterations used for subdivision of surfaces.
addPoints (bool): Allow adding points inside surface triangles.
fixSurfaces (bool): Allow fixing of surfaces, making sure they are coherent with Gmsh requirements.
debug (bool): Print debugging messages.
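Example (a sketch; ``d`` is assumed to be a domain read from a .geo file):
>>> d.simplifySurfaces(iterations=3,debug=True)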
"""
#Remember the stats from the start
x=len(self.ruledSurfaces)
y=len(self.lineLoops)
z=len(self.edges)
#Compute the normal of all surfaces
for surface in self.ruledSurfaces:
surface.getNormal()
#Loop through iterations
for k in range(iterations):
#Loop through surfaces
for i,surface in enumerate(self.ruledSurfaces):
#Get all surfaces with same normal vector
sameNormal=self.getAllObjectsWithProp("ruledSurfaces","normal",surface.normal)
sameNormal=sameNormal+self.getAllObjectsWithProp("ruledSurfaces","normal",-surface.normal)
#Loop through all with same normal
for j,sN in enumerate(sameNormal):
if sN==surface:
continue
#Fuse
if surface.fuse(sN,debug=debug):
if debug:
print "Successfully fused ", surface.Id, sN.Id
#Clean up edges
self.cleanUpUnusedEdges(debug=debug)
#Print some final statistics
if debug:
print "Surfaces: Before =" , x , " After:" , len(self.ruledSurfaces)
print "lineLoops: Before =" , y , " After:" , len(self.lineLoops)
print "Edges: Before =" , z , " After:" , len(self.edges)
#raw_input()
#Fix loops and surfaces
self.fixAllLoops(debug=debug)
if fixSurfaces:
self.fixAllSurfaces(iterations=triangIterations,addPoints=addPoints)
def cleanUpUnusedEdges(self,debug=False):
"""Cleans up all unused edges in domain.
See also: :py:func:`pyfrp.pyfrp_modules.pyfrp_gmsh_geometry.edge.delete`.
Keyword Args:
debug (bool): Print debugging output.
"""
for edge in self.edges:
edge.delete(debug=debug)
def fixAllLoops(self,debug=False):
"""Tries to fix all loops in domain.
See also: :py:func:`pyfrp.pyfrp_modules.pyfrp_gmsh_geometry.lineLoop.fix`.
Keyword Args:
debug (bool): Print debugging output.
"""
for loop in self.lineLoops:
loop.fix()
def fixAllSurfaces(self,debug=False,iterations=2,addPoints=False):
"""Tries to fix all surfaces in domain.
Does this by reinitiating all ``lineLoop`` objects.
See also: :py:func:`pyfrp.pyfrp_modules.pyfrp_gmsh_geometry.ruledSurface.initLineLoop`.
Keyword Args:
iterations (int): Number of iterations used for subdivision of surfaces.
addPoints (bool): Allow adding points inside surface triangles.
debug (bool): Print debugging messages.
"""
for surface in self.ruledSurfaces:
surface.initLineLoop(surface.lineLoop.Id,debug=debug,iterations=iterations,addPoints=addPoints)
def save(self,fn):
"""Saves domain to pickle file.
Args:
fn (str): Output filename.
"""
pyfrp_IO_module.saveToPickle(self,fn=fn)
def merge(self,d):
"""Merges domain d into this domain.
Does this by:
* Incrementing all IDs in ``d`` such that there is no overlap with ``self``.
* Merging all element lists.
* Making sure that all elements refer to ``self`` as domain.
See also :py:func:`incrementAllIDs` and :py:func:`setDomainGlobally`.
Args:
d (pyfrp.modules.pyfrp_geometry_module.domain): Domain to merge.
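Example (a sketch; ``d1`` and ``d2`` are assumed to be two existing domains):
>>> d1.merge(d2)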
"""
d.incrementAllIDs(self.getAllMaxID()+1)
self.edges=self.edges+d.edges
self.vertices=self.vertices+d.vertices
self.arcs=self.arcs+d.arcs
self.bSplines=self.bSplines+d.bSplines
self.lines=self.lines+d.lines
self.lineLoops=self.lineLoops+d.lineLoops
self.ruledSurfaces=self.ruledSurfaces+d.ruledSurfaces
self.surfaceLoops=self.surfaceLoops+d.surfaceLoops
self.volumes=self.volumes+d.volumes
self.fields=self.fields+d.fields
self.setDomainGlobally()
def removeDuplicates(self,debug=False):
"""Removes duplicate vertex and edge IDs from domain.
See also :py:func:`removeDuplicateEdgeIDs` and :py:func:`removeDuplicateVerticesIDs`.
Keyword Args:
debug (bool): Print debugging output.
"""
self.removeDuplicateEdgeIDs(debug=debug)
self.removeDuplicateVerticesIDs()
def removeDuplicateVerticesIDs(self):
"""Checks if multiple vertices have the same ID and tries to remove one of them.
Checks if vertices with same ID have the same coordinate. If so, remove all but one. Otherwise fixes
index.
Returns:
list: Updated vertices list.
"""
# Loop through edges
for i,v in enumerate(self.vertices):
for j in range(i+1,len(self.vertices)):
# Check if same ID
if self.vertices[j].Id==self.vertices[i].Id:
# Check if same start/end vertex
if pyfrp_misc_module.compareVectors(self.vertices[j].x,self.vertices[i].x):
self.vertices.remove(self.vertices[j])
else:
newId=self.getNewId(self.vertices,None)
self.vertices[j].setID(newId)
return self.vertices
def removeDuplicateEdgeIDs(self,debug=False):
"""Checks if multiple edges have the same ID and tries to remove one of them.
Checks if edges with same ID have the same start and end vertex. If so, removes all but one. Otherwise fixes
index.
Returns:
list: Updated edges list.
"""
edgeIDs=pyfrp_misc_module.objAttrToList(self.edges,'Id')
if debug: print edgeIDs
# Loop through edges
for i,e in enumerate(self.edges):
for j in range(i+1,len(self.edges)):
if debug: print i,j,self.edges[i].Id,self.edges[j].Id
# Check if same ID
if self.edges[j].Id==self.edges[i].Id:
print "same id",self.edges[j].Id
# Check if same start/end vertex
if (self.edges[j].getFirstVertex()==self.edges[i].getFirstVertex()) and (self.edges[j].getLastVertex()==self.edges[i].getLastVertex()):
self.edges[j].delete(debug=debug)
else:
newId=self.getNewId(self.edges,None)
self.edges[j].setID(newId)
return self.edges
class gmshElement(object):
"""Base class for all gmsh geometry elements.
Args:
domain (pyfrp.modules.pyfrp_gmsh_geometry.domain): Domain element belongs to.
Id (int): ID of element.
"""
def __init__(self,domain,Id):
self.domain=domain
self.Id=Id
def getID(self):
"""Returns ID of element.
Returns:
int: ID of element.
"""
return self.Id
def setID(self,Id):
"""Sets ID of element.
Args:
Id (int): New ID of element.
Returns:
int: New ID of element.
"""
self.Id=Id
return self.Id
def getCopy(self):
"""Returns copy of element.
Uses `copy.copy` to generate copy.
Returns:
pyfrp.modules.pyfrp_gmsh_geometry.gmshElement: Copy of element.
"""
return cpy.copy(self)
def getType(self):
"""Returns type of element.
Returns:
str: Type of element.
"""
t=str(type(self))
t=t.split("'")[1]
t=t.replace("pyfrp.modules.pyfrp_gmsh_geometry.","")
return t
def getTypeListName(self):
"""Returns the element lists name.
Returns:
str: Name of element list.
"""
if self.getType()=="vertex":
return "vertices"
if self.getType()=="line":
return "lines"
if self.getType()=="arc":
return "arcs"
if self.getType()=="edge":
return "edges"
if self.getType()=="lineLoop":
return "lineLoops"
if self.getType()=="ruledSurface":
return "ruledSurfaces"
if self.getType()=="surfaceLoop":
return "surfaceLoops"
if self.getType()=="volume":
return "volumes"
if self.getType()=="field":
return "fields"
def getTypeList(self):
"""Returns the element list of domain for this element.
Returns:
list: Element list.
"""
return getattr(self.getDomain(),self.getTypeListName())
def getDomain(self):
"""Returns element's domain.
Returns:
pyfrp.modules.pyfrp_gmsh_geometry.domain: Element's domain.
"""
return self.domain
def setDomain(self,d):
"""Sets element's domain.
Args:
d (pyfrp.modules.pyfrp_gmsh_geometry.domain): New domain
Returns:
pyfrp.modules.pyfrp_gmsh_geometry.domain: New domain.
"""
self.domain=d
return self.domain
def getSubElements(self):
"""Returns all elements that define this element.
Returns:
list: List of elements.
"""
return []
def getAllSubElements(self,elements=[]):
"""Finds all elements that are necessary to define this element recursively.
Returns:
list: List of elements.
"""
elements=list(elements)
if len(self.getSubElements())==0:
return elements
else:
for el in self.getSubElements():
elements.append(el)
elements=el.getAllSubElements(elements=elements)
return elements
def extract(self,d=None,strict=True,copy=False,debug=False):
"""Extracts element and all elements necessary to define it.
.. note:: If ``d`` is specified, then all extracted elements are inserted into ``d`` using
:py:func:`insertElement`.
Keyword Args:
d (pyfrp.modules.pyfrp_gmsh_geometry.domain): Domain to insert element
copy (bool): Inserts copy of object.
strict (bool): Don't allow IDs to be assigned to multiple elements.
debug (bool): Print debugging output.
Returns:
list: List of elements.
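Example (a sketch; ``d`` and ``d2`` are assumed to be existing domains and ``d`` to contain at least one surface):
>>> elmts=d.ruledSurfaces[0].extract(d=d2,copy=True)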
"""
elmts=[self]+self.getAllSubElements()
if d!=None:
for el in elmts:
d.insertElement('auto',el,strict=strict,copy=copy,debug=debug)
return elmts
class vertex(gmshElement):
"""Vertex class storing information from gmsh .geo Points.
.. note:: ``volSize`` does not have any effect on the geometry itself but is simply
stored in the vertex object for further usage.
Args:
domain (pyfrp.modules.pyfrp_gmsh_geometry.domain): Domain vertex belongs to.
x (numpy.ndarray): Coordinate of vertex.
Id (int): ID of vertex.
Keyword Args:
volSize (float): Element size at vertex.
"""
def __init__(self,domain,x,Id,volSize=None):
gmshElement.__init__(self,domain,Id)
self.x=np.array(x)
self.volSize=volSize
def draw(self,ax=None,color=None,ann=None,backend="mpl",asSphere=True,size=10,render=False):
"""Draws vertex.
There are two different backends for drawing, namely
* Matplotlib (``backend='mpl'``)
* VTK (``backend='vtk'``)
Matplotlib is easier to handle, but slower. VTK is faster for complex
geometries.
.. note:: If ``backend=mpl``, ``ax`` should be a ``matplotlib.axes``, if ``backend='vtk'``,
``ax`` should be a ``vtk.vtkRenderer`` object.
.. note:: If no axes is given, will create new one,
see also :py:func:`pyfrp.modules.pyfrp_plot_module.makeGeometryPlot`
or :py:func:`pyfrp.modules.pyfrp_vtk_module.makeVTKCanvas`.
.. warning:: Annotations are not properly working with ``backend='vtk'``.
Keyword Args:
ax (matplotlib.axes): Matplotlib axes to be plotted in.
color (str): Color of vertex.
ann (bool): Show annotations.
asSphere (bool): Draws vertex as sphere (only in vtk mode).
size (float): Size of vertex (only in vtk mode).
render (bool): Render in the end (only in vtk mode).
Returns:
matplotlib.axes: Updated axes.
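Example (a sketch; ``v`` is assumed to be an existing vertex, drawn with the matplotlib backend):
>>> ax=v.draw(backend='mpl',ann=True)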
"""
if backend=="mpl":
ax=self.drawMPL(ax=ax,color=color,ann=ann)
if backend=="vtk":
ax=self.drawVTK(color=color,ann=ann,ax=ax,asSphere=asSphere,size=size,render=render)
return ax
def drawMPL(self,ax=None,color=None,ann=None):
"""Draws vertrex into matplotlib axes.
.. note:: If ``ann=None``, will set ``ann=False``.
.. note:: If no axes is given, will create new one,
see also :py:func:`pyfrp.modules.pyfrp_plot_module.makeGeometryPlot`.
Keyword Args:
ax (matplotlib.axes): Matplotlib axes to be plotted in.
color (str): Color of domain.
ann (bool): Show annotations.
Returns:
matplotlib.axes: Axes.
"""
if ann==None:
ann=False
if ax==None:
fig,axes = pyfrp_plot_module.makeGeometryPlot()
ax=axes[0]
ax.scatter(self.x[0],self.x[1],self.x[2],c=color)
if ann:
ax.text(self.x[0]+self.domain.annXOffset, self.x[1]+self.domain.annYOffset, self.x[2]+self.domain.annZOffset, "p"+str(self.Id), None)
pyfrp_plot_module.redraw(ax)
return ax
def drawVTK(self,size=10,asSphere=True,ax=None,ann=None,color=[0,0,0],render=False):
"""Draws vertrex into VTK renderer.
.. note:: If ``ann=None``, will set ``ann=False``.
.. note:: If no axes is given, will create new ``vtkRenderer``,
see also :py:func:`pyfrp.modules.pyfrp_vtk_module.makeVTKCanvas`.
Keyword Args:
ax (vtk.vtkRenderer): Renderer to draw in.
color (str): Color of vertex.
ann (bool): Show annotations.
asSphere (bool): Draws vertex as sphere.
size (float): Size of vertex.
render (bool): Render in the end.
Returns:
vtk.vtkRenderer: Updated renderer.
"""
if ann==None:
ann=False
if ax==None:
ax,renderWindow,renderWindowInteractor=pyfrp_vtk_module.makeVTKCanvas()
pyfrp_vtk_module.drawVTKPoint(self.x,asSphere=asSphere,color=color,size=size,renderer=ax)
if ann:
printWarning("Annotations don't properly work with backend=vtk .")
pyfrp_vtk_module.drawVTKText("p"+str(self.Id),[self.x[0]+self.domain.annXOffset, self.x[1]+self.domain.annYOffset, self.x[2]+self.domain.annZOffset],renderer=ax)
if render:
ax=pyfrp_vtk_module.renderVTK(ax,start=False)
return ax
def setX(self,x):
"""Sets coordinate if vertex to ``x``.
Returns:
numpy.ndarray: New vertex coordinate.
"""
self.x=x
return self.x
def writeToFile(self,f):
"""Writes vertex to file.
Args:
f (file): File to write to.
Returns:
file: File.
"""
f.write("Point("+str(self.Id)+")= {" + str(self.x[0]) + ","+ str(self.x[1])+ "," + str(self.x[2]) + ',' + str(self.volSize) + "};\n" )
return f
def addToAttractor(self,attrField=None,LcMin=5.,LcMax=20.,DistMin=30.,DistMax=60.):
"""Adds vertex to a attractor field.
If no field is given, will create new one with given parameters. Will also create
a new threshhold field around attractor and add fields to minField. If no minField exists,
will create a new one too and set it as background field.
See also :py:func:`addAttractorField`, :py:func:`addThresholdField`, :py:func:`addMinField` and :py:func:`genMinBkgd`.
Keyword Args:
attrField (pyfrp.modules.pyfrp_gmsh_geometry.attractorField): Attractor field object.
LcMin (float): Minimum volSize of threshold field.
LcMax (float): Maximum volSize of threshold field.
DistMin (float): Minimum density of field.
DistMax (float): Maximum density of field.
Returns:
pyfrp.modules.pyfrp_gmsh_geometry.attractorField: Attractor field around vertex.
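Example (a sketch; refines the mesh around the first vertex of a hypothetical domain ``d``):
>>> attrField=d.vertices[0].addToAttractor(LcMin=3.,LcMax=20.,DistMin=10.,DistMax=50.)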
"""
#Generate attractor field if not given
if attrField==None:
attrField=self.domain.addAttractorField(NodesList=[self.Id])
else:
attrField.addNodeByID(self.Id)
#Generate threshold field if not already existent
threshFields=attrField.includedInThresholdField()
if len(threshFields)==0:
threshField=self.domain.addThresholdField(IField=attrField.Id,LcMin=LcMin,LcMax=LcMax,DistMin=DistMin,DistMax=DistMax)
else:
threshField=threshFields[0]
self.domain.genMinBkgd(FieldsList=[threshField.Id])
return attrField
def addToBoundaryLayer(self,boundField=None,**fieldOpts):
"""Adds vertex to a boundary layer field.
If no field is given, will create a new one with the given parameters and add it to a minField. If no minField exists,
will create a new one too and set it as background field.
See also :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.addBoundaryLayerField`
:py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.addMinField` and
:py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.genMinBkgd`.
Keyword Args:
boundField (pyfrp.modules.pyfrp_gmsh_geometry.boundaryLayerField): Boundary layer field object.
fieldOpts (dict): See documentation of boundary layer field for all available options.
Returns:
pyfrp.modules.pyfrp_gmsh_geometry.boundaryLayerField: Boundary layer field around vertex.
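Example (a sketch; the option names are assumed to match the boundary layer field attributes):
>>> boundField=d.vertices[0].addToBoundaryLayer(hwall_n=2.,thickness=15.)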
"""
#Generate attractor field if not given
if boundField==None:
boundField=self.domain.addBoundaryLayerField()
#Add Vertex
boundField.addNodeByID(self.Id)
#Set options
boundField.setFieldAttributes(**fieldOpts)
#Generate background field
self.domain.genMinBkgd(FieldsList=[boundField.Id])
return boundField
class edge(gmshElement):
"""Edge class storing information from gmsh .geo circles and lines.
Args:
domain (pyfrp.modules.pyfrp_gmsh_geometry.domain): Domain vertex belongs to.
Id (int): ID of edge.
typ (int): Type of edge (1=arc/0=line).
"""
def __init__(self,domain,Id,typ):
gmshElement.__init__(self,domain,Id)
self.typ=typ
def getDomain(self):
"""Returns domain edge belongs to."""
return self.domain
def getTyp(self):
"""Returns Type of edge."""
return self.typ
def decodeTyp(self):
"""Decodes type of edge into string."""
if self.typ==1:
return "arc"
elif self.typ==0:
return "line"
elif self.typ==2:
return "bSpline"
def addToBoundaryLayer(self,boundField=None,**fieldOpts):
"""Adds edge to a boundary layer field.
If no field is given, will create a new one with the given parameters and add it to a minField. If no minField exists,
will create a new one too and set it as background field.
See also :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.addBoundaryLayerField`
:py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.addMinField` and
:py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.genMinBkgd`.
Keyword Args:
boundField (pyfrp.modules.pyfrp_gmsh_geometry.boundaryLayerField): Boundary layer field object.
fieldOpts (dict): See documentation of boundary layer field for all available options.
Returns:
pyfrp.modules.pyfrp_gmsh_geometry.boundaryLayerField: Boundary layer field around edge.
"""
#Generate attractor field if not given
if boundField==None:
boundField=self.domain.addBoundaryLayerField()
#Add Vertex
boundField.addEdgeByID(self.Id)
#Set options
boundField.setFieldAttributes(**fieldOpts)
#Generate background field
self.domain.genMinBkgd(FieldsList=[boundField.Id])
return boundField
def includedInLoop(self):
"""Checks if edge is included in a loop.
Returns:
tuple: Tuple containing:
* included (bool): True if included.
* loops (list): List of :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.lineLoop` objects that include edge.
"""
loops=[]
for i,loop in enumerate(self.domain.lineLoops):
if self in loop.edges:
loops.append(loop)
return len(loops)>0,loops
def includedInField(self):
"""Checks if edge is included in a field.
.. note:: Only checks for boundary layer fields, since they are the only ones who can evolve around edge.
Returns:
tuple: Tuple containing:
* included (bool): True if included.
* fields (list): List of :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.fields` objects that include edge.
"""
fields=[]
for field in self.domain.fields:
if field.typ=="boundaryLayer":
if self in field.EdgesList:
fields.append(field)
return len(fields)>0,fields
def delete(self,debug=False):
"""Deletes edge if it is not used in any loop or field.
Returns:
bool: True if deletion was successful.
"""
incl,loops=self.includedInLoop()
if incl:
if debug:
printWarning("Was not able to delete edge with ID " + str(self.Id) +". Still part of loops" + str(pyfrp_misc_module.objAttrToList(loops,'Id')) + " .")
return False
incl,fields=self.includedInField()
if incl:
if debug:
printWarning("Was not able to delete edge with ID " + str(self.Id) +". Still part of field with ID " + str(pyfrp_misc_module.objAttrToList(fields,'Id')) )
return False
try:
if self.typ==0:
self.domain.lines.remove(self)
if self.typ==1:
self.domain.arcs.remove(self)
if self.typ==2:
self.domain.bSplines.remove(self)
self.domain.edges.remove(self)
except ValueError:
if debug:
printWarning("Could not remove edge " + str(self.Id)+" from elements list. Already seems to be removed.")
return False
return True
class line(edge):
"""Line class storing information from gmsh .geo lines.
Args:
domain (pyfrp.modules.pyfrp_gmsh_geometry.domain): Domain line belongs to.
v1 (pyfrp.modules.pyfrp_gmsh_geometry.vertex): Start vertex.
v2 (pyfrp.modules.pyfrp_gmsh_geometry.vertex): End vertex.
Id (int): ID of line.
"""
def __init__(self,domain,v1,v2,Id):
edge.__init__(self,domain,Id,0)
self.v1=v1
self.v2=v2
def getMiddle(self):
r"""Returns midpoint of line.
.. math:: m = \frac{x(v_1) + x(v_2)}{2}
Returns:
numpy.ndarray: Midpoint.
"""
return (self.v1.x+self.v2.x)/2.
def draw(self,ax=None,color=None,ann=None,backend="mpl",render=False,drawVertices=False,linewidth=1):
"""Draws line.
There are two different backends for drawing, namely
* Matplotlib (``backend='mpl'``)
* VTK (``backend='vtk'``)
Matplotlib is easier to handle, but slower. VTK is faster for complex
geometries.
.. note:: If ``backend=mpl``, ``ax`` should be a ``matplotlib.axes``, if ``backend='vtk'``,
``ax`` should be a ``vtk.vtkRenderer`` object.
.. note:: If no axes is given, will create new one,
see also :py:func:`pyfrp.modules.pyfrp_plot_module.makeGeometryPlot`
or :py:func:`pyfrp.modules.pyfrp_vtk_module.makeVTKCanvas`.
.. warning:: Annotations are not properly working with ``backend='vtk'``.
Keyword Args:
ax (matplotlib.axes): Matplotlib axes to be plotted in.
color (str): Color of line.
ann (bool): Show annotations.
render (bool): Render in the end (only in vtk mode).
drawVertices (bool): Also draw vertices.
Returns:
matplotlib.axes: Updated axes.
"""
if backend=="mpl":
ax=self.drawMPL(ax=ax,color=color,ann=ann,linewidth=linewidth)
if backend=="vtk":
ax=self.drawVTK(color=color,ann=ann,ax=ax,render=render,linewidth=linewidth)
if drawVertices:
ax=self.v1.draw(ax=ax,color=color,ann=ann,backend=backend,render=render,asSphere=False)
ax=self.v2.draw(ax=ax,color=color,ann=ann,backend=backend,render=render,asSphere=False)
return ax
def drawMPL(self,ax=None,color=None,ann=None,linewidth=1):
"""Draws line into matplotlib axes.
.. note:: If ``ann=None``, will set ``ann=False``.
.. note:: If no axes is given, will create new one,
see also :py:func:`pyfrp.modules.pyfrp_plot_module.makeGeometryPlot`.
Keyword Args:
ax (matplotlib.axes): Matplotlib axes to be plotted in.
color (str): Color of line.
ann (bool): Show annotations.
Returns:
matplotlib.axes: Axes.
"""
if ann==None:
ann=False
if ax==None:
fig,axes = pyfrp_plot_module.makeGeometryPlot()
ax=axes[0]
ax.plot([self.v1.x[0],self.v2.x[0]],[self.v1.x[1],self.v2.x[1]],zs=[self.v1.x[2],self.v2.x[2]],color=color,linestyle='-',linewidth=linewidth)
if ann:
m=self.getMiddle()
ax.text(m[0]+self.domain.annXOffset, m[1]+self.domain.annYOffset, m[2]+self.domain.annZOffset, "l"+str(self.Id), None)
pyfrp_plot_module.redraw(ax)
return ax
def drawVTK(self,ax=None,color=None,ann=None,render=False,linewidth=1):
"""Draws line into VTK renderer.
.. note:: If ``ann=None``, will set ``ann=False``.
.. note:: If no axes is given, will create new ``vtkRenderer``,
see also :py:func:`pyfrp.modules.pyfrp_vtk_module.makeVTKCanvas`.
See also :py:func:`pyfrp.modules.pyfrp_vtk_module.drawVTKLine`.
Keyword Args:
ax (vtk.vtkRenderer): Renderer to draw in.
color (str): Color of line.
ann (bool): Show annotations.
render (bool): Render in the end.
Returns:
vtk.vtkRenderer: Updated renderer.
"""
if ann==None:
ann=False
if ax==None:
ax,renderWindow,renderWindowInteractor=pyfrp_vtk_module.makeVTKCanvas()
pyfrp_vtk_module.drawVTKLine(self.v1.x,self.v2.x,color=color,renderer=ax,linewidth=linewidth)
if ann:
printWarning("Annotations don't properly work with backend=vtk .")
m=self.getMiddle()
pyfrp_vtk_module.drawVTKText("p"+str(self.Id),[m[0]+self.domain.annXOffset, m[1]+self.domain.annYOffset, m[2]+self.domain.annZOffset],renderer=ax)
if render:
ax=pyfrp_vtk_module.renderVTK(ax,start=False)
return ax
def getLastVertex(self,orientation):
"""Returns last vertex of line given a orientation.
Orientation can be either forward (1), or reverse (-1).
Args:
orientation (int): Orientation of line.
Returns:
pyfrp.pyfrp_gmsh_geometry.vertex: Vertex.
"""
if orientation==1:
return self.v2
elif orientation==-1:
return self.v1
else:
printError("Cannot return last vertex. Orientation " + str(orientation) + " unknown.")
return None
def getFirstVertex(self,orientation):
"""Returns first vertex of line given a orientation.
Orientation can be either forward (1), or reverse (-1).
Args:
orientation (int): Orientation of line.
Returns:
pyfrp.pyfrp_gmsh_geometry.vertex: Vertex.
"""
if orientation==1:
return self.v1
elif orientation==-1:
return self.v2
else:
printError("Cannot return first vertex. Orientation " + str(orientation) + " unknown.")
return None
def writeToFile(self,f):
"""Writes line to file.
Args:
f (file): File to write to.
Returns:
file: File.
"""
f.write("Line("+str(self.Id)+")= {" + str(self.v1.Id) + "," + str(self.v2.Id) + "};\n" )
return f
def getDirection(self,orientation):
"""Returns direction of line.
Orientation can be either forward (1), or reverse (-1).
Args:
orientation (int): Orientation of line.
Returns:
numpy.ndarray: Direction of line.
"""
return self.getLastVertex(orientation).x-self.getFirstVertex(orientation).x
def getSubElements(self):
"""Returns all elements that define this element.
Returns:
list: List of elements.
"""
return [self.getFirstVertex(1),self.getLastVertex(1)]
class arc(edge):
"""Arc class storing information from gmsh .geo cicle.
Will compute ``angleOffset``, ``angle`` and ``pOffset`` on creation.
.. image:: ../imgs/pyfrp_gmsh_geometry/arc.png
Args:
domain (pyfrp.modules.pyfrp_gmsh_geometry.domain): Domain arc belongs to.
vstart (pyfrp.modules.pyfrp_gmsh_geometry.vertex): Start vertex.
vcenter (pyfrp.modules.pyfrp_gmsh_geometry.vertex): Center vertex.
vend (pyfrp.modules.pyfrp_gmsh_geometry.vertex): End vertex.
Id (int): ID of arc.
"""
def __init__(self,domain,vstart,vcenter,vend,Id):
edge.__init__(self,domain,Id,1)
self.vcenter=vcenter
self.vstart=vstart
self.vend=vend
self.radius=self.computeRadius()
self.pOffset=self.computePOffset()
self.angleOffset=self.computeAngleOffset()
self.angle=self.computeAngle()
def computeAngleOffset(self):
"""Computes and returns offset angle of arc.
"""
self.angleOffset=pyfrp_geometry_module.getAngle(self.pOffset,self.vstart.x-self.vcenter.x)
return self.angleOffset
def computeAngle(self):
"""Computes and returns angle of arc.
"""
self.angle=pyfrp_geometry_module.getAngle(self.vstart.x-self.vcenter.x,self.vend.x-self.vcenter.x)
return self.angle
def computePOffset(self):
"""Computes and returns offset point of arc.
"""
v1n,v2nb = self.getNormVec()
self.pOffset=self.radius*v2nb
self.pOffset=self.pOffset/np.linalg.norm(self.pOffset)
return self.pOffset
def getNormVec(self):
"""Computes and returns vectors normal to arc.
Returns:
tuple: Tuple containing:
* v1n (numpy.ndarray): Normal vector to ``vstart-vcenter``.
* v2n (numpy.ndarray): Normal vector to ``vend-vcenter``.
"""
v1=self.vstart.x-self.vcenter.x
v2=self.vend.x-self.vcenter.x
self.v1n = v1/np.linalg.norm(v1)
v2n = v2/np.linalg.norm(v2)
v2nb = v2n-np.dot(v2n,self.v1n)*self.v1n
self.v2nb = v2nb/np.linalg.norm(v2nb)
return self.v1n,self.v2nb
def getPlotVec(self):
"""Returns vectors for plotting arc.
Returns:
tuple: Tuple containing:
* x (numpy.ndarray): x-array.
* y (numpy.ndarray): y-array.
* z (numpy.ndarray): z-array.
"""
self.getNormVec()
if np.mod(self.angle,np.pi/2.)<0.01:
a = np.linspace(0,self.angle,1000)
else:
a = np.linspace(self.angleOffset-self.angle,self.angleOffset,1000)
x,y,z=self.getPointOnArc(a)
return x,y,z
def getPointOnArc(self,a):
"""Returns point on arc at angle ``a``.
Returns:
tuple: Tuple containing:
* x (float): x-coordinate.
* y (float): y-coordinate.
* z (float): z-coordinate.
"""
x = self.vcenter.x[0]+np.sin(a)*self.radius*self.v1n[0]+np.cos(a)*self.radius*self.v2nb[0]
y = self.vcenter.x[1]+np.sin(a)*self.radius*self.v1n[1]+np.cos(a)*self.radius*self.v2nb[1]
z = self.vcenter.x[2]+np.sin(a)*self.radius*self.v1n[2]+np.cos(a)*self.radius*self.v2nb[2]
return x,y,z
def computeRadius(self):
"""Computes and returns radius of arc.
Returns:
float: Radius of arc.
"""
self.radius=np.linalg.norm(self.vstart.x-self.vcenter.x)
return self.radius
def inArc(self,x,debug=False):
"""Tells if coordinate ``x`` is on arc or not.
Returns:
bool: ``True`` if on arc, ``False`` otherwise.
"""
#Compute angle between x and the reference direction (assumes the reference point [radius,0,0] relative to vcenter)
a=pyfrp_geometry_module.getAngle(np.array([self.radius,0.,0.])-self.vcenter.x,x-self.vcenter.x)
if np.mod(a,2*np.pi)<self.angle+self.angleOffset and self.angleOffset<=np.mod(a,2*np.pi):
return True
else:
return False
def getRadius(self):
"""Returns radius of arc."""
return self.radius
def getAngle(self):
"""Returns angle of arc."""
return self.angle
def getAngleOffset(self):
"""Returns offset angle of arc."""
return self.angleOffset
def getVstart(self):
"""Returns start vertex of arc."""
return self.vstart
def getVend(self):
"""Returns end vertex of arc."""
return self.vend
def getXstart(self):
"""Returns start coordinate of arc."""
return self.vstart.x
def getXend(self):
"""Returns end coordinate of arc."""
return self.vend.x
def getVcenter(self):
"""Returns center vertex of arc."""
return self.vcenter
def getXcenter(self):
"""Returns center coordinate of arc."""
return self.vcenter.x
def draw(self,ax=None,color=None,ann=None,backend="mpl",render=False,drawVertices=True,linewidth=1):
"""Draws arc.
There are two different backends for drawing, namely
* Matplotlib (``backend='mpl'``)
* VTK (``backend='vtk'``)
Matplotlib is easier to handle, but slower. VTK is faster for complex
geometries.
.. note:: If ``backend=mpl``, ``ax`` should be a ``matplotlib.axes``, if ``backend='vtk'``,
``ax`` should be a ``vtk.vtkRenderer`` object.
.. note:: If no axes is given, will create new one,
see also :py:func:`pyfrp.modules.pyfrp_plot_module.makeGeometryPlot`
or :py:func:`pyfrp.modules.pyfrp_vtk_module.makeVTKCanvas`.
.. warning:: Annotations are not properly working with ``backend='vtk'``.
Keyword Args:
ax (matplotlib.axes): Matplotlib axes to be plotted in.
color (str): Color of line.
ann (bool): Show annotations.
render (bool): Render in the end (only in vtk mode).
drawVertices (bool): Also draw vertices.
Returns:
matplotlib.axes: Updated axes.
"""
if backend=="mpl":
ax=self.drawMPL(ax=ax,color=color,ann=ann,linewidth=linewidth)
if backend=="vtk":
ax=self.drawVTK(color=color,ann=ann,ax=ax,render=render,linewidth=linewidth)
if drawVertices:
ax=self.vcenter.draw(ax=ax,color=color,ann=ann,backend=backend,render=render,asSphere=False)
ax=self.vstart.draw(ax=ax,color=color,ann=ann,backend=backend,render=render,asSphere=False)
ax=self.vend.draw(ax=ax,color=color,ann=ann,backend=backend,render=render,asSphere=False)
return ax
def drawMPL(self,ax=None,color=None,ann=None,render=False,linewidth=1):
"""Draws arc into matplotlib axes.
.. note:: If ``ann=None``, will set ``ann=False``.
.. note:: If no axes is given, will create new one,
see also :py:func:`pyfrp.modules.pyfrp_plot_module.makeGeometryPlot`.
Keyword Args:
ax (matplotlib.axes): Matplotlib axes to be plotted in.
color (str): Color of line.
ann (bool): Show annotations.
Returns:
matplotlib.axes: Axes.
"""
if ann==None:
ann=False
if ax==None:
fig,axes = pyfrp_plot_module.makeGeometryPlot()
ax=axes[0]
x,y,z=self.getPlotVec()
ax.plot(x,y,zs=z,color=color,linestyle='-',linewidth=linewidth)
if ann:
x,y,z=self.getPointOnArc(self.angle/2.)
ax.text(x+self.domain.annXOffset, y+self.domain.annYOffset, z+self.domain.annZOffset, "c"+str(self.Id), None)
pyfrp_plot_module.redraw(ax)
return ax
def drawVTK(self,ax=None,color=None,ann=None,render=False,linewidth=1):
"""Draws arc into VTK renderer.
.. note:: If ``ann=None``, will set ``ann=False``.
.. note:: If no axes is given, will create new ``vtkRenderer``,
see also :py:func:`pyfrp.modules.pyfrp_vtk_module.makeVTKCanvas`.
See also :py:func:`pyfrp.modules.pyfrp_vtk_module.drawVTKArc`.
Keyword Args:
ax (vtk.vtkRenderer): Renderer to draw in.
color (str): Color of line.
ann (bool): Show annotations.
render (bool): Render in the end.
Returns:
vtk.vtkRenderer: Updated renderer.
"""
if ann==None:
ann=False
if ax==None:
ax,renderWindow,renderWindowInteractor=pyfrp_vtk_module.makeVTKCanvas()
pyfrp_vtk_module.drawVTKArc(self.vstart.x,self.vcenter.x,self.vend.x,color=color,renderer=ax,linewidth=linewidth)
if ann:
printWarning("Annotations don't properly work with backend=vtk .")
x,y,z=self.getPointOnArc(self.angle/2.)
pyfrp_vtk_module.drawVTKText("c"+str(self.Id),[x+self.domain.annXOffset, y+self.domain.annYOffset, z+self.domain.annZOffset],renderer=ax)
if render:
ax=pyfrp_vtk_module.renderVTK(ax,start=False)
return ax
def getLastVertex(self,orientation):
"""Returns last vertex of arc given a orientation.
Orientation can be either forward (1), or reverse (-1).
Args:
orientation (int): Orientation of arc.
Returns:
pyfrp.pyfrp_gmsh_geometry.vertex: Vertex.
"""
if orientation==1:
return self.getVend()
elif orientation==-1:
return self.getVstart()
else:
printError("Cannot return last vertex. Orientation " + str(orientation) + " unknown.")
return None
def getFirstVertex(self,orientation):
"""Returns first vertex of arc given a orientation.
Orientation can be either forward (1), or reverse (-1).
Args:
orientation (int): Orientation of arc.
Returns:
pyfrp.pyfrp_gmsh_geometry.vertex: Vertex.
"""
if orientation==-1:
return self.getVend()
elif orientation==1:
return self.getVstart()
else:
printError("Cannot return first vertex. Orientation " + str(orientation) + " unknown.")
return None
def writeToFile(self,f):
"""Writes arc to file.
Args:
f (file): File to write to.
Returns:
file: File.
"""
f.write("Circle("+str(self.Id)+")= {" + str(self.vstart.Id) + ","+ str(self.vcenter.Id)+ "," + str(self.vend.Id) + "};\n" )
return f
def getSubElements(self):
"""Returns all elements that define this element.
Returns:
list: List of elements.
"""
return [self.vcenter,self.vstart,self.vend]
class bSpline(edge):
"""Bspline class storing information from gmsh .geo BSpline.
Args:
domain (pyfrp.modules.pyfrp_gmsh_geometry.domain): Domain arc belongs to.
vertices (list): List of vertex objects.
Id (int): ID of spline.
"""
def __init__(self,domain,vertices,Id):
edge.__init__(self,domain,Id,2)
self.initVertices(vertices)
def initVertices(self,vertices):
"""Initiates list of vertices.
If vertex is given by Id, will use :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.getVertexById`
to identify vertex.
Args:
vertices (list): List of vertex objects.
Returns:
list: List of vertex objects.
"""
self.vertices=[]
for v in vertices:
if isinstance(v,int):
self.vertices.append(self.domain.getVertexById(v)[0])
else:
self.vertices.append(v)
return self.vertices
def writeToFile(self,f):
"""Writes bSpline to file.
Args:
f (file): File to write to.
Returns:
file: File.
"""
f.write("BSpline("+str(self.Id)+")= {" )
for i,v in enumerate(self.vertices):
f.write(str(v.Id))
if i!=len(self.vertices)-1:
f.write(",")
else:
f.write("};\n")
return f
def getMiddle(self):
r"""Returns midpoint of bSpline.
Midpoint in this case is defined as the coordinate of the mid vertex.
Returns:
numpy.ndarray: Midpoint.
"""
return self.vertices[int(np.floor(len(self.vertices)/2.))].x
def draw(self,ax=None,color=None,ann=None,backend="mpl",render=False,drawVertices=False,linewidth=1):
"""Draws spline.
There are two different backends for drawing, namely
* Matplotlib (``backend='mpl'``)
* VTK (``backend='vtk'``)
Matplotlib is easier to handle, but slower. VTK is faster for complex
geometries.
.. note:: If ``backend=mpl``, ``ax`` should be a ``matplotlib.axes``, if ``backend='vtk'``,
``ax`` should be a ``vtk.vtkRenderer`` object.
.. note:: If no axes is given, will create new one,
see also :py:func:`pyfrp.modules.pyfrp_plot_module.makeGeometryPlot`
or :py:func:`pyfrp.modules.pyfrp_vtk_module.makeVTKCanvas`.
.. warning:: Annotations are not properly working with ``backend='vtk'``.
Keyword Args:
ax (matplotlib.axes): Matplotlib axes to be plotted in.
color (str): Color of line.
ann (bool): Show annotations.
render (bool): Render in the end (only in vtk mode).
drawVertices (bool): Also draw vertices.
Returns:
matplotlib.axes: Updated axes.
"""
printWarning("Spline drawing currently just draws lines inbetween interpolation points.")
if backend=="mpl":
ax=self.drawMPL(ax=ax,color=color,ann=ann,linewidth=linewidth)
if backend=="vtk":
ax=self.drawVTK(color=color,ann=ann,ax=ax,render=render,linewidth=linewidth)
if drawVertices:
for v in self.vertices:
ax=v.draw(ax=ax,color=color,ann=ann,backend=backend,render=render,asSphere=False)
return ax
def drawMPL(self,ax=None,color=None,ann=None,linewidth=1):
"""Draws spline into matplotlib axes.
.. note:: If ``ann=None``, will set ``ann=False``.
.. note:: If no axes is given, will create new one,
see also :py:func:`pyfrp.modules.pyfrp_plot_module.makeGeometryPlot`.
Keyword Args:
ax (matplotlib.axes): Matplotlib axes to be plotted in.
color (str): Color of line.
ann (bool): Show annotations.
Returns:
matplotlib.axes: Axes.
"""
if ann==None:
ann=False
if ax==None:
fig,axes = pyfrp_plot_module.makeGeometryPlot()
ax=axes[0]
for i in range(len(self.vertices)-1):
ax.plot([self.vertices[i].x[0],self.vertices[i+1].x[0]],[self.vertices[i].x[1],self.vertices[i+1].x[1]],zs=[self.vertices[i].x[2],self.vertices[i+1].x[2]],color=color,linestyle='-',linewidth=linewidth)
if ann:
m=self.getMiddle()
ax.text(m[0]+self.domain.annXOffset, m[1]+self.domain.annYOffset, m[2]+self.domain.annZOffset, "l"+str(self.Id), None)
pyfrp_plot_module.redraw(ax)
return ax
def drawVTK(self,ax=None,color=None,ann=None,render=False,linewidth=1):
"""Draws spline into VTK renderer.
.. note:: If ``ann=None``, will set ``ann=False``.
.. note:: If no axes is given, will create new ``vtkRenderer``,
see also :py:func:`pyfrp.modules.pyfrp_vtk_module.makeVTKCanvas`.
See also :py:func:`pyfrp.modules.pyfrp_vtk_module.drawVTKLine`.
Keyword Args:
ax (vtk.vtkRenderer): Renderer to draw in.
color (str): Color of line.
ann (bool): Show annotations.
render (bool): Render in the end.
Returns:
vtk.vtkRenderer: Updated renderer.
"""
if ann==None:
ann=False
if ax==None:
ax,renderWindow,renderWindowInteractor=pyfrp_vtk_module.makeVTKCanvas()
pyfrp_vtk_module.drawVTKPolyLine(pyfrp_misc_module.objAttrToList(self.vertices,'x'),color=color,renderer=ax,linewidth=linewidth)
if ann:
printWarning("Annotations don't properly work with backend=vtk .")
m=self.getMiddle()
pyfrp_vtk_module.drawVTKText("p"+str(self.Id),[m[0]+self.domain.annXOffset, m[1]+self.domain.annYOffset, m[2]+self.domain.annZOffset],renderer=ax)
if render:
ax=pyfrp_vtk_module.renderVTK(ax,start=False)
return ax
def getLastVertex(self,orientation):
"""Returns last vertex of arc given a orientation.
Orientation can be either forward (1), or reverse (-1).
Args:
orientation (int): Orientation of arc.
Returns:
pyfrp.pyfrp_gmsh_geometry.vertex: Vertex.
"""
if orientation==1:
return self.vertices[-1]
elif orientation==-1:
return self.vertices[0]
else:
printError("Cannot return last vertex. Orientation " + str(orientation) + " unknown.")
return None
def getFirstVertex(self,orientation):
"""Returns first vertex of arc given a orientation.
Orientation can be either forward (1), or reverse (-1).
Args:
orientation (int): Orientation of arc.
Returns:
pyfrp.pyfrp_gmsh_geometry.vertex: Vertex.
"""
if orientation==1:
return self.vertices[0]
elif orientation==-1:
return self.vertices[-1]
else:
printError("Cannot return first vertex. Orientation " + str(orientation) + " unknown.")
return None
def getSubElements(self):
"""Returns all elements that define this element.
Returns:
list: List of elements.
"""
return self.vertices
class lineLoop(gmshElement):
"""Lineloop class storing information from gmsh .geo.
Object has two major attributes:
* edges (list): List of pyfrp.modules.pyfrp_gmsh_geometry.edge objects.
* orientations (list): List of orientations of each element, either ``1`` or ``-1``
Args:
domain (pyfrp.modules.pyfrp_gmsh_geometry.domain): Domain loop belongs to.
edgeIDs (list): List of edge IDs.
Id (int): ID of loop.
"""
def __init__(self,domain,edgeIDs,ID):
gmshElement.__init__(self,domain,ID)
self.edges,self.orientations=self.initEdges(edgeIDs)
def initEdges(self,IDs):
"""Constructs ``edges`` and ``orientations`` list at object initiations
from list of IDs.
Args:
IDs (list): List of IDs
Returns:
tuple: Tuple containing:
* edges (list): List of pyfrp.modules.pyfrp_gmsh_geometry.edge objects.
* orientations (list): List of orientations of each element, either ``1`` or ``-1``
"""
self.edges=[]
self.orientations=[]
for ID in IDs:
self.addEdgeByID(ID)
return self.edges,self.orientations
def addEdgeByID(self,ID):
"""Adds edge to lineloop.
Args:
ID (int): ID of edge to be added.
Returns:
list: Updated edgeIDs list.
"""
self.edges.append(self.domain.getEdgeById(abs(ID))[0])
self.orientations.append(np.sign(ID))
return self.edges
def insertEdgeByID(self,ID,pos):
"""Inserts edge to lineloop at position.
Args:
ID (int): ID of edge to be inserted.
pos (int): Position at which ID to be inserted.
Returns:
list: Updated edgeIDs list.
"""
self.edges.insert(pos,self.domain.getEdgeById(abs(ID))[0])
self.orientations.insert(pos,np.sign(ID))
return self.edges
def removeEdgeByID(self,ID):
"""Remove edge from lineloop.
Args:
ID (int): ID of edge to be removed.
Returns:
list: Updated edgeIDs list.
"""
e=self.domain.getEdgeById(abs(ID))[0]
idx=self.edges.index(e)
self.edges.remove(e)
self.orientations.pop(idx)
return self.edges
def reverseEdge(self,ID):
"""Reverses the orientation of an edge in the line loop.
Args:
ID (int): ID of edge to be reversed.
Returns:
list: Updated orientations list.
"""
e=self.domain.getEdgeById(abs(ID))[0]
self.orientations[self.edges.index(e)]=-self.orientations[self.edges.index(e)]
return self.orientations
def draw(self,ax=None,color='k',ann=None,backend='mpl',drawVertices=False,linewidth=1):
"""Draws complete line loop.
There are two different backends for drawing, namely
* Matplotlib (``backend='mpl'``)
* VTK (``backend='vtk'``)
Matplotlib is easier to handle, but slower. VTK is faster for complex
geometries.
.. note:: If ``backend=mpl``, ``ax`` should be a ``matplotlib.axes``, if ``backend='vtk'``,
``ax`` should be a ``vtk.vtkRenderer`` object.
.. note:: If no axes is given, will create new one,
see also :py:func:`pyfrp.modules.pyfrp_plot_module.makeGeometryPlot`
or :py:func:`pyfrp.modules.pyfrp_vtk_module.makeVTKCanvas`.
.. warning:: Annotations are not properly working with ``backend='vtk'``.
Keyword Args:
ax (matplotlib.axes): Matplotlib axes to be plotted in.
color (str): Color of line loop.
ann (bool): Show annotations.
drawVertices (bool): Also draw vertices.
Returns:
matplotlib.axes: Updated axes.
"""
for e in self.edges:
ax=e.draw(ax=ax,color=color,ann=ann,backend=backend,drawVertices=drawVertices,linewidth=linewidth)
return ax
def printLoop(self):
"""Prints loop.
"""
ids=np.array(pyfrp_misc_module.objAttrToList(self.edges,"Id"))
orients=np.array(self.orientations)
print "Line Loop with ID = "+ str(self.Id)+": "+str(ids*orients)
def fix(self):
"""Fixes loop.
"""
edgesNew=[self.edges[0]]
orientationsNew=[self.orientations[0]]
for i in range(1,len(self.edges)):
lastEdge=edgesNew[i-1]
vLast=lastEdge.getLastVertex(orientationsNew[i-1])
for j in range(len(self.edges)):
currEdge=self.edges[j]
currOrient=self.orientations[j]
if currEdge==lastEdge:
continue
if vLast == currEdge.getFirstVertex(currOrient):
edgesNew.append(currEdge)
orientationsNew.append(currOrient)
break
elif vLast == currEdge.getLastVertex(currOrient):
edgesNew.append(currEdge)
orientationsNew.append(-currOrient)
break
if j==len(self.edges)-1:
printWarning("Could not fix loop with ID" + str(self.Id))
print "Edge with ID " +str(lastEdge.Id) + " is not matching with any other edge."
return False
self.edges=edgesNew
self.orientations=orientationsNew
return True
def checkClosed(self,fix=False,debug=False):
"""Checks if lineLoop is closed.
Keyword Args:
debug (bool): Print debugging messages.
fix (bool): Close if necessary.
Returns:
bool: True if closed.
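Example (a sketch; ``loop`` is assumed to be an existing lineLoop):
>>> loop.checkClosed(fix=True,debug=True)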
"""
b=True
for i in range(len(self.edges)):
#Get ID of edge
edge1Temp=self.edges[i]
orient1=self.orientations[i]
#Get ID of next edge
edge2Temp=self.edges[pyfrp_misc_module.modIdx(i+1,self.edges)]
orient2=self.orientations[pyfrp_misc_module.modIdx(i+1,self.edges)]
#Get ID of first/last vertex
firstVertexId=edge1Temp.getFirstVertex(orient1).Id
lastVertexId=edge2Temp.getLastVertex(orient2).Id
#Check if vertices are matching
if firstVertexId!=lastVertexId:
b=False
if fix:
self.reverseEdge(edge2Temp.Id)
b=True
if debug:
print "Edge with ID " +str(edge1Temp.Id) + " was not matching edge with ID " + str(edge2Temp.Id) + ". \n Fixed this."
if debug:
printWarning("lineLoop with ID " + str(self.Id) + " does not close." )
print "Edge with ID " +str(edge1Temp.Id) + " is not matching edge with ID " + str(edge2Temp.Id)
return b
def writeToFile(self,f):
"""Writes line loop to file.
Args:
f (file): File to write to.
Returns:
file: File.
"""
f.write("Line Loop("+str(self.Id)+")= {" )
for i,s in enumerate(self.edges):
f.write(str(self.orientations[i]*s.Id))
if i!=len(self.edges)-1:
f.write(",")
else:
f.write("};\n")
return f
def getVertices(self):
"""Returns all vertices included in loop."""
vertices=[]
for i,edge in enumerate(self.edges):
vertices.append(edge.getFirstVertex(self.orientations[i]))
return vertices
def hasCommonEdge(self,loop):
"""Checks if lineLoop has common edges with other lineLoop.
Args:
loop (pyfrp.modules.pyfrp_gmsh_geometry.lineLoop): lineLoop object.
Returns:
tuple: Tuple containing:
* hasCommon (bool): True if loops have common edge.
* edges (list): List of common edges.
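Example (a sketch; ``l1`` and ``l2`` are assumed to be two lineLoops of the same domain):
>>> hasCommon,commonEdges=l1.hasCommonEdge(l2)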
"""
edges=[]
for e in self.edges:
if e in loop.edges:
edges.append(e)
return len(edges)>0,edges
def fuse(self,loop,maxL=1000,debug=False,surface=None):
"""Fuses lineLoop with other loop.
"""
#Find common edge
b,commonEdges=self.hasCommonEdge(loop)
#print "commonEdges", pyfrp_misc_module.objAttrToList(commonEdges,'Id')
if not b:
printWarning("Cannot fuse lineLoop with ID " + str(self.Id) + " and lineLoop with ID "+ str(loop.Id) +" . Loops do not have common edge.")
return False
#Sort edges of loop and pop edges that are in common
idx=loop.edges.index(commonEdges[0])
idxLast=loop.edges.index(commonEdges[-1])+1
edgeTemp1,loop.edges=pyfrp_misc_module.popRange(loop.edges,idx,idxLast)
orientTemp1,loop.orientations=pyfrp_misc_module.popRange(loop.orientations,idx,idxLast)
#print "popped ",pyfrp_misc_module.objAttrToList(edgeTemp1,'Id')
#print "remain ",pyfrp_misc_module.objAttrToList(loop.edges,'Id')
edges=list(np.roll(loop.edges,len(loop.edges)-idx))
orientations=list(np.roll(loop.orientations,len(loop.edges)-idx))
#Pop common edge out of this loop
idx=self.edges.index(commonEdges[0])
idxLast=self.edges.index(commonEdges[-1])+1
edgeTemp2,self.edges=pyfrp_misc_module.popRange(self.edges,idx,idxLast)
orientTemp2,self.orientations=pyfrp_misc_module.popRange(self.orientations,idx,idxLast)
#print "popped 2 ",pyfrp_misc_module.objAttrToList(edgeTemp2,'Id')
#print "remain ",pyfrp_misc_module.objAttrToList(self.edges,'Id')
#Figure out if edges of other loop are in right order or need to be reversed
if orientTemp1[0]==orientTemp2[0]:
edges.reverse()
orientations.reverse()
if len(edges)>maxL:
if debug:
printWarning("Cannot fuse lineLoop with ID " + str(self.Id) + " and lineLoop with ID "+ str(loop.Id) +" . Resulting loop exceeds maxL.")
return False
#print "inserting ",pyfrp_misc_module.objAttrToList(edges,'Id')
#Insert edges of second loop into loop
self.edges[idx:idx]=edges
self.orientations[idx:idx]=orientations
#Check if closed in the end
#self.checkClosed(fix=True,debug=False)
self.fix()
#Delete second lineLoop
if surface!=None:
loop.removeFromSurface(surface)
b=loop.delete(debug=debug)
bs=[]
#Delete common edge
for edge in edgeTemp1:
bs.append(edge.delete(debug=debug))
return True
def approxBySpline(self,angleThresh=0.314,debug=False):
"""Approximates parts of line loop by spline.
Summarizes all consecutive lines in the loop that enclose a small angle into a single spline.
a spline.
.. note:: The choice of ``angleThresh`` is crucial for this function to work. It should be chosen
on a by-case basis if necessary.
Example:
Load test file:
>>> d,dd = pyfrp_gmsh_IO_module.readGeoFile("pyfrp/meshfiles/examples/splineTest.geo")
Draw:
>>> d.setAnnOffset([0.1,0.1,0.00])
>>> ax=d.draw(backend='mpl',asSphere=False,ann=True,annElements=[False,True,False])
returns the following
.. image:: ../imgs/pyfrp_gmsh_geometry/approxBySpline1.png
Approximate by spline and draw again
>>> d.lineLoops[0].approxBySpline(angleThresh=0.1*np.pi)
>>> ax=d.draw(backend='mpl',asSphere=False,ann=True,annElements=[False,True,False])
returns
.. image:: ../imgs/pyfrp_gmsh_geometry/approxBySpline2.png
And write to file
>>> d.writeToFile("pyfrp/meshfiles/examples/approximated.geo")
Keyword Args:
angleThresh (float): Angular threshold in radians.
debug (bool): Print debugging messages.
Returns:
bool: True if approximated.
"""
if len(self.edges)<=4:
return False
nEdges=len(self.edges)
# Bookkeeping lists
subst=[]
edgesSubst=[]
vertices=[]
appending=False
for i in range(len(self.edges)):
# Get indices of edges
idx1=pyfrp_misc_module.modIdx(i,self.edges)
idx2=pyfrp_misc_module.modIdx(i+1,self.edges)
# Get edges
e1=self.edges[idx1]
e2=self.edges[idx2]
# If either e1 or e2 is not a line, skip right away.
if e1.typ>0 or e2.typ>0:
continue
# Compute angle
angle=pyfrp_geometry_module.getAngle(e1.getDirection(self.orientations[idx1]),e2.getDirection(self.orientations[idx2]))
# If angle satisfies threshold criteria, append edge
if angle<=angleThresh:
vertices=vertices+[e1.getFirstVertex(self.orientations[idx1]),e1.getLastVertex(self.orientations[idx1])]
edgesSubst.append(e1)
appending=True
# If angle is too large or its the end of the loop, close spline
if angle>angleThresh or i==len(self.edges)-1:
if appending==True:
if e1 not in edgesSubst:
edgesSubst.append(e1)
"""IDEA: Check that includedInLoop gives same return for all edges in edgesSubst. Then finally add
spline.
"""
noSpline=False
for j,e in enumerate(edgesSubst):
inLoop,loops=e.includedInLoop()
if j>0:
if sorted(loops)!=sorted(oldLoops):
noSpline=True
print "Cannot turn edges into spline because edges ", edgesSubst[j-1].Id , " and ",edgesSubst[j].Id, " are not included in the same loops."
oldLoops=list(loops)
if not noSpline:
""" Check if e2 is already about to be substituted by a spline. Then we can simply add edges together to one spline.
Otherwise, just create new spline.
"""
if len(subst)>0:
if e2 in subst[0][0]:
subst[0][0]=edgesSubst+subst[0][0]
subst[0][2]=list(set(loops+subst[0][2]))
subst[0][1].vertices=vertices+subst[0][1].vertices
else:
spline=self.domain.addBSpline(vertices+[e1.getLastVertex(self.orientations[i])])
subst.append([edgesSubst,spline,loops])
else:
spline=self.domain.addBSpline(vertices+[e1.getLastVertex(self.orientations[i])])
subst.append([edgesSubst,spline,loops])
# Set back Bookkeeping variables
appending=False
edgesSubst=[]
vertices=[]
# Replace edges from loops with spline.
for sub in subst:
for loop in sub[2]:
try:
idx1=loop.edges.index(sub[0][0])
idx2=loop.edges.index(sub[0][-1])
except ValueError:
printWarning("approxBySpline: Cannot find index of either first or last edge.")
if idx1>idx2:
idxInsert=idx2
else:
idxInsert=idx1
for e in sub[0]:
try:
loop.edges.remove(e)
except ValueError:
printWarning("approxBySpline: Cannot remove edge "+str(e.Id)+" from loop"+loop.Id +".")
loop.edges.insert(idxInsert,sub[1])
if debug:
print "Substituted edges ", sub[0][0].Id , "-", sub[0][-1].Id, " with spline ", sub[1].Id
# Remove edges from domain.
for sub in subst:
for e in sub[0]:
e.delete(debug=debug)
return nEdges>len(self.edges)
def includedInSurface(self):
"""Checks if loop is included in a surface.
Returns:
tuple: Tuple containing:
* included (bool): True if included.
* loops (list): List of :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.ruledSurface` objects that include loop.
"""
surfaces=[]
for surface in self.domain.ruledSurfaces:
if self==surface.lineLoop:
surfaces.append(surface)
return len(surfaces)>0,surfaces
def delete(self,debug=False):
"""Deletes loop if it is not used in any surface.
Returns:
bool: True if deletion was successful.
"""
incl,surfaces=self.includedInSurface()
if incl:
printWarning("Was not able to delete loop with ID " + str(self.Id) +". Still part of " + str(len(surfaces)) + " surfaces.")
return False
self.domain.lineLoops.remove(self)
return True
def removeFromSurface(self,surface):
"""Removes lineLoop from surface.
"""
if self==surface.lineLoop:
surface.lineLoop=None
def removeFromAllSurfaces(self):
"""Removes lineLoop from all surfaces.
"""
for surface in self.domain.ruledSurfaces:
self.removeFromSurface(surface)
def isCoplanar(self):
"""Returns if all edges lie in single plane.
Does this by
* picking the first two vertices as first vector ``vec1 = v1 - v0``
* looping through vertices and computing the normal vector
between ``vec1`` and ``vec2=v[i]-v0``.
* Checking if all normal vectors are colinear.
Returns:
bool: True if coplanar.
"""
#Get vertex coordinates
coords=pyfrp_misc_module.objAttrToList(self.getVertices(),'x')
#Compute normals
normals=[]
for i in range(2,len(coords)):
n=pyfrp_geometry_module.computeNormal([coords[0],coords[1],coords[i]])
normals.append(n)
#Check if normals are all colinear
b=[]
for i in range(1,len(normals)):
# Make sure to skip normal vectors produced from colinear vectors
if sum(normals[i])==0:
continue
b.append(pyfrp_geometry_module.checkColinear(normals[0],normals[i]))
return sum(b)==len(b)
def getCenterOfMass(self):
"""Computes center of mass of surface.
Returns:
numpy.ndarray: Center of mass.
"""
coords=np.array(pyfrp_misc_module.objAttrToList(self.getVertices(),'x'))
return pyfrp_idx_module.getCenterOfMass(coords)
def getEdges(self):
"""Returns list of edges included in lineLoop."""
return self.edges
def getSubElements(self):
"""Returns all elements that define this element.
Returns:
list: List of elements.
"""
return self.getEdges()
class ruledSurface(gmshElement):
"""ruledSurface class storing information from gmsh .geo.
Args:
domain (pyfrp.modules.pyfrp_gmsh_geometry.domain): Domain surface belongs to.
loopID (int): ID of surrounding loop.
Id (int): ID of surface.
"""
def __init__(self,domain,loopID,ID):
gmshElement.__init__(self,domain,ID)
self.initLineLoop(loopID)
def initLineLoop(self,loopID,debug=False,addPoints=False,iterations=2):
"""Checks length of lineLoop and if length of lineLoop is greater
than 4, will perform triangulation so Gmsh can handle surface."""
#Get lineLoop
self.lineLoop=self.domain.getLineLoopById(loopID)[0]
#Check if there is a line loop
if self.lineLoop==False:
return False,[]
#Check length
if len(self.lineLoop.edges)<=4:
return False,[]
#Compute normal vector
oldNormal=self.getNormal()
#Create triangulation
rmat=self.rotateToPlane('xy')
newNormal=self.getNormal()
#Get vertices
vertices=self.getVertices()
coords=pyfrp_misc_module.objAttrToList(vertices,'x')
#Get maximum volSize of vertices
maxVolSize=max(pyfrp_misc_module.objAttrToList(vertices,'volSize'))
#Get coordinates in plane
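# (drop the coordinate axis aligned with the surface normal, keeping the two in-plane components)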
coordsPlane=np.asarray(coords)[:,np.where(abs(self.normal)!=1)[0]]
#Triangulate
tri,coordsTri = pyfrp_idx_module.triangulatePoly(coordsPlane,addPoints=addPoints,iterations=iterations,debug=True)
#Add 3D dimension to coordsTri
coordsTri=np.concatenate((coordsTri,coords[0][2]*np.ones((coordsTri.shape[0],1))),axis=1)
#Loop through each triangle
surfacesCreated=[]
vertices=[]
for i in range(len(tri)):
edges=[]
#Loop through each vertex
for j in range(len(tri[i])):
#Get first vertex, create it if necessary
v1=self.domain.getVertexByX(coordsTri[tri[i][j]])[0]
if v1==False:
v1=self.domain.addVertex(coordsTri[tri[i][j]],volSize=maxVolSize)
#Get second vertex, create it if necessary
v2=self.domain.getVertexByX(coordsTri[tri[i][pyfrp_misc_module.modIdx(j+1,tri[i])]])[0]
if v2==False:
v2=self.domain.addVertex(coordsTri[tri[i][pyfrp_misc_module.modIdx(j+1,tri[i])]],volSize=maxVolSize)
#Check if edge already exists
if not self.domain.getEdgeByVertices(v1,v2)[0]:
edges.append(self.domain.addLine(v1,v2))
else:
edges.append(self.domain.getEdgeByVertices(v1,v2)[0])
#Remember vertices so we can use them later for turning everything back.
vertices=vertices+[v1,v2]
#Add line loop
edgeIDs=pyfrp_misc_module.objAttrToList(edges,"Id")
loop=self.domain.addLineLoop(edgeIDs=edgeIDs)
loop.checkClosed(fix=False,debug=False)
loop.fix()
#Add ruledSurface if necessary
if i==0:
self.lineLoop=loop
else:
snew=self.domain.addRuledSurface(lineLoopID=loop.Id)
surfacesCreated.append(snew)
#Delete original loop
self.domain.lineLoops.remove(self.domain.getLineLoopById(loopID)[0])
#Remove duplicates in vertices list.
vertices=pyfrp_misc_module.remRepeatsList(vertices)
#Rotate back
for v in vertices:
v.x=np.dot(v.x,rmat.T)
return True,surfacesCreated
def normalToPlane(self):
"""Checks if surface lies within either x-y-/x-z-/y-z-plane.
Does this by checking if ``1.`` is in the normal vector.
Returns:
bool: True if in plane.
"""
return 1. in self.normal
def isCoplanar(self):
"""Returns if surface lies in single plane.
Returns:
bool: True if coplanar.
"""
return self.lineLoop.isCoplanar()
def getCenterOfMass(self):
"""Computes center of mass of surface.
Returns:
numpy.ndarray: Center of mass.
"""
return self.lineLoop.getCenterOfMass()
def getNormal(self,method='cross'):
"""Computes normal to surface.
First checks if surface is coplanar using :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.ruledSurface.isCoplanar`.
Then finds two independent vectors that span surface and passes them on to
:py:func:`pyfrp.modules.pyfrp_geometry_module.computeNormal`.
Currently there are two methods available:
* ``cross``, see also :py:func:`normalByCross`.
* ``newells``, see also :py:func:`newells`.
If method is unknown, will fall back to ``cross``.
Keyword Args:
method (str): Method of normal computation.
Returns:
numpy.ndarray: Normal vector to surface.
"""
if not self.isCoplanar():
printWarning("Surface " + str(self.Id) + " is not coplanar. The resulting normal vector might thus be not correct.")
#Get vertices
vertices=self.lineLoop.getVertices()
#Find non-colinear vertices
vec1=vertices[1].x-vertices[0].x
idx=None
for i in range(2,len(vertices)):
tempVec=vertices[i].x-vertices[0].x
if not pyfrp_geometry_module.checkColinear(vec1,tempVec):
idx=i
break
if idx==None:
printError("All points in surface "+str(self.Id) + " seem to be colinear. Will not be able to compute normal.")
print self.Id
self.draw(ann=True)
print pyfrp_misc_module.objAttrToList(self.lineLoop.getVertices(),'Id')
raw_input()
return np.zeros((3,))
#Compute normal
coords=[vertices[0].x,vertices[1].x,vertices[idx].x]
self.normal=pyfrp_geometry_module.computeNormal(coords,method=method)
return self.normal
def writeToFile(self,f):
"""Writes ruled surface to file.
Args:
f (file): File to write to.
Returns:
file: File.
"""
f.write("Ruled Surface("+str(self.Id)+")= {"+str(self.lineLoop.Id)+ "};\n" )
return f
def addToBoundaryLayer(self,boundField=None,**fieldOpts):
"""Adds surface to a boundary layer field.
If no field is given, will create new one with given parameters and add it to a minField. If no minField exists,
will create a new one too and set it as background field.
See also :py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.addBoundaryLayerField`
:py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.addMinField` and
:py:func:`pyfrp.modules.pyfrp_gmsh_geometry.domain.genMinBkgd`.
Keyword Args:
boundField (pyfrp.modules.pyfrp_gmsh_geometry.boundaryLayerField): Boundary layer field object.
fieldOpts (dict): See documentation of boundary layer field of all available options.
Returns:
pyfrp.modules.pyfrp_gmsh_geometry.boundaryLayerField: Boundary layer field around edge.
"""
#Generate boundary field if not given
if boundField==None:
boundField=self.domain.addBoundaryLayerField()
#Add Vertex
boundField.addFaceByID(self.Id)
#Set options
boundField.setFieldAttributes(**fieldOpts)
#Generate background field
self.domain.genMinBkgd(FieldsList=[boundField.Id])
return boundField
def hasCommonEdge(self,surface):
"""Checks if surface has common edge with other surface.
Args:
surface (pyfrp.modules.pyfrp_gmsh_geometry.ruledSurface): Surface object.
Returns:
tuple: Tuple containing:
* hasCommon (bool): True if loops have common edge.
* e (pyfrp.modules.pyfrp_gmsh_geometry.edge): Edge that is in common.
"""
return self.lineLoop.hasCommonEdge(surface.lineLoop)
def fuse(self,surface,maxL=1000,debug=False,sameNormal=False):
"""Fuses surface with another surface.
Will not do anything if surfaces do not have an edge in common.
"""
if not self.hasCommonEdge(surface)[0]:
if debug:
printWarning("Cannot fuse surface with ID " + str(self.Id) + " and surface with ID "+ str(surface.Id) +" . Surfaces do not have common edge.")
return False
if not self.hasSameNormal(surface):
if sameNormal:
if debug:
printWarning("Cannot fuse surface with ID " + str(self.Id) + " and surface with ID "+ str(surface.Id) +" . Not same normal, but sameNormal="+str(sameNormal))
return False
if debug:
printWarning("Fusing surface with ID " + str(self.Id) + " and surface with ID "+ str(surface.Id) +" will alter surface normal.")
b=self.lineLoop.fuse(surface.lineLoop,maxL=maxL,debug=debug,surface=surface)
if b:
surface.removeFromAllLoops()
surface.delete()
return b
def removeFromAllLoops(self):
"""Removes surface from all surface loops.
"""
for loop in self.domain.surfaceLoops:
if self in loop.surfaces:
loop.surfaces.remove(self)
def hasSameNormal(self,surface,sameOrientation=False):
"""Checks if sufrace has the same normal vector as another surface.
Args:
surface (pyfrp.modules.pyfrp_gmsh_geometry.ruledSurface): Surface object.
Keyword Args:
sameOrientation (bool): Forces surfaces to also have same orientation.
Returns:
bool: True if same normal vector.
"""
# Compute Normals
self.getNormal()
surface.getNormal()
if sameOrientation:
if pyfrp_misc_module.compareVectors(self.normal,surface.normal):
return True
else:
if pyfrp_misc_module.compareVectors(self.normal,surface.normal) or pyfrp_misc_module.compareVectors(self.normal,-surface.normal):
return True
return False
def includedInLoop(self):
"""Checks if surface is included in a surfaceLoop.
Returns:
tuple: Tuple containing:
* included (bool): True if included.
* loops (list): List of :py:class:`pyfrp.modules.pyfrp_gmsh_geometry.surfaceLoops` objects that include surface.
"""
loops=[]
for loop in self.domain.surfaceLoops:
if self in loop.surfaces:
loops.append(loop)
return len(loops)>0,loops
def delete(self):
"""Deletes surface if it is not used in any surfaceLoop.
Returns:
bool: True if deletion was successful.
"""
incl,loops=self.includedInLoop()
if incl:
printWarning("Was not able to delete loop with ID " + str(self.Id) +". Still part of " + str(len(loops)) + " loops.")
return False
self.domain.ruledSurfaces.remove(self)
return True
def draw(self,ax=None,color='b',edgeColor='k',drawLoop=True,ann=None,alpha=0.2,backend='mpl',linewidth=1):
"""Draws surface and fills it with color.
.. note:: If ``ann=None``, will set ``ann=False``.
.. note:: If no axes is given, will create new one.
.. warning:: Does not work for surfaces surrounded by arcs yet.
Keyword Args:
ax (matplotlib.axes): Matplotlib axes to be plotted in.
color (str): Color of surface.
ann (bool): Show annotations.
edgeColor (str): Color of lineLoop around.
alpha (float): Transparency of surface.
Returns:
matplotlib.axes: Axes.
"""
if backend!='mpl':
printError("Cannot draw surface with backend="+backend+". Currently not supported")
return ax
if ann==None:
ann=False
if ax==None:
fig,axes = pyfrp_plot_module.makeGeometryPlot()
ax=axes[0]
if drawLoop:
ax=self.lineLoop.draw(ax=ax,color=edgeColor,ann=False,linewidth=linewidth)
for e in self.lineLoop.edges:
if e in self.domain.arcs:
printWarning("Cannot draw surface " + str(self.Id) + " yet. Surfaces including arcs are not supported yet.")
return ax
#Get Vertex coordinates in the form we want (this is probably unnecessarily complicated)
vertices=self.lineLoop.getVertices()
coords=pyfrp_misc_module.objAttrToList(vertices,'x')
coords=np.asarray(coords)
coords = zip(coords[:,0], coords[:,1], coords[:,2])
coordsNew=[]
coordsNew.append(list(coords))
#Add collection
coll=Poly3DCollection(coordsNew,alpha=alpha)
coll.set_facecolor(color)
ax.add_collection3d(coll)
#annotation
if ann:
com=pyfrp_idx_module.getCenterOfMass(np.array(pyfrp_misc_module.objAttrToList(vertices,'x')))
ax.text(com[0],com[1],com[2], "s"+str(self.Id), None)
#Redraw
pyfrp_plot_module.redraw(ax)
return ax
def getVertices(self):
"""Returns all vertices included in surface."""
if self.lineLoop==None:
return []
else:
return self.lineLoop.getVertices()
def getEdges(self):
"""Returns all edges included in surface."""
if self.lineLoop==None:
return []
else:
return self.lineLoop.getEdges()
def rotateToNormal(self,normal,ownNormal=None):
"""Rotates surface such that it lies in the plane
with normal vector ``normal``.
See also :py:func:`pyfrp.modules.pyfrp_geometry_module.getRotMatrix`.
Args:
normal (numpy.ndarray): Normal vector.
Returns:
numpy.ndarray: Rotation matrix.
"""
if ownNormal==None:
ownNormal=self.getNormal()
if pyfrp_geometry_module.checkColinear(normal,ownNormal):
rmat=np.identity(3)
else:
rmat=pyfrp_geometry_module.getRotMatrix(normal,ownNormal)
# Rotate
for v in self.getVertices():
v.x=np.dot(v.x,rmat)
return rmat
def rotateToSurface(self,s):
"""Rotates surface such that it lies in the same plane
as a given surface.
See also :py:func:`pyfrp.modules.pyfrp_geometry_module.getRotMatrix`.
Args:
s (pyfrp.modules.pyfrp_gmsh_geometry.ruledSurface): A surface.
Returns:
numpy.ndarray: Rotation matrix.
"""
return self.rotateToNormal(s.getNormal())
def rotateToPlane(self,plane):
"""Rotates surface such that it lies in plane.
See also :py:func:`pyfrp.modules.pyfrp_geometry_module.getRotMatrix`.
Possible planes are:
* ``xy``
* ``xz``
* ``yz``
Args:
plane (str): Plane to rotate to.
Returns:
numpy.ndarray: Rotation matrix.
"""
if plane=="xz":
normal=np.array([0,1.,0]) # the x-z plane has the y-axis as its normal
elif plane=="yz":
normal=np.array([1.,0,0])
elif plane=="xy":
normal=np.array([0,0,1.])
else:
printError("Do not know the plane " +plane +". Will not rotate plane")
return
return self.rotateToNormal(normal)
def getSubElements(self):
"""Returns all elements that define this element.
Returns:
list: List of elements.
"""
return [self.lineLoop]
class surfaceLoop(gmshElement):
"""surfaceLoop class storing information from gmsh .geo.
Args:
domain (pyfrp.modules.pyfrp_gmsh_geometry.domain): Domain loop belongs to.
surfaceIDs (list): List of surfaces.
Id (int): ID of loop.
"""
def __init__(self,domain,surfaceIDs,ID):
gmshElement.__init__(self,domain,ID)
self.surfaces=self.initSurfaces(surfaceIDs)
def initSurfaces(self,IDs):
"""Constructs ``surfaces`` list at object initiations
from list of IDs.
Args:
IDs (list): List of IDs.
Returns:
list: List of pyfrp.modules.pyfrp_gmsh_geometry.ruledSurface objects.
"""
self.surfaces=[]
for ID in IDs:
self.addSurfaceByID(ID)
return self.surfaces
def addSurfaceByID(self,ID):
"""Adds surface to surfaceloop.
Args:
ID (int): ID of surface to be added.
Returns:
list: Updated surfaceIDs list.
"""
self.surfaces.append(self.domain.getRuledSurfaceById(ID)[0])
return self.surfaces
def insertSurfaceByID(self,ID,pos):
"""Inserts surface to surfaceloop at position.
Args:
ID (int): ID of surface to be inserted.
pos (int): Position at which ID to be inserted.
Returns:
list: Updated surfaceIDs list.
"""
self.surfaces.insert(pos,self.domain.getRuledSurfaceById(ID)[0])
return self.surfaces
def removeSurfaceByID(self,ID):
"""Remove surface from surfaceloop.
Args:
ID (int): ID of surface to be removed.
Returns:
list: Updated surfaceIDs list.
"""
self.surfaces.remove(self.domain.getRuledSurfaceById(ID)[0])
return self.surfaces
def writeToFile(self,f):
"""Writes surface loop to file.
Args:
f (file): File to write to.
Returns:
file: File.
"""
f.write("Surface Loop("+str(self.Id)+")= {" )
for i,s in enumerate(self.surfaces):
f.write(str(s.Id))
if i!=len(self.surfaces)-1:
f.write(",")
else:
f.write("};\n")
return f
def getSubElements(self):
"""Returns all elements that define this element.
Returns:
list: List of elements.
"""
return self.surfaces
class volume(gmshElement):
"""Volume class storing information from gmsh .geo.
Args:
domain (pyfrp.modules.pyfrp_gmsh_geometry.domain): Domain surface belongs to.
surfaceLoopID (int): ID of surrounding surface loop.
Id (int): ID of volume.
"""
def __init__(self,domain,surfaceLoopID,ID):
gmshElement.__init__(self,domain,ID)
self.surfaceLoop=self.domain.getSurfaceLoopById(surfaceLoopID)[0]
def writeToFile(self,f):
"""Writes Volume to file.
Args:
f (file): File to write to.
Returns:
file: File.
"""
f.write("Volume("+str(self.Id)+")= {"+str(self.surfaceLoop.Id)+ "};\n" )
return f
def getSubElements(self):
"""Returns all elements that define this element.
Returns:
list: List of elements.
"""
return [self.surfaceLoop]
class field(gmshElement):
"""Field class storing information from gmsh .geo.
Args:
domain (pyfrp.modules.pyfrp_gmsh_geometry.domain): Domain surface belongs to.
Id (int): ID of field.
typ (str): Type of field.
"""
def __init__(self,domain,typ,Id):
gmshElement.__init__(self,domain,Id)
self.typ=typ
def setAsBkgdField(self):
"""Sets this mesh as background field for the whole domain.
"""
self.domain.bkgdField=self
def isBkgdField(self):
"""Returns true if field is background field.
Returns:
bool: True if background field.
"""
return self.domain.bkgdField==self
def setFieldAttr(self,name,val):
"""Sets attribute of field.
.. note:: Value can have any data type.
Args:
name (str): Name of attribute.
val (str): Value.
"""
setattr(self,name,val)
def setFieldAttributes(self,**kwargs):
"""Sets multiple field attributes.
"""
for key, value in kwargs.iteritems():
self.setFieldAttr(key,value)
def getSubElements(self):
"""Returns all elements that define this element.
Returns:
list: List of elements.
"""
return []
class boxField(field):
"""Box field class storing information from gmsh .geo.
Subclasses from :py:class:`field`.
Args:
domain (pyfrp.modules.pyfrp_gmsh_geometry.domain): Domain surface belongs to.
Id (int): ID of field.
Keyword Args:
volSizeIn (float): Mesh element volume inside box.
volSizeOut (float): Mesh element volume outside box.
xRange (list): Range of box field in x-direction given as ``[minVal,maxVal]``.
yRange (list): Range of box field in y-direction given as ``[minVal,maxVal]``.
zRange (list): Range of box field in z-direction given as ``[minVal,maxVal]``.
"""
def __init__(self,domain,Id,volSizeIn=10.,volSizeOut=20.,xRange=[],yRange=[],zRange=[]):
field.__init__(self,domain,"box",Id)
self.VIn=volSizeIn
self.VOut=volSizeOut
self.initBox(xRange,yRange,zRange)
def initBox(self,xRange,yRange,zRange):
"""Initializes bounding box.
"""
self.setRange('X',xRange)
self.setRange('Y',yRange)
self.setRange('Z',zRange)
def setRange(self,coord,vec):
"""Sets the bounding box range along a given axis.
Args:
coord (str): Axis along which the range is set (``"X","Y","Z"``)
vec (list): Range of box ``[minVal,maxVal]``
Returns:
tuple: Tuple containing:
* coordMin (float): New minimum value.
* coordMax (float): New maximum value.
"""
try:
setattr(self,coord+"Min",vec[0])
setattr(self,coord+"Max",vec[1])
except IndexError:
setattr(self,coord+"Min",None)
setattr(self,coord+"Max",None)
return getattr(self,coord+"Min"),getattr(self,coord+"Max")
def writeToFile(self,f):
"""Writes box field to file.
See also :py:func:`pyfrp.modules.pyfrp_gmsh_IO_module.writeBoxField`.
Args:
f (file): File to write to.
Returns:
file: File.
"""
f=pyfrp_gmsh_IO_module.writeBoxField(f,self.Id,self.VIn,self.VOut,[self.XMin,self.XMax],[self.YMin,self.YMax],[self.ZMin,self.ZMax])
if self.isBkgdField():
f=pyfrp_gmsh_IO_module.writeBackgroundField(f,self.Id)
return f
def getSubElements(self):
"""Returns all elements that define this element.
Returns:
list: List of elements.
"""
return []
class attractorField(field):
"""Attractor field class storing information from gmsh .geo.
Subclasses from :py:class:`field`.
Args:
domain (pyfrp.modules.pyfrp_gmsh_geometry.domain): Domain surface belongs to.
Id (int): ID of field.
Keyword Args:
NodesList (list): List of IDs of the Nodes that attractor field centers around.
"""
def __init__(self,domain,Id,NodesList=[]):
field.__init__(self,domain,"attractor",Id)
self.NodesList=self.initNodesList(NodesList)
def initNodesList(self,NodesList):
"""Adds a list of vertices to NodesList.
See also :py:func:`addNodeByID`.
Args:
NodesList (list): List of vertex IDs.
Returns:
list: Updated NodesList.
"""
self.NodesList=[]
for Id in NodesList:
self.addNodeByID(Id)
return self.NodesList
def addNodeByID(self,ID):
"""Adds vertex object to NodesList given the ID of the vertex.
Args:
ID (int): ID of vertex to be added.
Returns:
list: Updated NodesList.
"""
v,b=self.domain.getVertexById(ID)
if isinstance(b,int):
self.NodesList.append(v)
return self.NodesList
def setFieldAttr(self,name,val):
"""Sets field attribute.
.. note:: Value can have any data type.
Args:
name (str): Name of attribute.
val (float): Value of attribute.
"""
if name=="NodesList":
self.initNodesList(val)
else:
setattr(self,name,val)
def writeToFile(self,f):
"""Writes attractor field to file.
See also :py:func:`pyfrp.modules.pyfrp_gmsh_IO_module.writeAttractorField`.
Args:
f (file): File to write to.
Returns:
file: File.
"""
f=pyfrp_gmsh_IO_module.writeAttractorField(f,self.Id,pyfrp_misc_module.objAttrToList(self.NodesList,'Id'))
if self.isBkgdField():
f=pyfrp_gmsh_IO_module.writeBackgroundField(f,self.Id)
return f
def includedInThresholdField(self):
"""Returns all the threshholdFields where attractorField is included in.
Returns:
list: List of threshholdField objects.
"""
threshFields=self.domain.getAllFieldsOfType("threshold")
included=[]
for tField in threshFields:
if tField.IField==self.Id:
included.append(tField)
return included
def getSubElements(self):
"""Returns all elements that define this element.
Returns:
list: List of elements.
"""
return self.NodesList
class thresholdField(field):
"""Threshold field class storing information from gmsh .geo.
Subclasses from :py:class:`field`.
Args:
domain (pyfrp.modules.pyfrp_gmsh_geometry.domain): Domain surface belongs to.
Id (int): ID of field.
Keyword Args:
IField (int): ID of vertex that is center to threshold field.
LcMin (float): Minimum volSize of threshold field.
LcMax (float): Maximum volSize of threshold field.
DistMin (float): Minimum distance of threshold field.
DistMax (float): Maximum distance of threshold field.
"""
def __init__(self,domain,Id,IField=None,LcMin=5.,LcMax=20.,DistMin=30.,DistMax=60.):
field.__init__(self,domain,"threshold",Id)
self.IField=IField
self.LcMin=LcMin
self.LcMax=LcMax
self.DistMin=DistMin
self.DistMax=DistMax
def writeToFile(self,f):
"""Writes threshold field to file.
See also :py:func:`pyfrp.modules.pyfrp_gmsh_IO_module.writeThresholdField`.
Args:
f (file): File to write to.
Returns:
file: File.
"""
f=pyfrp_gmsh_IO_module.writeThresholdField(f,self.Id,self.IField,self.LcMin,self.LcMax,self.DistMin,self.DistMax)
if self.isBkgdField():
f=pyfrp_gmsh_IO_module.writeBackgroundField(f,self.Id)
return f
def getSubElements(self):
"""Returns all elements that define this element.
Returns:
list: List of elements.
"""
return [self.getDomain().getVertexById(self.IField)]
class minField(field):
"""Minimum field class storing information from gmsh .geo.
Subclasses from :py:class:`field`.
Args:
domain (pyfrp.modules.pyfrp_gmsh_geometry.domain): Domain surface belongs to.
Id (int): ID of field.
Keyword Args:
FieldsList (list): List of field IDs.
"""
def __init__(self,domain,Id,FieldsList=[]):
field.__init__(self,domain,"min",Id)
self.FieldsList=self.initFieldsList(FieldsList)
def setFieldAttr(self,name,val):
if name=="FieldsList":
self.initFieldsList(val)
else:
setattr(self,name,val)
def initFieldsList(self,FieldsList):
"""Adds a list of vertices to NodesList.
See also :py:func:`addNodeByID`.
Args:
FieldsList (list): List of field IDs.
Returns:
list: Updated FieldsList.
"""
self.FieldsList=[]
for Id in FieldsList:
self.addFieldByID(Id)
return self.FieldsList
def addFieldByID(self,ID):
"""Adds field object to FieldsList given the ID of the field.
Args:
ID (int): ID of field to be added.
Returns:
list: Updated FieldsList.
"""
f,b=self.domain.getFieldById(ID)
if isinstance(b,int):
self.FieldsList.append(f)
return self.FieldsList
def writeToFile(self,f):
"""Writes minimum field to file.
See also :py:func:`pyfrp.modules.pyfrp_gmsh_IO_module.writeMinField`.
Args:
f (file): File to write to.
Returns:
file: File.
"""
f=pyfrp_gmsh_IO_module.writeMinField(f,self.Id,pyfrp_misc_module.objAttrToList(self.FieldsList,'Id'))
if self.isBkgdField():
f=pyfrp_gmsh_IO_module.writeBackgroundField(f,self.Id)
return f
def addAllFields(self):
"""Adds all fields in domain to FieldsList if not
already in there.
Returns:
list: Updated FieldsList.
"""
for f in self.domain.fields:
if f not in self.FieldsList and f!=self:
self.FieldsList.append(f)
return self.FieldsList
def getSubElements(self):
"""Returns all elements that define this element.
Returns:
list: List of elements.
"""
return self.FieldsList
class boundaryLayerField(field):
r"""Boundary Layer field class storing information from gmsh .geo.
Creates boundary layer mesh around vertices, edges or surfaces in a geometry. The boundary layer element size
is given by
.. math:: h_{wall} * ratio^{(dist/h_{wall})}.
Subclasses from :py:class:`field`.
Example: Adding a box surrounded with a boundary layer to a geometry:
>>> vertices,lines,loops,surfaces,sloops,vols=d.addCuboidByParameters([256-50,256-50,-160],100,100,120,10,genVol=False)
Adjust volSize:
>>> d.setGlobalVolSize(30.)
Add boundary layer:
>>> volSizeLayer=10.
>>> blf=d.addBoundaryLayerField(hfar=volSizeLayer,hwall_n=volSizeLayer,hwall_t=volSizeLayer,thickness=30.,Quads=0.)
>>> blf.addFaceListByID(pyfrp_misc_module.objAttrToList(surfaces,'Id'))
>>> blf.setAsBkgdField()
>>> d.draw()
.. image:: ../imgs/pyfrp_gmsh_geometry/boundaryLayerField_geometry.png
Write to file:
>>> d.writeToFile("dome_boundary.geo")
Generate mesh:
>>> fnMesh=pyfrp_gmsh_module.runGmsh("dome_boundary.geo")
>>> m=pyfrp_mesh.mesh(None)
>>> m.setFnMesh(fnMesh)
>>> m.plotMesh()
.. image:: ../imgs/pyfrp_gmsh_geometry/boundaryLayerField_mesh.png
See also http://gmsh.info/doc/texinfo/gmsh.html#Specifying-mesh-element-sizes .
Args:
domain (pyfrp.modules.pyfrp_gmsh_geometry.domain): Domain surface belongs to.
Id (int): ID of field.
Keyword Args:
AnisoMax (float): Threshold angle for creating a mesh fan in the boundary layer.
IntersectMetrics (int): Intersect metrics of all faces.
Quads (int): Generate recombined elements in the boundary layer.
hfar (float): Element size far from the wall.
hwall_n (float): Mesh size normal to the wall.
hwall_t (float): Mesh size tangent to the wall.
ratio (float): Size ratio between two successive layers.
thickness (float): Maximal thickness of the boundary layer.
List (list): List of field IDs.
"""
def __init__(self,domain,Id,AnisoMax=10000000000,hwall_n=1.,hwall_t=1,ratio=1.1,thickness=10.,hfar=1.,IntersectMetrics=1,Quads=0.):
field.__init__(self,domain,"boundaryLayer",Id)
self.AnisoMax=AnisoMax
self.Quads=Quads
self.hfar=hfar
self.hwall_n=hwall_n
self.hwall_t=hwall_t
self.IntersectMetrics=IntersectMetrics
self.ratio=ratio
self.thickness=thickness
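# Containers for the geometry elements the boundary layer attaches to; filled via the add*ByID methods.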
self.EdgesList=[]
self.FacesList=[]
self.FanNodesList=[]
self.FansList=[]
self.NodesList=[]
def addEdgeListByID(self,IDs):
"""Adds a list of edge objects to EdgesList given the ID of the edges.
Args:
IDs (list): List of IDs of edges to be added.
Returns:
list: Updated EdgesList.
"""
for ID in IDs:
self.addEdgeByID(ID)
return self.EdgesList
def addFaceListByID(self,IDs):
"""Adds a list of surfaces objects to FacesList given the ID of the surfaces.
Args:
IDs (list): List of IDs of surfaces to be added.
Returns:
list: Updated FacesList.
"""
for ID in IDs:
self.addFaceByID(ID)
return self.FacesList
def addNodeListByID(self,IDs):
"""Adds a list of vertex objects to NodesList given the ID of the vertex.
Args:
IDs (list): List of IDs of vertices to be added.
Returns:
list: Updated NodesList.
"""
for ID in IDs:
self.addNodeByID(ID)
return self.NodesList
def addEdgeByID(self,ID):
"""Adds edge object to EdgesList given the ID of the edge.
Args:
ID (int): ID of edge to be added.
Returns:
list: Updated EdgesList.
"""
v,b=self.domain.getEdgeById(ID)
if isinstance(b,int):
self.EdgesList.append(v)
return self.EdgesList
def addFaceByID(self,ID):
"""Adds surface object to FacesList given the ID of the surface.
Args:
ID (int): ID of surface to be added.
Returns:
list: Updated FacesList.
"""
v,b=self.domain.getRuledSurfaceById(ID)
if isinstance(b,int):
self.FacesList.append(v)
return self.FacesList
def addNodeByID(self,ID):
"""Adds vertex object to NodesList given the ID of the vertex.
Args:
ID (int): ID of vertex to be added.
Returns:
list: Updated NodesList.
"""
v,b=self.domain.getVertexById(ID)
if isinstance(b,int):
self.NodesList.append(v)
return self.NodesList
def buildElementDict(self):
"""Builds element dictionary for writing to file.
"""
elements={}
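# Only element lists that actually contain entries are written to the .geo file.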
for elmnt in ["EdgesList","FacesList","NodesList"]:
if len(getattr(self,elmnt))>0:
elements[elmnt]=pyfrp_misc_module.objAttrToList(getattr(self,elmnt),'Id')
return elements
def writeToFile(self,f):
"""Writes boundaryLayerField to file.
See also :py:func:`pyfrp.modules.pyfrp_gmsh_IO_module.writeBoundaryLayerField`.
Args:
f (file): File to write to.
Returns:
file: File.
"""
#elements=pyfrp_misc_module.objAttr2Dict(self,attr=["EdgesList","FacesList","NodesList"])
elements=self.buildElementDict()
fieldOpts=pyfrp_misc_module.objAttr2Dict(self,attr=["AnisoMax","hwall_n","hwall_t","ratio","thickness","hfar","IntersectMetrics","Quads"])
f=pyfrp_gmsh_IO_module.writeBoundaryLayerField(f,self.Id,elements,fieldOpts)
if self.isBkgdField():
f=pyfrp_gmsh_IO_module.writeBackgroundField(f,self.Id)
return f
def getSubElements(self):
"""Returns all elements that define this element.
Returns:
list: List of elements.
"""
return [self.FacesList,self.EdgesList,self.NodesList]
def setFieldAttr(self,name,val):
"""Sets field attribute.
.. note:: Value can have any data type.
Args:
name (str): Name of attribute.
val (float): Value of attribute.
"""
self.EdgesList=[]
self.FacesList=[]
self.FanNodesList=[]
self.FansList=[]
self.NodesList=[]
if name=="NodesList":
self.addNodeListByID(val)
elif name=="FacesList":
self.addFaceListByID(val)
elif name=="EdgesList":
self.addEdgeListByID(val)
else:
setattr(self,name,val)
| gpl-3.0 |
ivoflipse/devide | testing/matplotlib_tests.py | 7 | 2426 | """Module to test basic matplotlib functionality.
"""
import os
import unittest
import tempfile
class MPLTest(unittest.TestCase):
def test_figure_output(self):
"""Test if matplotlib figure can be generated and wrote to disc.
"""
# make sure the pythonshell is running
self._devide_app.get_interface()._handler_menu_python_shell(None)
# create new figure
python_shell = self._devide_app.get_interface()._python_shell
f = python_shell.mpl_new_figure()
import pylab
# unfortunately, it's almost impossible to get pixel-identical
# rendering on all platforms, so we can only check that the plot
# itself is correct (all font-rendering is disabled)
# make sure we hardcode the font! (previous experiment)
#pylab.rcParams['font.sans-serif'] = ['Bitstream Vera Sans']
#pylab.rc('font', family='sans-serif')
from pylab import arange, plot, sin, cos, legend, grid, xlabel, ylabel
a = arange(-30, 30, 0.01)
plot(a, sin(a) / a, label='sinc(x)')
plot(a, cos(a), label='cos(x)')
#legend()
grid()
#xlabel('x')
#ylabel('f(x)')
# disable x and y ticks (no fonts allowed, remember)
pylab.xticks([])
pylab.yticks([])
# width and height in inches
f.set_figwidth(7.9)
f.set_figheight(5.28)
# and save it to disc
filename1 = tempfile.mktemp(suffix='.png', prefix='tmp', dir=None)
f.savefig(filename1, dpi=100)
# get rid of the figure
python_shell.mpl_close_figure(f)
# now compare the bugger
test_fn = os.path.join(self._devide_testing.get_images_dir(),
'mpl_test_figure_output.png')
err = self._devide_testing.compare_png_images(test_fn, filename1)
self.failUnless(err == 0, '%s differs from %s, err = %.2f' %
(filename1, test_fn, err))
def get_suite(devide_testing):
devide_app = devide_testing.devide_app
# both of these tests require wx
mm = devide_app.get_module_manager()
mpl_suite = unittest.TestSuite()
if 'matplotlib_kit' in mm.module_kits.module_kit_list:
t = MPLTest('test_figure_output')
t._devide_app = devide_app
t._devide_testing = devide_testing
mpl_suite.addTest(t)
return mpl_suite
| bsd-3-clause |
ua-snap/downscale | snap_scripts/epscor_sc/older_epscor_sc_scripts_archive/cru_cl20_1961_1990_climatology_preprocess.py | 1 | 12525 | import numpy as np # hack to solve a lib issue in the function args of xyztogrid
def cru_xyz_to_shp( in_xyz, lon_col, lat_col, crs, output_filename ):
'''
convert the cru cl2.0 1961-1990 Climatology data to a shapefile.
*can handle the .dat format even if compressed with .gzip extension.
PARAMETERS:
-----------
in_xyz = path to the .dat or .dat.gz downloaded cru cl2.0 file from UK Met Office site
lon_col = string name of column storing longitudes
lat_col = string name of column storing latitudes
crs = proj4string or epsg code
output_filename = string path to the output filename to be created
RETURNS
-------
output_filename as string
'''
colnames = ['lat', 'lon', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']
from shapely.geometry import Point
import pandas as pd
import geopandas as gpd
import os
if os.path.splitext( in_xyz )[1] == '.gz':
cru_df = pd.read_csv( in_xyz, delim_whitespace=True, compression='gzip', header=None, names=colnames )
else:
cru_df = pd.read_csv( in_xyz, delim_whitespace=True, header=None, names=colnames )
# create a column named geometry with shapely geometry objects for each row
def f( x ):
''' return a Point shapely object for each x,y pair'''
return Point( x.lon, x.lat )
cru_df[ 'geometry' ] = cru_df.apply( f, axis=1 )
cru_df = gpd.GeoDataFrame( cru_df ) # convert to GeoDataFrame
cru_df.to_file( output_filename, 'ESRI Shapefile' )
return output_filename
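# Illustrative usage (hypothetical paths; the .dat.gz is a CRU CL2.0 download):
# cru_xyz_to_shp('grid_10min_tmp.dat.gz', 'lon', 'lat', {'init':'epsg:4326'}, 'cru_tmp_points.shp')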
def bounds_to_extent( bounds ):
'''
take input rasterio bounds object and return an extent
'''
l,b,r,t = bounds
return [ (l,b), (r,b), (r,t), (l,t), (l,b) ]
def extent_to_shapefile( extent, output_shapefile, proj4string ):
''' convert an extent to a shapefile using its proj4string '''
import geopandas as gpd
from shapely.geometry import Polygon
gpd.GeoDataFrame( {'extent_id':1, 'geometry':Polygon( extent )}, index=[1], crs=proj4string ).to_file( output_shapefile, 'ESRI Shapefile' )
return output_shapefile
def pad_bounds( rst, npixels, crs, output_shapefile ):
'''
convert the extent of a raster to a shapefile, with the extent
expanded by npixels in each direction
rst: rasterio raster object
npixels: tuple of 4 (left(-),bottom(-),right(+),top(+)) number of pixels to
expand in each direction. for 5 pixels in each direction it would look like
this: (-5. -5. 5, 5) or just in the right and top directions like this:
(0,0,5,5).
crs: epsg code or proj4string defining the geospatial reference
system
output_shapefile: string full path to the newly created output shapefile
'''
import rasterio, os, sys
from shapely.geometry import Polygon
resolution = rst.res[0]
new_bounds = [ bound+(expand*resolution) for bound, expand in zip( rst.bounds, npixels ) ]
new_ext = bounds_to_extent( new_bounds )
return extent_to_shapefile( new_ext, output_shapefile, crs )
def xyz_to_grid( x, y, z, grid, method='cubic', output_dtype=np.float32 ):
'''
interpolate points to a grid. simple wrapper around
scipy.interpolate.griddata. Points and grid must be
in the same coordinate system
x = 1-D np.array of x coordinates / x,y,z must be same length
y = 1-D np.array of y coordinates / x,y,z must be same length
z = 1-D np.array of z coordinates / x,y,z must be same length
grid = tuple of meshgrid as made using numpy.meshgrid()
order (xi, yi)
method = one of 'cubic', 'nearest', 'linear'
'''
import numpy as np
from scipy.interpolate import griddata
zi = griddata( (x, y), z, grid, method=method )
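# Flip rows so the output is north-up (assumes the grid's y values increase with row index).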
zi = np.flipud( zi.astype( output_dtype ) )
return zi
def crop_to_bounds2( rasterio_rst, bounds, output_filename, mask=None, mask_value=None ):
'''
take a rasterio raster object and crop it to a smaller bounding box
masking is supported where masked values are 0 and unmasked values are 1
PARAMETERS
----------
rasterio_rst = rasterio raster object
bounds = rasterio style bounds (left, bottom, right, top)
output_filename = string path to the raster file to be created
mask = a 2d numpy array of the same shape as rasterio_rst with
masked values = 0 and unmasked = 1
RETURNS
-------
file path to the newly created file -- essentially the value of output_filename
'''
from rasterio import Affine as A
window = rasterio_rst.window( *bounds )
xmin, ymin, xmax, ymax = rasterio_rst.window_bounds( window )
row_res, col_res = rasterio_rst.res
arr = rasterio_rst.read( 1, window=window )
if mask is not None:
arr[ mask != 1 ] = mask_value
nodata = mask_value
else:
nodata = rasterio_rst.meta[ 'nodata' ]
meta = {}
meta.update( compress='lzw',
affine=A( col_res, 0.0, xmin, 0.0, -row_res, ymax ),
height=arr.shape[0], # raster dimensions in pixels, not pixel resolution
width=arr.shape[1],
transform=[xmin, col_res, 0.0, ymax, 0.0, -row_res],
crs=rasterio_rst.meta,
nodata=nodata,
dtype=rasterio_rst.meta[ 'dtype' ],
count=1,
driver=u'GTiff' )
with rasterio.open( output_filename, 'w', **meta ) as out:
out.write_band( 1, arr )
return output_filename
def crop_to_bounds( rasterio_rst, bounds ):
''' crop a raster by a window made from bounds of another domain '''
window = rasterio_rst.window( *bounds )
return rasterio_rst.read( 1, window=window )
def interpolate_akcan( x, y, z, grid, expanded_meta, template_rst, output_filename, method='cubic', output_dtype=np.float32 ):
'''
interpolate across the alaska canada domains and crop / mask to that extent
'''
cru_interp = xyz_to_grid( x, y, z, grid, method='cubic', output_dtype=np.float32 )
cru_interp = np.nan_to_num( cru_interp )
# convert to in memory rasterio object
expanded_meta.update( driver='MEM' )
cru_interpolated = rasterio.open( '', mode='w', **expanded_meta )
cru_interpolated.write_band( 1, cru_interp )
akcan = crop_to_bounds( cru_interpolated, template_rst.bounds )
meta = template_rst.meta
meta.update( compress='lzw' )
with rasterio.open( output_filename, 'w', **meta ) as out:
mask = template_rst.read_masks( 1 )
akcan[ mask == 0 ] = meta[ 'nodata' ]
akcan = np.ma.masked_where( mask == 0, akcan )
akcan.fill_value = meta[ 'nodata' ]
out.write_band( 1, akcan )
return output_filename
def run( args ):
return interpolate_akcan( **args )
if __name__ == '__main__':
import os, rasterio, glob, fiona
import numpy as np
import pandas as pd
import geopandas as gpd
from rasterio import Affine as A
from pathos import multiprocessing as mp
import argparse
# parse the commandline arguments
parser = argparse.ArgumentParser( description='preprocess CRU CL2.0 data to the AKCAN extent required by SNAP' )
parser.add_argument( "-p", "--base_path", action='store', dest='base_path', type=str, help="path to parent directory with a subdirector(ies)y storing the data" )
parser.add_argument( "-cru", "--cru_filename", action='store', dest='cru_filename', type=str, help="string path to the .tar.gz file location, downloaded from the CRU site" )
parser.add_argument( "-v", "--variable", action='store', dest='variable', type=str, help="string abbreviated name of the variable being processed." )
parser.add_argument( "-tr", "--template_raster_fn", action='store', dest='template_raster_fn', type=str, help="string path to a template raster dataset to match the CRU CL2.0 to." )
# base_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data'
# # open the Climatic Research Unit (CRU) CL2.0 data downloaded from:
# # http://www.cru.uea.ac.uk/cru/data/hrg/tmc/
# # cru_filename = os.path.join( cru_folder, 'grid_10min_tmp.dat.gz'
# cru_filename = os.path.join( cru_folder, 'grid_10min_sunp.dat.gz'
# variable = 'sunp'
# parse and unpack the args
args = parser.parse_args()
base_path = args.base_path
cru_filename = args.cru_filename
variable = args.variable
template_raster_fn = args.template_raster_fn
# build an output path to store the data generated with this script
cru_path = os.path.join( base_path, 'cru_ts20', variable )
if not os.path.exists( cru_path ):
os.makedirs( cru_path )
# read in the gzipped .dat file downloaded from the MET Office UK
colnames = [ 'lat', 'lon', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12' ]
cru_df = pd.read_csv( cru_filename, delim_whitespace=True, compression='gzip', header=None, names=colnames )
# convert to point shapefile
cru_shp_fn = os.path.join( cru_path, 'cru_'+variable+'_ts20_1961_1990_climatology.shp' )
cru_xyz_to_shp( cru_filename, 'lon', 'lat', {'init':'epsg:4326'}, cru_shp_fn )
# template dataset
template_raster = rasterio.open( template_raster_fn )
resolution = template_raster.res
template_meta = template_raster.meta
# pad the bounds of the akcan template dataset
crs = { 'init':'epsg:3338' }
extent_path = os.path.join( cru_path, 'extents' )
if not os.path.exists( extent_path ):
os.makedirs( extent_path )
new_ext_fn = os.path.join( extent_path, 'akcan_extent.shp' )
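# Pad the template extent by 200 pixels to the west/east/north and 2000 pixels to the south.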
npixels = ( -200, -2000, 200, 200 )
pad_bounds( template_raster, npixels, crs, new_ext_fn )
# filename for a newly clipped and reprojected shapefile using the above padded bounds shape
intermediate_path = os.path.join( cru_path, 'intermediate' )
if not os.path.exists( intermediate_path ):
os.makedirs( intermediate_path )
expanded_ext_fn = os.path.join( intermediate_path, variable + '_cru_ts20_1961_1990_climatology_3338_akcan_expanded.shp' )
# reproject / crop to the AKCAN extent, the cru shapefile built above using ogr2ogr
os.system( "ogr2ogr -overwrite -f 'ESRI Shapefile' -clipdst " + new_ext_fn + " -s_srs 'EPSG:4326' -t_srs 'EPSG:3338' " + expanded_ext_fn + " " + cru_shp_fn )
# -wrapdateline -- removed since it is not a geog srs output
# generate metadata for the expanded extent to interpolate to
xmin, ymin, xmax, ymax = fiona.open( new_ext_fn ).bounds
cols = (xmax - xmin) / resolution[1]
rows = (ymax - ymin) / resolution[0]
# copy/update metadata to expanded extent
expanded_meta = template_meta
expanded_meta[ 'affine' ] = A( resolution[0], 0.0, xmin, 0.0, -resolution[1], ymax )
expanded_meta[ 'crs' ] = { 'init':'epsg:3338' }
expanded_meta[ 'height' ] = rows
expanded_meta[ 'width' ] = cols
expanded_meta[ 'transform' ] = expanded_meta[ 'affine' ].to_gdal()
# read in the clipped and reprojected cru shapefile using geopandas
cru_gdf = gpd.read_file( expanded_ext_fn )
# update lon and lat to the 3338
cru_gdf.lon = cru_gdf.geometry.apply( lambda x: x.x )
cru_gdf.lat = cru_gdf.geometry.apply( lambda x: x.y )
# build the interpolation input values
x = np.array(cru_gdf.lon.tolist())
y = np.array(cru_gdf.lat.tolist())
# build the output grid
xi = np.linspace( xmin, xmax, cols )
yi = np.linspace( ymin, ymax, rows )
xi, yi = np.meshgrid( xi, yi )
akcan_path = os.path.join( cru_path, 'akcan' )
if not os.path.exists( akcan_path ):
os.makedirs( akcan_path )
# build some args
months = ['01','02','03','04','05','06','07','08','09','10','11','12']
output_filenames = [ os.path.join( akcan_path, variable+'_cru_cl20_akcan_'+month+'_1961_1990.tif' ) for month in months ]
# run it in parallel -- the pool is not working currently! switching to serial
args_list = [ { 'x':x, 'y':y, 'z':np.array(cru_gdf[ month ]), 'grid':(xi,yi), 'expanded_meta':expanded_meta, 'template_rst':template_raster, 'output_filename':out_fn } for month, out_fn in zip( months, output_filenames ) ]
# pool = mp.Pool( 4 )
out = map( lambda x: run( x ), args_list )
# out = pool.map( lambda x: run( x ), args_list )
# pool.close()
# # # EXAMPLE OF USE # # #
# import os
# os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs/downscale_cmip5/bin' )
# cru_folder = '/Data/Base_Data/Climate/World/CRU_grids/CRU_TS20'
# # var_fn_dict = { 'hur':os.path.join( cru_folder, 'grid_10min_reh.dat.gz'),'tas':os.path.join( cru_folder, 'grid_10min_tmp.dat.gz'), 'sunp':os.path.join( cru_folder, 'grid_10min_sunp.dat.gz' ) }
# var_fn_dict = { 'pre':os.path.join( cru_folder, 'grid_10min_pre.dat.gz' ) } # just a test.
# base_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/CRU_TEM_2016' #cru_october_final'
# template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
# for variable, cru_filename in var_fn_dict.iteritems():
# # print 'working on : %s' % variable
# os.system( 'ipython -- cru_cl20_1961_1990_climatology_preprocess.py -p ' + base_path + ' -cru ' + cru_filename + ' -v ' + variable + ' -tr ' + template_raster_fn )
| mit |
yanyuchka/infovisproj-temp | crash_cleaner.py | 1 | 3255 | #!/user/bin/python
# this python script cleans raw crash data and subsets the last n days of observations
import pandas as pd
import numpy as np
import datetime as dt
import re
import os
import logging
dpath = './'
def date_parser(ds):
if type(ds) == str:
return dt.datetime.date(dt.datetime.strptime(ds, "%m/%d/%Y"))
else:
return np.nan
def time_parser(ts):
if type(ts) == str:
return dt.datetime.time(dt.datetime.strptime(ts, "%H:%M"))
else:
return np.nan
#zip-s war by [email protected]
def zip_cleaner(s):
if type(s) != str:
return np.nan
elif re.match('^\d\d\d\d\d$', s):
return s
elif re.match('^\d\d\d\d\d-\d*$', s):
return re.sub('-\d*$', '', s)
else:
return np.nan
def test_zip_cleaner():
assert '12345' == zip_cleaner('12345')
assert '12345' == zip_cleaner('12345-1234')
assert np.isnan( zip_cleaner(np.nan) )
assert np.isnan( zip_cleaner('1234') )
assert np.isnan( zip_cleaner('0') )
assert np.isnan( zip_cleaner('UNKNOWN'))
# read raw crash data
def read_crash_csv(data):
df = pd.read_csv(data,
dtype={
'DATE' : str,
'TIME' : str,
'BOROUGH': str,
'ZIP CODE': str,
'LATITUDE': np.floating,
'LONGITUDE': np.floating,
'LOCATION' : str, # derived type
'ON STREET NAME' : str,
'CROSS STREET NAME': str,
'OFF STREET NAME' : str,
'NUMBER OF PERSONS INJURED' : np.integer,
'NUMBER OF PERSONS KILLED' : np.integer,
'NUMBER OF PEDESTRIANS INJURED' : np.integer,
'NUMBER OF PEDESTRIANS KILLED' : np.integer,
'NUMBER OF CYCLIST INJURED' : np.integer,
'NUMBER OF CYCLIST KILLED' : np.integer,
'NUMBER OF MOTORIST INJURED' : np.integer,
'NUMBER OF MOTORIST KILLED' : np.integer,
'CONTRIBUTING FACTOR VEHICLE 1' : str,
'CONTRIBUTING FACTOR VEHICLE 2' : str,
'CONTRIBUTING FACTOR VEHICLE 3' : str,
'CONTRIBUTING FACTOR VEHICLE 4' : str,
'CONTRIBUTING FACTOR VEHICLE 5' : str,
'UNIQUE KEY' : np.integer,
'VEHICLE TYPE CODE 1' : str,
'VEHICLE TYPE CODE 2' : str,
'VEHICLE TYPE CODE 3' : str,
'VEHICLE TYPE CODE 4' : str,
'VEHICLE TYPE CODE 5' : str})
df['DATE'] = map(date_parser, df['DATE'])
df['TIME'] = map(time_parser, df['TIME'])
df['LOCATION'] = zip(df.LATITUDE,df.LONGITUDE)
df['ZIP CODE'] = map(zip_cleaner,df['ZIP CODE'])
df.columns = [field.replace(" ","_") for field in df.columns]
return(df)
#subset last n days of crash data and log number of records in data sets
def sample_crash_data(n,path,folders):
df = read_crash_csv(os.path.join(path,folders[0],'crashdata.csv'))
logging.basicConfig(filename=os.path.join(path,folders[1],'sample.log'),level=logging.DEBUG)
start = dt.date.today()
logging.info('As for %s raw data set contains %s records ...' % (dt.datetime.strftime(start,"%m/%d/%Y %H:%M:%S")
,df.shape[0]))
end = dt.date.today()-dt.timedelta(days=n)
df_new = df[(df.DATE >= end) & (df.DATE <= start)]
df_new.to_csv(os.path.join(path,folders[1],'%sdays_crashdata.csv' %(n)), index=False)
logging.info('Raw data set for the last %s days contains %s records' % (n, df_new.shape[0]))
# n = 150 days
if __name__ == "__main__":
sample_crash_data(150,dpath,['rawdata','data'])
| mit |
typpo/asterank | data/pipeline/run/00_deltav/deltav.py | 1 | 3826 | """
Calculates delta-v for asteroids according to Shoemaker and Helin (1978)
See
http://echo.jpl.nasa.gov/~lance/delta_v/delta_v.rendezvous.html
http://echo.jpl.nasa.gov/~lance/delta_v/deltav.13.pl as well
"""
import sys
import numpy as np
import operator
import pandas as pp
if len(sys.argv) < 3:
DATA_PATH = 'data/latest_sbdb.csv'
DV_TEST_PATH = 'data/deltav/db.csv'
OUTPUT_PATH = 'data/deltav/db2.csv'
else:
DATA_PATH = sys.argv[1]
DV_TEST_PATH = sys.argv[2]
OUTPUT_PATH = sys.argv[3]
print 'Reading', DATA_PATH, '...'
df = pp.read_csv(DATA_PATH, index_col='pdes')
df.i = df.i * np.pi / 180 # inclination in radians
df['Q'] = df.a * (1.0 + df.e) # aphelion
def AtensDeltaV(df):
"""Delta V calculation for Atens asteroids, where a < 1."""
df['ut2'] = 2 - 2*np.cos(df.i/2)*np.sqrt(2*df.Q - df.Q**2)
df['uc2'] = 3/df.Q - 1 - (2/df.Q)*np.sqrt(2 - df.Q)
df['ur2'] = 3/df.Q - 1/df.a - (
(2/df.Q)*np.cos(df.i/2)*np.sqrt(df.a*(1-df.e**2)/df.Q))
return df
def ApollosDeltaV(df):
"""Delta V calculation for Apollo asteroids, where q <= 1, a >= 1."""
df['ut2'] = 3 - 2/(df.Q + 1) - 2*np.cos(df.i/2)*np.sqrt(2*df.Q/(df.Q+1))
df['uc2'] = 3/df.Q - 2/(df.Q+1) - (2/df.Q)*np.sqrt(2/(df.Q+1))
df['ur2'] = 3/df.Q - 1/df.a - (
(2/df.Q)*np.cos(df.i/2)*np.sqrt((df.a/df.Q)*(1-df.e**2)))
return df
def AmorsDeltaV(df):
"""Delta V calculation for Amors asteroids, where q > 1 and a >= 1."""
df['ut2'] = 3 - 2/(df.Q+1) - 2*np.cos(df.i/2)*np.sqrt(2*df.Q/(df.Q+1))
df['uc2'] = 3/df.Q - 2/(df.Q+1) - (
(2/df.Q)*np.cos(df.i/2)*np.sqrt(2/(df.Q+1)))
df['ur2'] = 3/df.Q - 1/df.a - (2/df.Q)*np.sqrt(df.a*(1-df.e**2)/df.Q)
return df
atens = AtensDeltaV(df[df.a < 1])
apollos = ApollosDeltaV(df[(df.q <= 1) & (df.a >= 1)])
amors = AmorsDeltaV(df[(df.q > 1) & (df.a >= 1)])
df = pp.concat((atens, apollos, amors))
v_earth = 29.784 # earth orbital velocity
U0 = 7.727 / v_earth; # Normalized LEO velocity @ 300km
S = np.sqrt(2) * U0 # Normalized escape velocity from LEO
# Impulse for leaving LEO.
df['ul'] = np.sqrt(df.ut2 + S**2) - U0
# Impulse for rendevouzing at asteroid.
df['ur'] = np.sqrt(df.uc2 - (
2*np.sqrt(df.ur2*df.uc2)*np.cos(df.i/2)) + df.ur2)
# Figure of merit, from Shoemaker and Helin.
df['F'] = df.ul + df.ur
# Delta V.
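# Un-normalize: velocities above are scaled by Earth's ~30 km/s orbital speed; a constant 0.5 km/s offset is added.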
df['dv'] = (30*df.F) + .5
# Import Benner's delta v calculations.
print 'Reading', DV_TEST_PATH, '...'
df_test = pp.read_csv(DV_TEST_PATH, index_col='pdes')
results = df.join(df_test, how='inner', rsuffix='_benner')
results['dv_diff'] = (np.abs(results.dv - results.dv_benner) /
results.dv_benner)
print('\n\n% deviation from known delta-vs:')
print(results.dv_diff.describe())
print('\n\n% deviation for Atens:')
print(results[results.a < 1].dv_diff.describe())
print('\n\n% deviation for Apollos:')
print(results[(results.q <= 1) & (results.a >= 1)].dv_diff.describe())
print('\n\n% deviation for Amors:')
print(results[(results.q > 1) & (results.a >= 1)].dv_diff.describe())
print('\n\n30 asteroids with highest error:')
outliers = results.sort_values(by=['dv_diff'])[-30:]
for pdes, row in outliers.iterrows():
print('%s \t %.3f km/s (expected %.3f km/s) (error %%%.2f)' % (
pdes, row['dv'], row['dv_benner'], row['dv_diff']*100))
df = df.sort_values(by=['dv'])
print('\n\n30 asteroids with lowest delta-v:')
for pdes, row in df[:30].iterrows():
print('%s \t%.3f km/s' % (pdes, row['dv']))
print '\nWriting results to', OUTPUT_PATH
#df.to_csv(OUTPUT_PATH, cols=('dv',))
f = open(OUTPUT_PATH, 'w')
f.write('pdes,dv\n')
for pdes, row in df.iterrows():
f.write('%s,%f\n' % (pdes, row['dv']))
"""
if full_name.find('Klio') > -1:
print full_name
print row
name = row['pdes'] if row['pdes'] != '' else full_name
f.write('%s,%f\n' % (name, row['dv']))
"""
f.close()
| mit |
Diviyan-Kalainathan/causal-humans | Cause-effect/model_comparison/generate_test_set.py | 1 | 2620 | """
Generation of the test set, w/ permuted vals
Author : Diviyan Kalainathan
Date : 11/10/2016
"""
#ToDo Multiprocess program?
import os,sys
import pandas as pd
from random import randint
from random import shuffle
import csv
from multiprocessing import Pool
import numpy
from sklearn import metrics
inputdata='../output/obj8/pca_var/cluster_5/pairs_c_5.csv'
outputfolder='../output/test/'
info='../output/obj8/pca_var/cluster_5/publicinfo_c_5.csv'
max_proc=int(sys.argv[1])
max_gen=10
if not os.path.exists(outputfolder):
os.makedirs(outputfolder)
def chunk_job(chunk,part_no):
chunk = pd.merge(chunk, publicinfo, on='SampleID')
chunk = chunk.dropna(how='all')
chunk['Pairtype'] = 'O'
# Drop flags:
chunk = chunk[chunk.SampleID.str.contains('flag')==False]
to_add = []
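# For every original pair, generate max_gen 'probe' pairs by randomly shuffling variable A, variable B, or both.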
for index, row in chunk.iterrows():
var1 = row['A']
var2 = row['B']
for i in range(max_gen):
mode = randint(1, 3)
if mode == 1:
var1 = var1.split()
shuffle(var1)
var1 = " ".join(str(j) for j in var1)
elif mode == 2:
var2 = var2.split()
shuffle(var2)
var2 = " ".join(str(j) for j in var2)
elif mode == 3:
var1 = var1.split()
shuffle(var1)
var1 = " ".join(str(j) for j in var1)
var2 = var2.split()
shuffle(var2)
var2 = " ".join(str(j) for j in var2)
to_add.append([row['SampleID'] + '_Probe' + str(i), var1, var2, row['A-Type'], row['B-Type'], 'P'])
df2 = pd.DataFrame(to_add, columns=['SampleID', 'A', 'B', 'A-Type', 'B-Type', 'Pairtype'])
chunk=pd.concat([chunk,df2],ignore_index=True)
sys.stdout.write('Finishing chunk '+str(part_no)+'\n')
sys.stdout.flush()
# chunk= chunk.iloc[numpy.random.permutation(len(chunk))] #No need to shuffle
# chunk.reset_index(drop=True)
chunk.to_csv(outputfolder + 'test_crit_p' + str(part_no) + '.csv', sep=';',index=False)
outputfile = open(outputfolder + 'test_crit_.csv', 'wb') # Create file
writer = csv.writer(outputfile, delimiter=';', lineterminator='\n')
writer.writerow(['SampleID', 'A', 'B', 'Pairtype'])
outputfile.close()
publicinfo=pd.read_csv(info,sep=';')
publicinfo.columns=['SampleID','A-Type','B-Type']
chunksize=10**4
data=pd.read_csv(inputdata,sep=';', chunksize=chunksize)
print(chunksize)
pool=Pool(processes=max_proc)
partno=0
for chunk in data:
partno+=1
pool.apply_async(chunk_job,args=(chunk,partno,))
pool.close()
pool.join()
| mit |
evanbiederstedt/RRBSfun | scripts/repeat_finder_scripts/faster_repeats/temp_all.py | 1 | 1358 | import glob
import pandas as pd
import numpy as np
df1 = pd.read_csv("repeats_hg19.csv")
RRBS_files = glob.glob("RRBS*")
df_dict = {group : df for group, df in df1.groupby(by="chr")}
# In[11]:
from numpy import nan
def between_range(row, group_dict):
# get sub dataframe from dictionary; if the chromosome key is not found, return an empty string
subset = group_dict.get(row['chr'], None)
if subset is None:
return ''
# slice subset to get even smaller group
subset = subset.loc[subset['start'] <= row['start'], :]
# check if subset has any values
if subset.empty:
return ''
# now slice the second time and return only a series of 'labels'
subset = subset.loc[subset['end'] >= row['start'], 'repeat_class']
# if more than one label matches, only the first one is kept here
# return the first label, or an empty string if the subset is empty
if subset.empty:
return ''
else:
return subset.iloc[0]
# In[12]:
from functools import partial
from time import time
between_range_partial = partial(between_range, group_dict=df_dict)
cols = ['chr', 'start']
for filename in RRBS_files:
df2 = pd.read_csv(filename, sep="\t")
labels = df2.loc[:, cols].apply(between_range_partial, axis=1)
df2["repeat_class"] = labels
df2.to_csv(filename, sep='\t', index=False)
| mit |
NMTHydro/Recharge | utils/raster_stacker_jan.py | 1 | 8631 | # ===============================================================================
# Copyright 2016 gabe-parrish
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= standard library imports ========================
import os
import rasterio
from rasterio import windows
import numpy as np
from numpy import meshgrid, arange
from affine import Affine
from pyproj import Proj, transform
from utils.pixel_coord_finder import coord_getter
from recharge.raster_tools import convert_raster_to_array, convert_array_to_raster
import seaborn as sns
import matplotlib.pyplot as plt
import multiprocessing as mp
from utils.tracker_plot import grapher, get_columns
import pandas as pd
# ============= local library imports ===========================
def data_frame_formatter(raster_dictionary):
"""
:param raster_dictionary:
:return: saved csvs
Each raster will get written into a dict with certain info, then the dict will get turned into a
dataframe and the dataframes will all get merged into one. This will then be exported and saved as a csv.
Cool? Cool.
dict = {column heading : [list of values]}
cols = x(easting), y(northing), nlcd, ETrF, NDVI
"""
ras_dict = {}
print "x, y is done. Starting k, v loop"
for k, v in raster_dictionary.iteritems():
#=======
#print v[0]
#index = np.argwhere(v[0]==0)
#non_zero_vals = v[0][v[0] != 0] #v[0].delete(0, index)
#print y
#========
ras_dict["{}".format(k)] = v[0].ravel().tolist() #v[0].tolist()
print "Done wi kv loop"
# col_list = ["x", "y"]
col_list = []
for key in ras_dict.keys():
col_list.append(key)
df = pd.DataFrame(ras_dict, columns=col_list)
print "Done with main Dataframe"
# AGRICULTURE
# This way you get rid of the zero values associated with the unmasked areas.
df_ag = df[df['jan_ag_mask_from_maskjanaoi_20110818']>0]
# Filter out slope values greater than three
df_ag = df_ag[df_ag['slope_degree_20110818']<3]
df_ag.to_csv("/Volumes/SeagateExpansionDrive/jan_metric/jan_comparison_ag.csv")
print "Done writing ag data frame"
# NATURAL AREAS
# This way you get rid of the zero values associated with the unmasked areas.
df_nat = df[df['jan_natural_mask_from_naturalareasmask_20110818']>0]
# Filter out slope values greater than three
df_nat = df_nat[df_nat['slope_degree_20110818']<3]
df_nat.to_csv("/Volumes/SeagateExpansionDrive/jan_metric/jan_comparison_nat.csv")
print "Done writing natural data frame"
#============================
# TODO - Fix hard-coding here
# df_filter = df.loc[df['nlcd_align_path35_dixon_wy'].isin([81, 80])]
# df_filter.to_csv("/Volumes/SeagateExpansionDrive/jan_metric/for_stacking/stack_csv.csv")
# TODO - and here
# y = df_filter.ix[:, 'LT50350312011166PAC01_ETrF']
# x = df_filter.ix[:, "LT50350312011166PAC01_NDVI"]
### IF doing at-large NLCD filtering
# df = df[df['aligned_nlcd_full_warp_near_clip_3336']>80 | df['aligned_nlcd_full_warp_near_clip_3336']< 81]
# ============================
# TODO - comment out if using nlcd filter at-large
# AGRICULTURAL
ETrF_ag = df_ag.ix[:, "etrf24_20110818"]
ETa_ag = df_ag.ix[:, "et24_20110818"]
# NATURAL
ETrF_nat = df_nat.ix[:, "etrf24_20110818"]
ETa_nat = df_nat.ix[:, "et24_20110818"]
# =======GRAPHING PORTION=======
# AGRICULTURAL
ETrF_ag_hist = plt.figure()
aa = ETrF_ag_hist.add_subplot(111)
aa.set_title('Jan Metric 2011 August 18 - ETrF Agricultural Pixels', fontweight='bold')
aa.set_xlabel('ETrF', style='italic')
aa.set_ylabel('Frequency', style='italic')
aa.hist(ETrF_ag, bins=20)
plt.tight_layout()
plt.savefig(
"/Volumes/SeagateExpansionDrive/jan_metric/plots/non_aligned_plots/Histograms/janmetric/ETrF_ag_hist_janmetric.pdf")
ETa_ag_hist = plt.figure()
aa = ETa_ag_hist.add_subplot(111)
aa.set_title('Jan Metric 2011 August 18 - ETa_ag_hist Agricultural Pixels', fontweight='bold')
aa.set_xlabel('ETa', style='italic')
aa.set_ylabel('Frequency', style='italic')
aa.hist(ETa_ag, bins=20)
plt.tight_layout()
plt.savefig(
"/Volumes/SeagateExpansionDrive/jan_metric/plots/non_aligned_plots/Histograms/janmetric/ETa_ag_hist_janmetric.pdf")
# NATURAL
ETrF_nat_hist = plt.figure()
aa = ETrF_nat_hist.add_subplot(111)
aa.set_title('Jan Metric 2011 August 18 - ETrF Natural Pixels', fontweight='bold')
aa.set_xlabel('ETrF', style='italic')
aa.set_ylabel('Frequency', style='italic')
aa.hist(ETrF_nat, bins=20)
plt.tight_layout()
plt.savefig(
"/Volumes/SeagateExpansionDrive/jan_metric/plots/non_aligned_plots/Histograms/janmetric/ETrF_nat_hist_janmetric.pdf")
ETa_nat_hist = plt.figure()
aa = ETa_nat_hist.add_subplot(111)
aa.set_title('Jan Metric 2011 August 18 - ETa_nat_hist Natural Pixels', fontweight='bold')
aa.set_xlabel('ETa', style='italic')
aa.set_ylabel('Frequency', style='italic')
aa.hist(ETa_nat, bins=20)
plt.tight_layout()
plt.savefig(
"/Volumes/SeagateExpansionDrive/jan_metric/plots/non_aligned_plots/Histograms/janmetric/ETa_nat_hist_janmetric.pdf")
def run():
# TODO - update at each use
drive_path = os.path.join('/', 'Volumes', 'SeagateExpansionDrive', )
#tiff_path = os.path.join(drive_path, "jan_metric", 'for_stacking', 'aligned_nlcd_full_warp_near_clip_3336.tif')
stack_location = os.path.join(drive_path, "jan_metric", 'stacking_histogram_jan')
#x, y = coord_getter(tiff_path)
# print "x", x
#
# print "y", y
#### find the right window to use.
# First get the minimum raster extent.
comparison_list = []
comparison_dict = {}
for directory_path, subdir, file in os.walk(stack_location, topdown=False):
for tf in file:
if tf.endswith(".tif"):
tiff_path = os.path.join(directory_path, tf)
with rasterio.open(tiff_path) as src:
ras = src.read(1)
                    # ras.shape -> (rows, cols); rows * cols is the pixel count compared below
comparison_list.append(ras.shape[0]*ras.shape[1])
comparison_dict["{}".format(ras.shape[0]*ras.shape[1])] = tiff_path
# get the minimum dimensions raster.
val = min(comparison_list)
min_raster_path = comparison_dict["{}".format(val)]
print (min_raster_path)
with rasterio.open(min_raster_path) as raster:
ras = raster.read(1)
print 'ras shape 0', ras.shape[0]
print 'ras shape 1', ras.shape[1]
window = ((0, ras.shape[0]), (0, ras.shape[1]))
print "WINDOW", window
bounds = raster.window_bounds(window)
print "BOUNDS", bounds
# Take the bounds from the minimum raster and for each raster in the dir,
# get the correct window to be read in for the dict using the bounds from the min raster.
raster_dict = {}
window_lst = []
for directory_path, subdir, file in os.walk(stack_location, topdown=False):
for tf in file:
if tf.endswith(".tif") or tf.endswith(".img"):
tiff_path = os.path.join(directory_path, tf)
#print 'path', tiff_path
with rasterio.open(tiff_path) as r:
T0 = r.affine # upper-left pixel corner affine transform
print T0
window = r.window(*bounds)
print "edited window", window
A = r.read(1, window=window)
print "A", A
print "A shape", A.shape
print 'path', tiff_path
raster_dict['{}'.format(tf.split(".")[0])] = (A, tiff_path)
print 'raster dict', raster_dict
print "Starting the formatter"
data_frame_formatter(raster_dict)
if __name__ == "__main__":
run() | apache-2.0 |
Karuntg/SDSS_SSC | analysis/processLCs.py | 1 | 6459 | # Given a file with directory and LC file information,
# loop over LCs and do some computations.
# Libraries
import os, sys, math
import numpy as np
from astropy.table import Table
from astropy.io import ascii
from matplotlib import pyplot as plt
from astroML.plotting import scatter_contour
from astroML.plotting import hist as histML
def processLC(LCfile):
# read into astropy Table
data = ascii.read(LCfile)
rmag = data['col31']
rerr = data['col36']
tai = data['col51'] + data['col56']
Ndata = rmag.size
### compute and return
# dT = taiMax - taiMin
# min, max, rms, and chi2 for the r band
dT = np.max(tai) - np.min(tai)
rmed = np.median(rmag)
# robust standard deviation
rms = 0.7413*(np.percentile(rmag, 75)-np.percentile(rmag, 25))
# robust chi2dof
chi = (rmag-rmed)/rerr
chi2 = 0.7413*(np.percentile(chi, 75)-np.percentile(chi, 25))
    ### eventually, run Lomb-Scargle here
# bestPeriods = LombScargle(tai, rmag)
return np.min(tai), dT, Ndata, rmed, rms, chi2
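# Sanity-check sketch (not part of the pipeline): for Gaussian noise, the robust spread
# estimate 0.7413*(q75 - q25) used above should converge to the true standard deviation.
def _check_robust_sigma(n=100000, true_sigma=0.2):
    sample = np.random.normal(0.0, true_sigma, n)
    return 0.7413 * (np.percentile(sample, 75) - np.percentile(sample, 25))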
def getLCdata(LCfile):
# read into astropy Table
data = ascii.read(LCfile)
rmag = data['col31']
rerr = data['col36']
tai = data['col51'] + data['col56']
return rmag, rerr
# given vectors x and y, fit medians in bins from xMin to xMax, with Nbin steps,
# and return xBin, nPts, medianBin, sigGbin
def fitMedians(x, y, xMin, xMax, Nbin, verbose=1):
# first generate bins
xEdge = np.linspace(xMin, xMax, (Nbin+1))
xBin = np.linspace(0, 1, Nbin)
nPts = 0*np.linspace(0, 1, Nbin)
medianBin = 0*np.linspace(0, 1, Nbin)
sigGbin = -1+0*np.linspace(0, 1, Nbin)
for i in range(0, Nbin):
xBin[i] = 0.5*(xEdge[i]+xEdge[i+1])
yAux = y[(x>xEdge[i])&(x<=xEdge[i+1])]
if (yAux.size > 0):
nPts[i] = yAux.size
medianBin[i] = np.median(yAux)
# robust estimate of standard deviation: 0.741*(q75-q25)
sigmaG = 0.741*(np.percentile(yAux,75)-np.percentile(yAux,25))
# uncertainty of the median: sqrt(pi/2)*st.dev/sqrt(N)
sigGbin[i] = np.sqrt(np.pi/2)*sigmaG/np.sqrt(nPts[i])
else:
            nPts[i], medianBin[i], sigGbin[i] = 0, 0, 0
if (verbose):
        print('median:', np.median(medianBin[nPts>0]))
return xBin, nPts, medianBin, sigGbin
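# Minimal usage sketch for fitMedians (hypothetical data, not called anywhere): x plays
# the role of r-band magnitude and y of per-star rms; the binned medians then trace how
# the scatter grows toward faint magnitudes.
def _example_fitMedians(npoints=5000):
    x = np.random.uniform(14.0, 22.0, npoints)
    y = 0.01 + 0.02 * (x - 14.0) + np.random.normal(0.0, 0.005, npoints)
    xBin, nPts, medianBin, sigGbin = fitMedians(x, y, 14.0, 22.0, 20, verbose=0)
    return xBin, medianBin, sigGbin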
def analyzeSTATSfile(statsFile, outdir):
"""Make a 4-panel plot illustrating basic stats"""
# Ndata histogram
# rchi2 histogram
# rrms vs. rmed
# rchi2 vs. rmed
# <TAImin dT Ndata rmed rrms rchi2>
dataT = np.loadtxt(statsFile, skiprows=1, usecols = (0,1,2,3,4,5))
data = dataT.transpose(1,0)
TAImin = data[0]
dT = data[1]
Ndata = data[2]
rmag = data[3]
rrms = data[4]
rchi2 = data[5]
### PLOTTING ###
plot_kwargs = dict(color='k', linestyle='none', marker='.', markersize=1)
plt.subplots_adjust(bottom=0.10, top=0.92, left=0.11, right=0.9, wspace=0.41, hspace=0.32)
ax1 = plt.subplot(2,2,1)
hist, bins = np.histogram(Ndata, bins=60)
center = (bins[:-1]+bins[1:])/2
ax1.plot(center, hist, drawstyle='steps')
#histML(Ndata, bins='knuth', ax=ax1, histtype='stepfilled', ec='k', fc='#AAAAAA')
ax1.set_xlim(0, 60)
ax1.set_ylim(0, 1.1*np.max(hist))
ax1.set_xlabel(r'$\mathrm{Ndata}$')
ax1.set_ylabel(r'$\mathrm{dN/dNdata}$')
ax1.set_title('Standard stars from SDSS Stripe 82')
rchi2ok = rchi2[rchi2<3.0]
hist, bins = np.histogram(rchi2ok, bins=50)
center = (bins[:-1]+bins[1:])/2
ax2 = plt.subplot(2,2,2)
ax2.plot(center, hist, drawstyle='steps')
ax2.set_xlim(0, 3.0)
ax2.set_ylim(0, 1.1*np.max(hist))
ax2.set_xlabel(r'$\mathrm{\chi^2_{dof}(r)}$')
ax2.set_ylabel(r'$\mathrm{dN/d\chi^2_{dof}}$')
ax3 = plt.subplot(2,2,3)
ax3.set_xlim(13, 23)
ax3.set_ylim(0, 0.1)
ax3.set_xlabel(r'$\mathrm{rMag}$')
ax3.set_ylabel(r'$\mathrm{rRms}$')
if (0):
ax3.scatter(rmag, rrms, s=3, alpha=0.5)
else:
col0 = rmag
col1 = rrms
# 2D-histogram
im3 = ax3.hexbin(col0, col1, bins='log', cmap=plt.cm.viridis,
mincnt=1, extent=(13, 22.0, 0, 0.1))
# color bar
# cb = plt.colorbar(im,label='log(N)')
ax4 = plt.subplot(2,2,4)
ax4.set_xlim(13, 22)
ax4.set_ylim(0, 3.0)
ax4.set_xlabel(r'$\mathrm{rMag}$')
ax4.set_ylabel(r'$\mathrm{\chi^2_{dof}(r)}$')
if (0):
ax4.scatter(rmag, rchi2, s=6, alpha=0.5)
else:
col0 = rmag[rchi2<3.0]
col1 = rchi2ok
# 2D-histogram
im4 = ax4.hexbin(col0, col1, bins='log', cmap=plt.cm.viridis,
mincnt=1, extent=(13, 22.0, 0, 3.0))
# color bar
# cb = plt.colorbar(im,label='log(N)')
outfile = outdir + "/statsFile.png"
plt.savefig(outfile)
# plt.show()
return
def processLCs(LCdir):
# Read Karun's file with LC data
filein = LCdir + "/LC_dirs_fils.lst"
fileout = LCdir + "/LC_dirs_fils.stats"
Nlines = 0
NlineMin = 2
NlineMax = 2000001
EpochSum = 0
mssgStep = 1000
fout = open(fileout, "w")
fout.write(" TAImin dT Ndata rmed rrms rchi2 LCfile\n")
with open(filein, "r") as f:
for line in f:
Nlines += 1
if (Nlines == Nlines/mssgStep*mssgStep):
print Nlines
lineS = line.split()
if ((Nlines >= NlineMin) and (Nlines <= NlineMax)):
RAdir = lineS[3]
DECdir = lineS[4]
LCfile = lineS[5]
Nepoch = int(lineS[6])
EpochSum += Nepoch
LCpath = RAdir + "/" + DECdir + "/" + LCfile
LCfullpath = LCdir + "/" + LCpath
# test that it's not a file without data (i.e. only 2 header lines)
lcf = open(LCfullpath, "r")
lines = lcf.readlines()
if (len(lines)>2):
r1, r2, r3, r4, r5, r6 = processLC(LCfullpath)
s = str("%.4f " % r1) + str("%.2f " % r2) + str("%3.0f " % r3) + str("%.3f " % r4)
s = s + str("%.3f " % r5) + str("%6.2f " % r6) + LCpath + "\n"
fout.write(s)
else:
print 'EMPTY FILE: ', LCpath
fout.close()
print 'EpochSum = ', EpochSum
return
if(0):
LCdir = "/Users/ivezic/Work/Science/CalibrationV2/Data/testLCdir"
processLCs(LCdir)
statsfile = LCdir + "/LC_dirs_fils.stats"
analyzeSTATSfile(statsfile, LCdir)
# plot in statsFile_r.png
# 481,000 with 15<r<20 and Ndata>10 (162,000 with Ndata>20)
| gpl-3.0 |
jakeown/GAS | GAS/gauss_fit.py | 1 | 9175 | from spectral_cube import SpectralCube
from astropy.io import fits
import matplotlib.pyplot as plt
import astropy.units as u
import numpy as np
from scipy.optimize import curve_fit
from scipy import *
import time
import pprocess
from astropy.convolution import convolve
import radio_beam
import sys
def gauss_fitter(region = 'Cepheus_L1251', snr_min = 3.0, mol = 'C2S', vmin = 5.0, vmax=10.0, convolve=False, use_old_conv=False, multicore = 1, file_extension = None):
"""
Fit a Gaussian to non-NH3 emission lines from GAS.
It creates a cube for the best-fit Gaussian, a cube
for the best-fit Gaussian with noise added back into
the spectrum, and a parameter map of Tpeak, Vlsr, and FWHM
Parameters
----------
region : str
Name of region to reduce
snr_min : float
Lowest signal-to-noise pixels to include in the line-fitting
mol : str
name of molecule to fit
vmin : numpy.float
Minimum centroid velocity, in km/s.
vmax : numpy.float
Maximum centroid velocity, in km/s.
convolve : bool or float
If not False, specifies the beam-size to convolve the original map with
Beam-size must be given in arcseconds
use_old_conv : bool
If True, use an already convolved map with name:
region + '_' + mol + file_extension + '_conv.fits'
This convolved map must be in units of km/s
multicore : int
Maximum number of simultaneous processes desired
file_extension: str
filename extension
"""
if file_extension:
root = file_extension
else:
# root = 'base{0}'.format(blorder)
root = 'all'
molecules = ['C2S', 'HC7N_22_21', 'HC7N_21_20', 'HC5N']
MolFile = '{0}/{0}_{2}_{1}.fits'.format(region,root,mol)
ConvFile = '{0}/{0}_{2}_{1}_conv.fits'.format(region,root,mol)
GaussOut = '{0}/{0}_{2}_{1}_gauss_cube.fits'.format(region,root,mol)
GaussNoiseOut = '{0}/{0}_{2}_{1}_gauss_cube_noise.fits'.format(region,root,mol)
ParamOut = '{0}/{0}_{2}_{1}_param_cube.fits'.format(region,root,mol)
# Load the spectral cube and convert to velocity units
cube = SpectralCube.read(MolFile)
cube_km = cube.with_spectral_unit(u.km / u.s, velocity_convention='radio')
# If desired, convolve map with larger beam
# or load previously created convolved cube
if convolve:
cube = SpectralCube.read(MolFile)
cube_km_1 = cube.with_spectral_unit(u.km / u.s, velocity_convention='radio')
beam = radio_beam.Beam(major=convolve*u.arcsec, minor=convolve*u.arcsec, pa=0*u.deg)
cube_km = cube_km_1.convolve_to(beam)
cube_km.write(ConvFile, format='fits', overwrite=True)
if use_old_conv:
cube_km = SpectralCube.read(ConvFile)
# Define the spectral axis in km/s
spectra_x_axis_kms = np.array(cube_km.spectral_axis)
# Find the channel range corresponding to vmin and vmax
# -- This is a hold-over from when I originally set up the code to
# use a channel range rather than velocity range.
# Can change later, but this should work for now.
low_channel = np.where(spectra_x_axis_kms<=vmax)[0][0]+1 # Add ones to change index to channel
high_channel = np.where(spectra_x_axis_kms>=vmin)[0][-1]+1 # Again, hold-over from older setup
peak_channels = [low_channel, high_channel]
# Create cubes for storing the fitted Gaussian profiles
# and the Gaussians with noise added back into the spectrum
header = cube_km.header
cube_gauss = np.array(cube_km.unmasked_data[:,:,:])
cube_gauss_noise = np.array(cube_km.unmasked_data[:,:,:])
shape = np.shape(cube_gauss)
# Set up a cube for storing fitted parameters
    param_cube = np.zeros((6, shape[1], shape[2]))
param_header = cube_km.header
# Define the Gaussian profile
def p_eval(x, a, x0, sigma):
return a*np.exp(-(x-x0)**2/(2*sigma**2))
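    # p_eval is the standard Gaussian profile a * exp(-(x - x0)**2 / (2 * sigma**2));
    # for example p_eval(0., 1., 0., 1.) == 1.0 and p_eval(1., 1., 0., 1.) is about 0.607.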
# Create some arrays full of NANs
# To be used in output cubes if fits fail
nan_array=np.empty(shape[0]) # For gauss cubes
nan_array[:] = np.NAN
nan_array2=np.empty(param_cube.shape[0]) # For param cubes
nan_array2[:] = np.NAN
# Loop through each pixel and find those
# with SNR above snr_min
x = []
y = []
pixels = 0
for (i,j), value in np.ndenumerate(cube_gauss[0]):
spectra=np.array(cube_km.unmasked_data[:,i,j])
if (False in np.isnan(spectra)):
rms = np.nanstd(np.append(spectra[0:(peak_channels[0]-1)], spectra[(peak_channels[1]+1):len(spectra)]))
if (max(spectra[peak_channels[0]:peak_channels[1]]) / rms) > snr_min:
pixels+=1
x.append(i)
y.append(j)
else:
cube_gauss[:,i,j]=nan_array
param_cube[:,i,j]=nan_array2
cube_gauss_noise[:,i,j]=nan_array
print str(pixels) + ' Pixels above SNR=' + str(snr_min)
# Define a Gaussian fitting function for each pixel
# i, j are the x,y coordinates of the pixel being fit
def pix_fit(i,j):
spectra = np.array(cube_km.unmasked_data[:,i,j])
# Use the peak brightness Temp within specified channel
# range as the initial guess for Gaussian height
max_ch = np.argmax(spectra[peak_channels[0]:peak_channels[1]])
Tpeak = spectra[peak_channels[0]:peak_channels[1]][max_ch]
# Use the velocity of the brightness Temp peak as
# initial guess for Gaussian mean
vpeak = spectra_x_axis_kms[peak_channels[0]:peak_channels[1]][max_ch]
rms = np.std(np.append(spectra[0:(peak_channels[0]-1)], spectra[(peak_channels[1]+1):len(spectra)]))
err1 = np.zeros(shape[0])+rms
# Create a noise spectrum based on rms of off-line channels
# This will be added to best-fit Gaussian to obtain a noisy Gaussian
noise=np.random.normal(0.,rms,len(spectra_x_axis_kms))
# Define initial guesses for Gaussian fit
guess = [Tpeak, vpeak, 0.3] # [height, mean, sigma]
try:
coeffs, covar_mat = curve_fit(p_eval, xdata=spectra_x_axis_kms, ydata=spectra, p0=guess, sigma=err1, maxfev=500)
gauss = np.array(p_eval(spectra_x_axis_kms,coeffs[0], coeffs[1], coeffs[2]))
noisy_gauss = np.array(p_eval(spectra_x_axis_kms,coeffs[0], coeffs[1], coeffs[2]))+noise
params = np.append(coeffs, (covar_mat[0][0]**0.5, covar_mat[1][1]**0.5, covar_mat[2][2]**0.5))
# params = ['Tpeak', 'VLSR','sigma','Tpeak_err','VLSR_err','sigma_err']
# Don't accept fit if fitted parameters are non-physical or too uncertain
if (params[0] < 0.01) or (params[3] > 1.0) or (params[2] < 0.05) or (params[5] > 0.5) or (params[4] > 0.75):
noisy_gauss = nan_array
gauss = nan_array
params = nan_array2
# Don't accept fit if the SNR for fitted spectrum is less than SNR threshold
#if max(gauss)/rms < snr_min:
# noisy_gauss = nan_array
# gauss = nan_array
# params = nan_array2
except RuntimeError:
noisy_gauss = nan_array
gauss = nan_array
params = nan_array2
return i, j, gauss, params, noisy_gauss
# Parallel computation:
nproc = multicore # maximum number of simultaneous processes desired
queue = pprocess.Queue(limit=nproc)
calc = queue.manage(pprocess.MakeParallel(pix_fit))
tic=time.time()
counter = 0
# Uncomment to see some plots of the fitted spectra
#for i,j in zip(x,y):
#pix_fit(i,j)
#plt.plot(spectra_x_axis_kms, spectra, color='blue', drawstyle='steps')
#plt.plot(spectra_x_axis_kms, gauss, color='red')
#plt.show()
#plt.close()
# Begin parallel computations
# Store the best-fit Gaussians and parameters
# in their correct positions in the previously created cubes
for i,j in zip(x,y):
calc(i,j)
for i,j,gauss_spec,parameters,noisy_gauss_spec in queue:
cube_gauss[:,i,j]=gauss_spec
param_cube[:,i,j]=parameters
cube_gauss_noise[:,i,j]=noisy_gauss_spec
counter+=1
print str(counter) + ' of ' + str(pixels) + ' pixels completed \r',
sys.stdout.flush()
print "\n %f s for parallel computation." % (time.time() - tic)
# Save final cubes
# These will be in km/s units.
# Spectra will have larger values to the left, lower values to right
cube_final_gauss = SpectralCube(data=cube_gauss, wcs=cube_km.wcs, header=cube_km.header)
cube_final_gauss.write(GaussOut, format='fits', overwrite=True)
cube_final_gauss_noise = SpectralCube(data=cube_gauss_noise, wcs=cube_km.wcs, header=cube_km.header)
cube_final_gauss_noise.write(GaussNoiseOut, format='fits', overwrite=True)
# Construct appropriate header for param_cube
param_header['NAXIS3'] = len(nan_array2)
param_header['WCSAXES'] = 3
param_header['CRPIX3'] = 1
param_header['CDELT3'] = 1
param_header['CRVAL3'] = 0
param_header['PLANE1'] = 'Tpeak'
param_header['PLANE2'] = 'VLSR'
param_header['PLANE3'] = 'sigma'
param_header['PLANE5'] = 'Tpeak_err'
param_header['PLANE6'] = 'VLSR_err'
param_header['PLANE7'] = 'sigma_err'
fits.writeto(ParamOut, param_cube, header=param_header, clobber=True)
### Examples ###
# Fit the HC5N data in Cepheus_L1251, without convolution
#gauss_fitter(region = 'Cepheus_L1251', snr_min = 7.0, mol = 'HC5N', vmin=-6.3, vmax=-2.2, multicore=3)
# Convolve the HC5N data in Cepheus_L1251 to a spatial resolution of 64 arcseconds,
# then fit a Gaussian to all pixels above SNR=3
#gauss_fitter(region = 'Cepheus_L1251', direct = '/Users/jkeown/Desktop/GAS_dendro/', snr_min = 3.0, mol = 'HC5N', peak_channels = [402,460], convolve=64., use_old_conv=False)
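# Minimal sketch of the underlying fit itself, kept commented out like the examples above
# (synthetic spectrum with made-up numbers; independent of any GAS data on disk):
#x = np.linspace(-10., 10., 200)
#spec = 1.2*np.exp(-(x - 2.0)**2/(2*0.3**2)) + np.random.normal(0., 0.05, x.size)
#coeffs, covar = curve_fit(lambda v, a, x0, s: a*np.exp(-(v - x0)**2/(2*s**2)), x, spec, p0=[1., 2., 0.3])
#print coeffs # roughly [1.2, 2.0, 0.3]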
| mit |
facebookresearch/ParlAI | parlai/crowdsourcing/tasks/turn_annotations_static/analysis/compile_results.py | 1 | 23052 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import json
import os
from datetime import datetime
from typing import Any, Dict, Optional, Tuple
import numpy as np
import pandas as pd
from parlai.crowdsourcing.utils.analysis import AbstractTurnAnnotationResultsCompiler
class TurnAnnotationsStaticResultsCompiler(AbstractTurnAnnotationResultsCompiler):
"""
Class to compile results from static turn annotations.
Change PROBLEM_BUCKETS in task_config/annotation_buckets.json to be the buckets that
you are asking crowdsource workers to annotate with.
"""
NUM_SUBTASKS = 7
LIVE_ONBOARDING_IS_LAST_SUBTASK = True
LIVE_ONBOARDING_THRESHOLD = 0.5
INFLIGHT_ONBOARDING_DATA = None
NUM_ANNOTATIONS = 5
FILENAME_STUB = 'results'
CALCULATE_STATS_INTERANNOTATOR_AGREEMENT = True
@classmethod
def setup_args(cls):
parser = super().setup_args()
parser.add_argument(
'--onboarding-in-flight-data-file',
type=str,
help='Path to JSONL file containing onboarding in-flight conversations',
)
parser.add_argument(
'--gold-annotations-file',
type=str,
default=None,
help='Path to a JSON file mapping utterance IDs to the gold annotations',
)
return parser
def __init__(self, opt: Dict[str, Any]):
super().__init__(opt)
self.onboarding_in_flight_data_file = opt.get('onboarding_in_flight_data_file')
self.gold_annotations_file = opt.get('gold_annotations_file')
def get_data_paths_mephisto(self, task_run_id_folder):
"""
Get all the individual folders with data from the <task_run_id> path we are
given as input.
In Mephisto the structure is:
/<project_id>/<task_run_id>/<assignment_id>/<agent_id>/
Side note: assignment_id == HIT ID
"""
# TODO: replace direct folder access with a call to
# mephisto.tools.data_browser.DataBrowser
read_folders = []
for assignment_id in os.listdir(task_run_id_folder):
if assignment_id in ['onboarding', 'reservations', 'build', '.', '..']:
continue
assignment_folder = os.path.join(task_run_id_folder, assignment_id)
if os.path.isdir(assignment_folder):
if len(os.listdir(assignment_folder)) > 2:
print(
f'Had more than one HIT in folder: {assignment_folder}, had {len(os.listdir(assignment_folder))} folders.'
)
for agent_id in os.listdir(assignment_folder):
if os.path.isdir(os.path.join(assignment_folder, agent_id)):
full_path = os.path.join(
task_run_id_folder,
assignment_id,
agent_id,
'agent_data.json',
)
read_folders.append(full_path)
return read_folders
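        # For example (hypothetical IDs), each returned entry looks like
        #   <task_run_id_folder>/<assignment_id>/<agent_id>/agent_data.json
        # i.e. one agent_data.json per (assignment, agent) pair found on disk.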
def get_results_path_base(self) -> str:
now = datetime.now()
return os.path.join(
self.output_folder, f'{self.FILENAME_STUB}_{now.strftime("%Y%m%d_%H%M%S")}'
)
def compile_results(self) -> pd.DataFrame:
# Loads data from files and gets rid of incomplete or malformed convos
conversations = self.compile_initial_results(self.results_folders)
master_dataframe = self.process_data_into_dataframe(conversations)
self.calculate_basic_interannotator_agreement(master_dataframe)
if self.gold_annotations_file is not None:
with open(self.gold_annotations_file, 'r') as gold_f:
gold_annotations = json.loads(gold_f.read())
self.calculate_agreement_with_gold_annotations(
gold_annotations, master_dataframe
)
if self.CALCULATE_STATS_INTERANNOTATOR_AGREEMENT:
self.calculate_stats_interannotator_agreement(master_dataframe)
return master_dataframe
def _validate_hit(self, hit_data) -> Tuple[bool, Optional[str]]:
"""
Validate an entire HIT.
:return: tuple (is_valid, reason)
"""
if 'outputs' not in hit_data or hit_data['outputs'] is None:
return False, 'Malformed HIT'
subtasks = hit_data['outputs']['final_data']
if len(subtasks) != self.NUM_SUBTASKS:
return False, f'Incomplete HIT with subtask length {len(subtasks)}.'
return True, None
def _validate_subtask(self, subtask_data) -> Tuple[bool, Optional[str]]:
"""
Validate a conversation subtask within the HIT.
:return: tuple (is_valid, reason)
"""
# Check that the conversation consists of pairs of comments between
# agents 0 and 1, with 0 speaking first
try:
assert all(
[
utterance_data['agent_idx'] == turn_idx % 2
for turn_idx, utterance_data in enumerate(subtask_data)
]
)
messages_0 = [utt for utt in subtask_data if utt['agent_idx'] == 0]
messages_1 = [utt for utt in subtask_data if utt['agent_idx'] == 1]
assert len(messages_0) + len(messages_1) == len(subtask_data)
except Exception:
return False, f'Data not in form expected. Length is: {len(subtask_data)}'
for utterance_data in subtask_data:
if (
utterance_data['agent_idx'] == 1
and self.problem_buckets[0] not in utterance_data
):
return (
False,
f'Bot utterance was malformed and had no problem annotation fields (Failed to find key: {self.problem_buckets[0]}).',
)
return True, None
def _get_inflight_onboarding_success_from_subtask(self, subtask):
if self.INFLIGHT_ONBOARDING_DATA is None:
self.INFLIGHT_ONBOARDING_DATA = self.setup_inflight_onboarding_data()
onboarding_utterance = subtask['data'][-1]
num_answers = 0
num_correct = 0
num_incorrect = 0
for d in self.INFLIGHT_ONBOARDING_DATA:
if d['dialog'][-1][-1]['text'] == onboarding_utterance['text']:
num_answers = len(d['answers'])
for pb in self.problem_buckets:
if pb in d['answers'] and onboarding_utterance[pb]:
num_correct += 1
if onboarding_utterance[pb] and pb not in d['answers']:
num_incorrect += 1
return num_correct / num_answers
def compile_initial_results(self, results_folders) -> list:
"""
Do initial loading and processing of crowdsource data Loads data from all the
worker ID files and gets rid of incomplete or malformed convos.
Also adds fields such as worker_id, assignment_id, etc for convenience
:return: list of JSON objects which represent a conversation with
annotations d["data"] of each has an array of utterance level data
"""
print('Starting compile_initial_results...')
all_data_paths = []
for f in results_folders:
# Each one is a HIT completed by a given worker (so if
# units-per-assignment > 1), then will include the same conversations
# multiple times annotated by different workers
data_paths = self.get_data_paths_mephisto(f)
all_data_paths.extend(data_paths)
print(f'Got {len(all_data_paths)} folders to read.')
conversations = []
task_completion_times = []
for dp in all_data_paths:
# Read in file
with open(os.path.join(dp), 'rb') as f:
data = json.load(f)
worker_id = dp.split('/')[-2]
hit_id = dp.split('/')[-3]
_ = dp.split('/')[-4] # Task run
(is_valid_hit, reason) = self._validate_hit(data)
if not is_valid_hit:
print(
f'Skipping invalid HIT {hit_id}, worker_id: {worker_id} for reason: {reason}.'
)
continue
# HIT-level metric of HIT completion time has to be done here for now
task_completion_time_seconds = (
data['times']['task_end'] - data['times']['task_start']
)
print(task_completion_time_seconds)
subtasks = data['outputs']['final_data']
if self.LIVE_ONBOARDING_IS_LAST_SUBTASK:
qc_success_pct = self._get_inflight_onboarding_success_from_subtask(
subtasks[-1]
)
else:
qc_success_pct = 0.0
for subtask_idx, d in enumerate(subtasks):
if (
subtask_idx == (len(subtasks) - 1)
and self.LIVE_ONBOARDING_IS_LAST_SUBTASK
):
# Last subtask is inflight onboarding; don't include it
continue
subtask_data = copy.deepcopy(d)
# Structure of each subtask is {'subtask_index': XX, 'data': [...]}
(is_valid_subtask, reason) = self._validate_subtask(d['data'])
if not is_valid_subtask:
print(
f'Skipping invalid subtask within HIT: {hit_id}, worker_id: {worker_id} for reason: {reason}.'
)
continue
subtask_data['worker_id'] = worker_id
subtask_data['hit_id'] = hit_id
subtask_data['folder'] = dp
subtask_data['subtask_idx'] = subtask_idx
experimental_design = 'self_chat'
subtask_data['model_nickname'] = experimental_design + '/' + 'TODO'
subtask_data['qc_success_pct'] = qc_success_pct
conversations.append(subtask_data)
task_completion_times.append(task_completion_time_seconds)
if len(task_completion_times) > 0:
print(
f'Average task completion time (seconds) was: '
f'{np.average(task_completion_times):0.1f}'
)
if len(conversations) == 0:
raise ValueError('No conversations found!')
return conversations
def process_data_into_dataframe(self, conversations) -> pd.DataFrame:
"""
Return one big dataframe of all conversations where a row is an utterance and
its problem annotations.
"""
print('Starting process_data_into_dataframe...')
rows = []
for _, convo in enumerate(conversations):
for turn_idx, utt in enumerate(convo['data']):
row = {
'annotation_id': f'{convo["hit_id"]}_{convo["subtask_idx"]}_{turn_idx}_{convo["worker_id"]}',
'conversation_id': f'{convo["hit_id"]}_{convo["subtask_idx"]}',
'utterance_id': f'{convo["hit_id"]}_{convo["subtask_idx"]}_{turn_idx}',
'turn_idx': turn_idx,
'agent_idx': utt['agent_idx'],
'folder': convo['folder'],
'worker_id': convo['worker_id'],
'hit_id': convo['hit_id'],
'model_nickname': convo['model_nickname'],
'qc_success_pct': convo['qc_success_pct'],
'text': utt['text'],
}
row = self._add_additional_columns(row=row, utt=utt)
for k in self.problem_buckets:
row[k] = utt[k] if utt['agent_idx'] == 1 else ''
rows.append(row)
df = pd.DataFrame(rows)
print(f'Returning master dataframe with {len(df)} annotations.')
return df
def _add_additional_columns(self, row: Dict[str, Any], utt: dict) -> Dict[str, Any]:
"""
Add additional columns to the results dataframe.
If you wish to add additional columns to the results dataframe, use the input
utterance dict `utt` to define new keys in `row`, which will form one row in the
final results dataframe.
"""
_ = utt
# The utt dict isn't used here, but may be used in subclasses.
return row
def calculate_basic_interannotator_agreement(self, df: pd.DataFrame) -> None:
print('Starting calculate_interannotator_agreement...')
# Drops the human utterances which don't have problem buckets
bot_only_df = df.replace('', np.nan)
# Get rid of the None's, which occur if there were no checkboxes (so if
# last utterance only option selected)
bot_only_df = bot_only_df.fillna(value=np.nan)
bot_only_df = bot_only_df.dropna()
bot_only_df = self._problem_bucket_specific_filtering(bot_only_df)
# Group at the utterance level (summing across workers)
bot_only_df = bot_only_df.replace(True, 1)
bot_only_df = bot_only_df.replace(False, 0)
all_bot_annotations_count = len(bot_only_df)
# Remove utterances that don't have self.NUM_ANNOTATIONS annotations
counted_df = bot_only_df.groupby(['utterance_id']).count()
counted_df = counted_df[counted_df == self.NUM_ANNOTATIONS].dropna()
bot_only_df = bot_only_df[bot_only_df['utterance_id'].isin(counted_df.index)]
print(
f'Removed {all_bot_annotations_count - len(bot_only_df)} that did not have annotations by {self.NUM_ANNOTATIONS} workers. {len(bot_only_df)} annotations remaining.'
)
if self.LIVE_ONBOARDING_IS_LAST_SUBTASK:
# Remove those that didn't get enough right on live onboarding
bot_only_df = bot_only_df[
bot_only_df['qc_success_pct'] >= self.LIVE_ONBOARDING_THRESHOLD
]
summed_df = bot_only_df.groupby(['utterance_id']).sum()
print(f'summed_df has length {len(summed_df)}; bot_only_df: {len(bot_only_df)}')
utterance_ids = bot_only_df['utterance_id'].unique()
print(f'Number of unique utterance_ids: {len(utterance_ids)}.')
if 'any_problem' in summed_df:
# We've computed a column marking if any problem exists, so include this
extended_problem_buckets = self.problem_buckets + ['any_problem']
else:
extended_problem_buckets = self.problem_buckets
for k in extended_problem_buckets:
one_annotator = len(summed_df[summed_df[k] == 1])
two_annotators = len(summed_df[summed_df[k] == 2])
three_annotators = len(summed_df[summed_df[k] >= 3])
total_problem_annotations = (
one_annotator + two_annotators + three_annotators
)
total_utterances = len(summed_df[k])
if total_problem_annotations > 0:
print(
f'Bucket: {k}, total unique problem utterances: {total_problem_annotations} ({total_problem_annotations/total_utterances:.1%} of all), one annotator: {one_annotator} ({one_annotator/total_problem_annotations:.1%}), two_annotators: {two_annotators} ({two_annotators/total_problem_annotations:.1%}), three+ annotators: {three_annotators} ({three_annotators/total_problem_annotations:.1%})'
)
def _problem_bucket_specific_filtering(
self, bot_only_df: pd.DataFrame
) -> pd.DataFrame:
"""
Filter the bot responses given the specific problem buckets being used.
"""
non_none_problem_buckets = [
bucket for bucket in self.problem_buckets if bucket != 'none_all_good'
]
assert len(set(non_none_problem_buckets)) + 1 == len(self.problem_buckets)
# Make sure problem buckets are all unique
utterance_count_total = len(bot_only_df)
# Select which columns are consistent
is_consistent = (
bot_only_df[non_none_problem_buckets[0]] + bot_only_df['none_all_good']
) < 2
for bucket in non_none_problem_buckets[1:]:
is_consistent = is_consistent & (
(bot_only_df[bucket] + bot_only_df['none_all_good']) < 2
)
bot_only_df['is_consistent'] = is_consistent
bot_only_df = bot_only_df[bot_only_df['is_consistent']]
# If any of the problem buckets have marked True
any_problem = bot_only_df[non_none_problem_buckets[0]]
for bucket in non_none_problem_buckets[1:]:
any_problem = any_problem | bot_only_df[bucket]
bot_only_df['any_problem'] = any_problem
print(
f'Dropped {utterance_count_total - len(bot_only_df)} inconsistently annotated utterances (none_all_good and a problem bucket). Now have {len(bot_only_df)} utterances.'
)
return bot_only_df
def calculate_agreement_with_gold_annotations(
self, gold_annotations, df: pd.DataFrame
) -> None:
"""
Assume gold_annotations are a dictionary of the form {utterance_id : {bucket_i:
true/false}} where utterance_id is taken from the compile_initial_results (i.e.
mephistohitid_subtaskindex_utteranceidx)
"""
print('Starting calculate_agreement_with_gold_annotations...')
# Drops the human utterances which don't have problem buckets
bot_only_df = df.replace('', np.nan)
# Get rid of the None's, which occur if there were no checkboxes (so if
# last utterance only option selected)
bot_only_df = bot_only_df.fillna(value=np.nan)
bot_only_df = bot_only_df.dropna()
# Include only utterances that have gold_annotations
bot_only_df = bot_only_df[
bot_only_df['utterance_id'].isin(gold_annotations.keys())
]
print(
f'Got {len(gold_annotations.keys())} utterances with gold annotations. Found {len(bot_only_df)} utterances matching gold annotations from DataFrame.'
)
agreement_map = {pb: [] for pb in self.problem_buckets}
agreement_map_problem_only = {pb: [] for pb in self.problem_buckets}
problem_counts = {pb: 0 for pb in self.problem_buckets}
for utterance_id, gold in gold_annotations.items():
utterance_df = bot_only_df[bot_only_df['utterance_id'] == utterance_id]
count_workers = len(utterance_df)
for pb in self.problem_buckets:
gold_annotation = gold[pb]
match_count = utterance_df[utterance_df[pb] == gold[pb]].count()[pb]
a = float(match_count / count_workers)
agreement_map[pb].append(a)
if gold_annotation:
agreement_map_problem_only[pb].append(a)
problem_counts[pb] += 1
print(
f'------------------------\nAverage agreement with {len(gold_annotations)} total gold utterances annotated was:'
)
for pb in self.problem_buckets:
print(
f'{pb}: {np.average(agreement_map[pb]):.1%} ({problem_counts[pb]} gold problem samples)'
)
print('------------------------')
print(
f'------------------------\nAverage agreement problem samples only with {len(gold_annotations)} total gold utterances annotated was:'
)
for pb in self.problem_buckets:
print(
f'{pb}: {np.average(agreement_map_problem_only[pb]):.1%} ({problem_counts[pb]} gold problem samples)'
)
print('------------------------')
def calculate_stats_interannotator_agreement(self, df: pd.DataFrame):
print('Starting calculate_stats_interannotator_agreement...')
# Get rid of the human utterances (non-annotated)
bot_only_df = df.replace('', np.nan)
# Get rid of the None's, which occur if there were no checkboxes (so if
# last utterance only option selected)
bot_only_df = bot_only_df.fillna(value=np.nan)
bot_only_df = bot_only_df.dropna()
print(f'Calculating agreement on {len(bot_only_df)} annotations.')
for pb in self.problem_buckets:
# Expects a df of rater_id, item_id and "data" column
kappa_df = df[['annotation_id', 'worker_id', 'utterance_id', pb]]
kappa_df = kappa_df.rename(
columns={'worker_id': 'rater_id', 'utterance_id': 'item_id', pb: 'data'}
)
try:
fleiss_kappa = self.compute_fleiss_kappa(
kappa_df, [True, False], self.NUM_ANNOTATIONS
)
except Exception as exc:
print(f'Exception calculating Fleiss Kappa: {exc}. Skipping.')
continue
print(f'Fleiss\' kappa for {pb} is: {fleiss_kappa:0.3f}.')
def compute_fleiss_kappa(
self, df: pd.DataFrame, categories: list, number_of_raters: int
) -> float:
"""
Expects a df of index, rater_id, item_id and "data" column with each row a label
of one of the categories.
"""
# categories are "True" and "False"
# As per wikipedia definition: https://en.wikipedia.org/wiki/Fleiss%27_kappa
items_list = df.drop_duplicates(subset=['item_id'])['item_id'].to_list()
N = len(items_list)
p_j = np.zeros(len(categories))
P_bar_sum_term = 0.0
for item in items_list:
df_item_annotations = df[df['item_id'] == item]
if len(df_item_annotations) != number_of_raters:
continue
for j, c in enumerate(categories):
try:
n_ij = df_item_annotations['data'].value_counts()[c]
except Exception:
n_ij = 0.0
p_j[j] += n_ij
P_bar_sum_term += n_ij ** 2
p_j = [tmp / (N * number_of_raters) for tmp in p_j]
P_e_bar = sum([tmp ** 2 for tmp in p_j])
P_bar = (P_bar_sum_term - N * number_of_raters) / (
N * number_of_raters * (number_of_raters - 1)
)
kappa = (P_bar - P_e_bar) / (1 - P_e_bar)
return kappa
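    # Usage sketch (hypothetical values): with two raters per item and
    #   kappa_df = pd.DataFrame({'item_id': [1, 1, 2, 2], 'rater_id': ['a', 'b', 'a', 'b'],
    #                            'data': [True, True, False, False]})
    # self.compute_fleiss_kappa(kappa_df, [True, False], 2) measures agreement beyond chance;
    # unanimous ratings that still use both categories, as above, give kappa = 1.0.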
def setup_inflight_onboarding_data(self):
print('setup_inflight_onboarding_data')
raw_qc_convos = []
with open(self.onboarding_in_flight_data_file, "r") as f:
line = f.readline()
while line:
qc_convo = json.loads(line)
raw_qc_convos.append(qc_convo)
line = f.readline()
return raw_qc_convos
if __name__ == '__main__':
parser_ = TurnAnnotationsStaticResultsCompiler.setup_args()
args = parser_.parse_args()
TurnAnnotationsStaticResultsCompiler(vars(args)).compile_and_save_results()
| mit |
PDuckworth/strands_qsr_lib | qsr_lib/dbg/dbg_rcc3.py | 8 | 5041 | #!/usr/bin/python
# import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Rectangle
def return_bounding_box_2d(x, y, xsize, ysize):
"""Return the bounding box
:param x: x center
:param y: y center
:param xsize: x size
:param ysize: y size
:return: list(x1, y1, x2, y2) where (x1, y1) and (x2, y2) are the coordinates of the diagonal points of the
bounding box depending on your coordinates frame
"""
if xsize <= 0 or ysize <= 0:
print("ERROR: can't compute bounding box, xsize or height has no positive value")
return []
return [x-xsize/2, y-ysize/2, x+xsize/2, y+ysize/2]
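# e.g. a 2x2 box centred on (2, 2): return_bounding_box_2d(2., 2., 2., 2.) -> [1.0, 1.0, 3.0, 3.0]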
def compute_qsr(bb1, bb2):
"""Return symmetrical RCC3 relation
:param bb1: first bounding box (x_bottom_left, y_bottom_left, x_top_right, y_top_right)
:param bb2: second bounding box (x_bottom_left, y_bottom_left, x_top_right, y_top_right)
:return: an RCC3 relation from the following: 'dc':disconnected, 'po':partial overlap, 'o': occluded/part of
"""
bboxes_intercept_v, rabx, raxPrbx, raby, rayPrby = bboxes_intercept(bb1, bb2)
if bboxes_intercept_v:
if rabx > 0.0 or raby > 0:
return "po", -1
else:
occluded_points = count_occluded_points(bb1, bb2)
if occluded_points >= 4:
return "o", occluded_points
else:
return "po", occluded_points
else:
return "dc", 0
def count_occluded_points(bb1, bb2):
occluded_points = 0
bb1_4corners = ((bb1[0], bb1[1]),
(bb1[2], bb1[1]),
(bb1[2], bb1[3]),
(bb1[0], bb1[3]))
bb2_4corners = ((bb2[0], bb2[1]),
(bb2[2], bb2[1]),
(bb2[2], bb2[3]),
(bb2[0], bb2[3]))
for p in bb1_4corners:
if is_point_in_rectangle(p, bb2):
occluded_points += 1
for p in bb2_4corners:
if is_point_in_rectangle(p, bb1):
occluded_points += 1
return occluded_points
def is_point_in_rectangle(p, r, d=0.):
    # True if point p lies inside rectangle r = (x1, y1, x2, y2), allowing a margin d on each side
    return p[0] >= r[0]-d and p[0] <= r[2]+d and p[1] >= r[1]-d and p[1] <= r[3]+d
def bboxes_intercept(bb1, bb2):
"""
https://rbrundritt.wordpress.com/2009/10/03/determining-if-two-bounding-boxes-overlap/
:param bb1: first bounding box (x_bottom_left, y_bottom_left, x_top_right, y_top_right)
:param bb2: second bounding box (x_bottom_left, y_bottom_left, x_top_right, y_top_right)
:return:
"""
# First bounding box, top left corner, bottom right corner
ATLx = bb1[0]
ATLy = bb1[3]
ABRx = bb1[2]
ABRy = bb1[1]
# Second bounding box, top left corner, bottom right corner
BTLx = bb2[0]
BTLy = bb2[3]
BBRx = bb2[2]
BBRy = bb2[1]
rabx = abs(ATLx + ABRx - BTLx - BBRx)
raby = abs(ATLy + ABRy - BTLy - BBRy)
# rAx + rBx
raxPrbx = ABRx - ATLx + BBRx - BTLx
# rAy + rBy
rayPrby = ATLy - ABRy + BTLy - BBRy
if(rabx <= raxPrbx) and (raby <= rayPrby):
return True, rabx, raxPrbx, raby, rayPrby
else:
return False, rabx, raxPrbx, raby, rayPrby
def plot_bbs(bb1, bb2):
plt.figure()
ax = plt.gca()
# ax.invert_yaxis()
ax.add_patch(Rectangle((bb1[0], bb1[1]), bb1[2]-bb1[0], bb1[3]-bb1[1], alpha=1, facecolor="blue"))
ax.add_patch(Rectangle((bb2[0], bb2[1]), bb2[2]-bb2[0], bb2[3]-bb2[1], alpha=1, facecolor="red"))
h = 6
l = 0
# ax.set_xlim(l, h)
# ax.set_ylim(l, h)
ax.set_xlim(l, h)
ax.set_ylim(h, l)
plt.show()
# o1 = [Object_State(name="o1", timestamp=0, x=1., y=1., xsize=5., ysize=8.),
# Object_State(name="o1", timestamp=1, x=1., y=2., xsize=5., ysize=8.),
# Object_State(name="o1", timestamp=2, x=1., y=3., xsize=5., ysize=8.)]
#
# o2 = [Object_State(name="o2", timestamp=0, x=11., y=1., xsize=5., ysize=8.),
# Object_State(name="o2", timestamp=1, x=11., y=2., xsize=5., ysize=8.),
# Object_State(name="o2", timestamp=2, x=11., y=3., xsize=5., ysize=8.),
# Object_State(name="o2", timestamp=3, x=11., y=4., xsize=5., ysize=8.)]
#
# o3 = [Object_State(name="o3", timestamp=0, x=1., y=11., xsize=5., ysize=8.),
# Object_State(name="o3", timestamp=1, x=2., y=11., xsize=5., ysize=8.),
# Object_State(name="o3", timestamp=2, x=3., y=11., xsize=5., ysize=8.)]
o1 = (2.0, 2.0, 2., 2.)
o2 = (4.0, 3.0, 1., 1.)
o1 = return_bounding_box_2d(o1[0], o1[1], o1[2], o1[3])
o2 = return_bounding_box_2d(o2[0], o2[1], o2[2], o2[3])
print("o1o2:", bboxes_intercept(o1, o2))
print("o2o1:", bboxes_intercept(o2, o1))
# print("o1:", o1)
# print("o2:", o2)
print("o1o2:", compute_qsr(o1, o2))
print("o2o1:", compute_qsr(o2, o1))
plot_bbs(o1, o2)
| mit |
nmartensen/pandas | pandas/io/sas/sas7bdat.py | 5 | 27243 | """
Read SAS7BDAT files
Based on code written by Jared Hobbs:
https://bitbucket.org/jaredhobbs/sas7bdat
See also:
https://github.com/BioStatMatt/sas7bdat
Partial documentation of the file format:
https://cran.r-project.org/web/packages/sas7bdat/vignettes/sas7bdat.pdf
Reference for binary data compression:
http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm
"""
import pandas as pd
from pandas import compat
from pandas.io.common import get_filepath_or_buffer, BaseIterator
import numpy as np
import struct
import pandas.io.sas.sas_constants as const
from pandas.io.sas._sas import Parser
class _subheader_pointer(object):
pass
class _column(object):
pass
# SAS7BDAT represents a SAS data file in SAS7BDAT format.
class SAS7BDATReader(BaseIterator):
"""
Read SAS files in SAS7BDAT format.
Parameters
----------
path_or_buf : path name or buffer
Name of SAS file or file-like object pointing to SAS file
contents.
index : column identifier, defaults to None
Column to use as index.
convert_dates : boolean, defaults to True
Attempt to convert dates to Pandas datetime values. Note that
some rarely used SAS date formats may be unsupported.
blank_missing : boolean, defaults to True
Convert empty strings to missing values (SAS uses blanks to
indicate missing character variables).
chunksize : int, defaults to None
Return SAS7BDATReader object for iterations, returns chunks
with given number of lines.
encoding : string, defaults to None
String encoding.
convert_text : bool, defaults to True
If False, text variables are left as raw bytes.
convert_header_text : bool, defaults to True
If False, header text, including column names, are left as raw
bytes.
"""
def __init__(self, path_or_buf, index=None, convert_dates=True,
blank_missing=True, chunksize=None, encoding=None,
convert_text=True, convert_header_text=True):
self.index = index
self.convert_dates = convert_dates
self.blank_missing = blank_missing
self.chunksize = chunksize
self.encoding = encoding
self.convert_text = convert_text
self.convert_header_text = convert_header_text
self.default_encoding = "latin-1"
self.compression = ""
self.column_names_strings = []
self.column_names = []
self.column_types = []
self.column_formats = []
self.columns = []
self._current_page_data_subheader_pointers = []
self._cached_page = None
self._column_data_lengths = []
self._column_data_offsets = []
self._current_row_in_file_index = 0
self._current_row_on_page_index = 0
self._current_row_in_file_index = 0
self._path_or_buf, _, _ = get_filepath_or_buffer(path_or_buf)
if isinstance(self._path_or_buf, compat.string_types):
self._path_or_buf = open(self._path_or_buf, 'rb')
self.handle = self._path_or_buf
self._get_properties()
self._parse_metadata()
def close(self):
try:
self.handle.close()
except AttributeError:
pass
def _get_properties(self):
# Check magic number
self._path_or_buf.seek(0)
self._cached_page = self._path_or_buf.read(288)
if self._cached_page[0:len(const.magic)] != const.magic:
self.close()
raise ValueError("magic number mismatch (not a SAS file?)")
# Get alignment information
align1, align2 = 0, 0
buf = self._read_bytes(const.align_1_offset, const.align_1_length)
if buf == const.u64_byte_checker_value:
align2 = const.align_2_value
self.U64 = True
self._int_length = 8
self._page_bit_offset = const.page_bit_offset_x64
self._subheader_pointer_length = const.subheader_pointer_length_x64
else:
self.U64 = False
self._page_bit_offset = const.page_bit_offset_x86
self._subheader_pointer_length = const.subheader_pointer_length_x86
self._int_length = 4
buf = self._read_bytes(const.align_2_offset, const.align_2_length)
if buf == const.align_1_checker_value:
align1 = const.align_2_value
total_align = align1 + align2
# Get endianness information
buf = self._read_bytes(const.endianness_offset,
const.endianness_length)
if buf == b'\x01':
self.byte_order = "<"
else:
self.byte_order = ">"
# Get encoding information
buf = self._read_bytes(const.encoding_offset, const.encoding_length)[0]
if buf in const.encoding_names:
self.file_encoding = const.encoding_names[buf]
else:
self.file_encoding = "unknown (code=%s)" % str(buf)
# Get platform information
buf = self._read_bytes(const.platform_offset, const.platform_length)
if buf == b'1':
self.platform = "unix"
elif buf == b'2':
self.platform = "windows"
else:
self.platform = "unknown"
buf = self._read_bytes(const.dataset_offset, const.dataset_length)
self.name = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.name = self.name.decode(
self.encoding or self.default_encoding)
buf = self._read_bytes(const.file_type_offset, const.file_type_length)
self.file_type = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.file_type = self.file_type.decode(
self.encoding or self.default_encoding)
# Timestamp is epoch 01/01/1960
epoch = pd.datetime(1960, 1, 1)
x = self._read_float(const.date_created_offset + align1,
const.date_created_length)
self.date_created = epoch + pd.to_timedelta(x, unit='s')
x = self._read_float(const.date_modified_offset + align1,
const.date_modified_length)
self.date_modified = epoch + pd.to_timedelta(x, unit='s')
self.header_length = self._read_int(const.header_size_offset + align1,
const.header_size_length)
# Read the rest of the header into cached_page.
buf = self._path_or_buf.read(self.header_length - 288)
self._cached_page += buf
if len(self._cached_page) != self.header_length:
self.close()
raise ValueError("The SAS7BDAT file appears to be truncated.")
self._page_length = self._read_int(const.page_size_offset + align1,
const.page_size_length)
self._page_count = self._read_int(const.page_count_offset + align1,
const.page_count_length)
buf = self._read_bytes(const.sas_release_offset + total_align,
const.sas_release_length)
self.sas_release = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.sas_release = self.sas_release.decode(
self.encoding or self.default_encoding)
buf = self._read_bytes(const.sas_server_type_offset + total_align,
const.sas_server_type_length)
self.server_type = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.server_type = self.server_type.decode(
self.encoding or self.default_encoding)
buf = self._read_bytes(const.os_version_number_offset + total_align,
const.os_version_number_length)
self.os_version = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.os_version = self.os_version.decode(
self.encoding or self.default_encoding)
buf = self._read_bytes(const.os_name_offset + total_align,
const.os_name_length)
buf = buf.rstrip(b'\x00 ')
if len(buf) > 0:
self.os_name = buf.decode(self.encoding or self.default_encoding)
else:
buf = self._read_bytes(const.os_maker_offset + total_align,
const.os_maker_length)
self.os_name = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.os_name = self.os_name.decode(
self.encoding or self.default_encoding)
def __next__(self):
da = self.read(nrows=self.chunksize or 1)
if da is None:
raise StopIteration
return da
# Read a single float of the given width (4 or 8).
def _read_float(self, offset, width):
if width not in (4, 8):
self.close()
raise ValueError("invalid float width")
buf = self._read_bytes(offset, width)
fd = "f" if width == 4 else "d"
return struct.unpack(self.byte_order + fd, buf)[0]
# Read a single signed integer of the given width (1, 2, 4 or 8).
def _read_int(self, offset, width):
if width not in (1, 2, 4, 8):
self.close()
raise ValueError("invalid int width")
buf = self._read_bytes(offset, width)
it = {1: "b", 2: "h", 4: "l", 8: "q"}[width]
iv = struct.unpack(self.byte_order + it, buf)[0]
return iv
def _read_bytes(self, offset, length):
if self._cached_page is None:
self._path_or_buf.seek(offset)
buf = self._path_or_buf.read(length)
if len(buf) < length:
self.close()
msg = "Unable to read {:d} bytes from file position {:d}."
raise ValueError(msg.format(length, offset))
return buf
else:
if offset + length > len(self._cached_page):
self.close()
raise ValueError("The cached page is too small.")
return self._cached_page[offset:offset + length]
def _parse_metadata(self):
done = False
while not done:
self._cached_page = self._path_or_buf.read(self._page_length)
if len(self._cached_page) <= 0:
break
if len(self._cached_page) != self._page_length:
self.close()
raise ValueError(
"Failed to read a meta data page from the SAS file.")
done = self._process_page_meta()
def _process_page_meta(self):
self._read_page_header()
pt = [const.page_meta_type, const.page_amd_type] + const.page_mix_types
if self._current_page_type in pt:
self._process_page_metadata()
return ((self._current_page_type in [256] + const.page_mix_types) or
(self._current_page_data_subheader_pointers is not None))
def _read_page_header(self):
bit_offset = self._page_bit_offset
tx = const.page_type_offset + bit_offset
self._current_page_type = self._read_int(tx, const.page_type_length)
tx = const.block_count_offset + bit_offset
self._current_page_block_count = self._read_int(
tx, const.block_count_length)
tx = const.subheader_count_offset + bit_offset
self._current_page_subheaders_count = (
self._read_int(tx, const.subheader_count_length))
def _process_page_metadata(self):
bit_offset = self._page_bit_offset
for i in range(self._current_page_subheaders_count):
pointer = self._process_subheader_pointers(
const.subheader_pointers_offset + bit_offset, i)
if pointer.length == 0:
continue
if pointer.compression == const.truncated_subheader_id:
continue
subheader_signature = self._read_subheader_signature(
pointer.offset)
subheader_index = (
self._get_subheader_index(subheader_signature,
pointer.compression, pointer.ptype))
self._process_subheader(subheader_index, pointer)
def _get_subheader_index(self, signature, compression, ptype):
index = const.subheader_signature_to_index.get(signature)
if index is None:
f1 = ((compression == const.compressed_subheader_id) or
(compression == 0))
f2 = (ptype == const.compressed_subheader_type)
if (self.compression != "") and f1 and f2:
index = const.index.dataSubheaderIndex
else:
self.close()
raise ValueError("Unknown subheader signature")
return index
def _process_subheader_pointers(self, offset, subheader_pointer_index):
subheader_pointer_length = self._subheader_pointer_length
total_offset = (offset +
subheader_pointer_length * subheader_pointer_index)
subheader_offset = self._read_int(total_offset, self._int_length)
total_offset += self._int_length
subheader_length = self._read_int(total_offset, self._int_length)
total_offset += self._int_length
subheader_compression = self._read_int(total_offset, 1)
total_offset += 1
subheader_type = self._read_int(total_offset, 1)
x = _subheader_pointer()
x.offset = subheader_offset
x.length = subheader_length
x.compression = subheader_compression
x.ptype = subheader_type
return x
def _read_subheader_signature(self, offset):
subheader_signature = self._read_bytes(offset, self._int_length)
return subheader_signature
def _process_subheader(self, subheader_index, pointer):
offset = pointer.offset
length = pointer.length
if subheader_index == const.index.rowSizeIndex:
processor = self._process_rowsize_subheader
elif subheader_index == const.index.columnSizeIndex:
processor = self._process_columnsize_subheader
elif subheader_index == const.index.columnTextIndex:
processor = self._process_columntext_subheader
elif subheader_index == const.index.columnNameIndex:
processor = self._process_columnname_subheader
elif subheader_index == const.index.columnAttributesIndex:
processor = self._process_columnattributes_subheader
elif subheader_index == const.index.formatAndLabelIndex:
processor = self._process_format_subheader
elif subheader_index == const.index.columnListIndex:
processor = self._process_columnlist_subheader
elif subheader_index == const.index.subheaderCountsIndex:
processor = self._process_subheader_counts
elif subheader_index == const.index.dataSubheaderIndex:
self._current_page_data_subheader_pointers.append(pointer)
return
else:
raise ValueError("unknown subheader index")
processor(offset, length)
def _process_rowsize_subheader(self, offset, length):
int_len = self._int_length
lcs_offset = offset
lcp_offset = offset
if self.U64:
lcs_offset += 682
lcp_offset += 706
else:
lcs_offset += 354
lcp_offset += 378
self.row_length = self._read_int(
offset + const.row_length_offset_multiplier * int_len, int_len)
self.row_count = self._read_int(
offset + const.row_count_offset_multiplier * int_len, int_len)
self.col_count_p1 = self._read_int(
offset + const.col_count_p1_multiplier * int_len, int_len)
self.col_count_p2 = self._read_int(
offset + const.col_count_p2_multiplier * int_len, int_len)
mx = const.row_count_on_mix_page_offset_multiplier * int_len
self._mix_page_row_count = self._read_int(offset + mx, int_len)
self._lcs = self._read_int(lcs_offset, 2)
self._lcp = self._read_int(lcp_offset, 2)
def _process_columnsize_subheader(self, offset, length):
int_len = self._int_length
offset += int_len
self.column_count = self._read_int(offset, int_len)
if (self.col_count_p1 + self.col_count_p2 !=
self.column_count):
print("Warning: column count mismatch (%d + %d != %d)\n",
self.col_count_p1, self.col_count_p2, self.column_count)
# Unknown purpose
def _process_subheader_counts(self, offset, length):
pass
def _process_columntext_subheader(self, offset, length):
offset += self._int_length
text_block_size = self._read_int(offset, const.text_block_size_length)
buf = self._read_bytes(offset, text_block_size)
cname_raw = buf[0:text_block_size].rstrip(b"\x00 ")
cname = cname_raw
if self.convert_header_text:
cname = cname.decode(self.encoding or self.default_encoding)
self.column_names_strings.append(cname)
if len(self.column_names_strings) == 1:
compression_literal = ""
for cl in const.compression_literals:
if cl in cname_raw:
compression_literal = cl
self.compression = compression_literal
offset -= self._int_length
offset1 = offset + 16
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcp)
compression_literal = buf.rstrip(b"\x00")
if compression_literal == "":
self._lcs = 0
offset1 = offset + 32
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcp)
self.creator_proc = buf[0:self._lcp]
elif compression_literal == const.rle_compression:
offset1 = offset + 40
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcp)
self.creator_proc = buf[0:self._lcp]
elif self._lcs > 0:
self._lcp = 0
offset1 = offset + 16
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcs)
self.creator_proc = buf[0:self._lcp]
if self.convert_header_text:
if hasattr(self, "creator_proc"):
self.creator_proc = self.creator_proc.decode(
self.encoding or self.default_encoding)
def _process_columnname_subheader(self, offset, length):
int_len = self._int_length
offset += int_len
column_name_pointers_count = (length - 2 * int_len - 12) // 8
for i in range(column_name_pointers_count):
text_subheader = offset + const.column_name_pointer_length * \
(i + 1) + const.column_name_text_subheader_offset
col_name_offset = offset + const.column_name_pointer_length * \
(i + 1) + const.column_name_offset_offset
col_name_length = offset + const.column_name_pointer_length * \
(i + 1) + const.column_name_length_offset
idx = self._read_int(
text_subheader, const.column_name_text_subheader_length)
col_offset = self._read_int(
col_name_offset, const.column_name_offset_length)
col_len = self._read_int(
col_name_length, const.column_name_length_length)
name_str = self.column_names_strings[idx]
self.column_names.append(name_str[col_offset:col_offset + col_len])
def _process_columnattributes_subheader(self, offset, length):
int_len = self._int_length
column_attributes_vectors_count = (
length - 2 * int_len - 12) // (int_len + 8)
self.column_types = np.empty(
column_attributes_vectors_count, dtype=np.dtype('S1'))
self._column_data_lengths = np.empty(
column_attributes_vectors_count, dtype=np.int64)
self._column_data_offsets = np.empty(
column_attributes_vectors_count, dtype=np.int64)
for i in range(column_attributes_vectors_count):
col_data_offset = (offset + int_len +
const.column_data_offset_offset +
i * (int_len + 8))
col_data_len = (offset + 2 * int_len +
const.column_data_length_offset +
i * (int_len + 8))
col_types = (offset + 2 * int_len +
const.column_type_offset + i * (int_len + 8))
x = self._read_int(col_data_offset, int_len)
self._column_data_offsets[i] = x
x = self._read_int(col_data_len, const.column_data_length_length)
self._column_data_lengths[i] = x
x = self._read_int(col_types, const.column_type_length)
if x == 1:
self.column_types[i] = b'd'
else:
self.column_types[i] = b's'
def _process_columnlist_subheader(self, offset, length):
# unknown purpose
pass
def _process_format_subheader(self, offset, length):
int_len = self._int_length
text_subheader_format = (
offset +
const.column_format_text_subheader_index_offset +
3 * int_len)
col_format_offset = (offset +
const.column_format_offset_offset +
3 * int_len)
col_format_len = (offset +
const.column_format_length_offset +
3 * int_len)
text_subheader_label = (
offset +
const.column_label_text_subheader_index_offset +
3 * int_len)
col_label_offset = (offset +
const.column_label_offset_offset +
3 * int_len)
col_label_len = offset + const.column_label_length_offset + 3 * int_len
x = self._read_int(text_subheader_format,
const.column_format_text_subheader_index_length)
format_idx = min(x, len(self.column_names_strings) - 1)
format_start = self._read_int(
col_format_offset, const.column_format_offset_length)
format_len = self._read_int(
col_format_len, const.column_format_length_length)
label_idx = self._read_int(
text_subheader_label,
const.column_label_text_subheader_index_length)
label_idx = min(label_idx, len(self.column_names_strings) - 1)
label_start = self._read_int(
col_label_offset, const.column_label_offset_length)
label_len = self._read_int(col_label_len,
const.column_label_length_length)
label_names = self.column_names_strings[label_idx]
column_label = label_names[label_start: label_start + label_len]
format_names = self.column_names_strings[format_idx]
column_format = format_names[format_start: format_start + format_len]
current_column_number = len(self.columns)
col = _column()
col.col_id = current_column_number
col.name = self.column_names[current_column_number]
col.label = column_label
col.format = column_format
col.ctype = self.column_types[current_column_number]
col.length = self._column_data_lengths[current_column_number]
self.column_formats.append(column_format)
self.columns.append(col)
def read(self, nrows=None):
if (nrows is None) and (self.chunksize is not None):
nrows = self.chunksize
elif nrows is None:
nrows = self.row_count
if self._current_row_in_file_index >= self.row_count:
return None
m = self.row_count - self._current_row_in_file_index
if nrows > m:
nrows = m
nd = (self.column_types == b'd').sum()
ns = (self.column_types == b's').sum()
self._string_chunk = np.empty((ns, nrows), dtype=np.object)
self._byte_chunk = np.empty((nd, 8 * nrows), dtype=np.uint8)
self._current_row_in_chunk_index = 0
p = Parser(self)
p.read(nrows)
rslt = self._chunk_to_dataframe()
if self.index is not None:
rslt = rslt.set_index(self.index)
return rslt
def _read_next_page(self):
self._current_page_data_subheader_pointers = []
self._cached_page = self._path_or_buf.read(self._page_length)
if len(self._cached_page) <= 0:
return True
elif len(self._cached_page) != self._page_length:
self.close()
msg = ("failed to read complete page from file "
"(read {:d} of {:d} bytes)")
raise ValueError(msg.format(len(self._cached_page),
self._page_length))
self._read_page_header()
if self._current_page_type == const.page_meta_type:
self._process_page_metadata()
        pt = [const.page_meta_type, const.page_data_type]
        pt += const.page_mix_types
if self._current_page_type not in pt:
return self._read_next_page()
return False
def _chunk_to_dataframe(self):
n = self._current_row_in_chunk_index
m = self._current_row_in_file_index
ix = range(m - n, m)
rslt = pd.DataFrame(index=ix)
js, jb = 0, 0
for j in range(self.column_count):
name = self.column_names[j]
if self.column_types[j] == b'd':
rslt[name] = self._byte_chunk[jb, :].view(
dtype=self.byte_order + 'd')
rslt[name] = np.asarray(rslt[name], dtype=np.float64)
if self.convert_dates:
unit = None
if self.column_formats[j] in const.sas_date_formats:
unit = 'd'
elif self.column_formats[j] in const.sas_datetime_formats:
unit = 's'
if unit:
rslt[name] = pd.to_datetime(rslt[name], unit=unit,
origin="1960-01-01")
jb += 1
elif self.column_types[j] == b's':
rslt[name] = self._string_chunk[js, :]
if self.convert_text and (self.encoding is not None):
rslt[name] = rslt[name].str.decode(
self.encoding or self.default_encoding)
if self.blank_missing:
ii = rslt[name].str.len() == 0
rslt.loc[ii, name] = np.nan
js += 1
else:
self.close()
raise ValueError("unknown column type %s" %
self.column_types[j])
return rslt
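# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor addition, not part of the original module).
# The reader above is normally reached through the public ``pandas.read_sas``
# entry point; the file name below is hypothetical.
def _demo_read_sas_in_chunks(path="example.sas7bdat"):
    import pandas as pd
    # With a chunksize, read_sas returns the reader itself; read() then yields
    # one DataFrame per chunk (built by _chunk_to_dataframe) and None at EOF.
    reader = pd.read_sas(path, format="sas7bdat", chunksize=10000,
                         encoding="latin-1")
    chunks = []
    while True:
        chunk = reader.read()
        if chunk is None:
            break
        chunks.append(chunk)
    reader.close()
    return pd.concat(chunks, ignore_index=True)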
| bsd-3-clause |
cbertinato/pandas | pandas/io/json/normalize.py | 1 | 9424 | # ---------------------------------------------------------------------
# JSON normalization routines
from collections import defaultdict
import copy
import numpy as np
from pandas._libs.writers import convert_json_to_lines
from pandas import DataFrame
def _convert_to_line_delimits(s):
"""
Helper function that converts JSON lists to line delimited JSON.
"""
    # Determine whether we have a JSON list to turn into lines; otherwise just
    # return the JSON object unchanged, since only lists can be line-delimited.
    if not (s[0] == '[' and s[-1] == ']'):
return s
s = s[1:-1]
return convert_json_to_lines(s)
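# Illustrative sketch (editor addition): expected behaviour of the helper above.
# A JSON array is rewritten as line-delimited JSON; anything else is returned
# unchanged.
def _demo_convert_to_line_delimits():
    lines = _convert_to_line_delimits('[{"a": 1}, {"a": 2}]')
    # roughly '{"a": 1}\n{"a": 2}' -- items separated by newlines
    untouched = _convert_to_line_delimits('{"a": 1}')
    # unchanged, since the input is not a JSON list
    return lines, untouched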
def nested_to_record(ds, prefix="", sep=".", level=0):
"""
A simplified json_normalize.
Converts a nested dict into a flat dict ("record"), unlike json_normalize,
it does not attempt to extract a subset of the data.
Parameters
----------
ds : dict or list of dicts
prefix: the prefix, optional, default: ""
sep : string, default '.'
Nested records will generate names separated by sep,
e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
.. versionadded:: 0.20.0
    level: the current level (depth) in the nested json object, optional, default: 0
Returns
-------
d - dict or list of dicts, matching `ds`
Examples
--------
IN[52]: nested_to_record(dict(flat1=1,dict1=dict(c=1,d=2),
nested=dict(e=dict(c=1,d=2),d=2)))
Out[52]:
{'dict1.c': 1,
'dict1.d': 2,
'flat1': 1,
'nested.d': 2,
'nested.e.c': 1,
'nested.e.d': 2}
"""
singleton = False
if isinstance(ds, dict):
ds = [ds]
singleton = True
new_ds = []
for d in ds:
new_d = copy.deepcopy(d)
for k, v in d.items():
# each key gets renamed with prefix
if not isinstance(k, str):
k = str(k)
if level == 0:
newkey = k
else:
newkey = prefix + sep + k
# only dicts gets recurse-flattened
# only at level>1 do we rename the rest of the keys
if not isinstance(v, dict):
if level != 0: # so we skip copying for top level, common case
v = new_d.pop(k)
new_d[newkey] = v
continue
else:
v = new_d.pop(k)
new_d.update(nested_to_record(v, newkey, sep, level + 1))
new_ds.append(new_d)
if singleton:
return new_ds[0]
return new_ds
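# Illustrative sketch (editor addition): the ``sep`` argument controls how the
# flattened keys are joined, e.g. an underscore instead of the default dot.
def _demo_nested_to_record_sep():
    flat = nested_to_record({'flat': 1, 'nested': {'a': 1, 'b': 2}}, sep='_')
    # -> {'flat': 1, 'nested_a': 1, 'nested_b': 2}
    return flat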
def json_normalize(data, record_path=None, meta=None,
meta_prefix=None,
record_prefix=None,
errors='raise',
sep='.'):
"""
Normalize semi-structured JSON data into a flat table.
Parameters
----------
data : dict or list of dicts
Unserialized JSON objects
record_path : string or list of strings, default None
Path in each object to list of records. If not passed, data will be
assumed to be an array of records
meta : list of paths (string or list of strings), default None
Fields to use as metadata for each record in resulting table
    meta_prefix : string, default None
        If not None, prefix added to the names of the meta columns.
    record_prefix : string, default None
        If not None, prefix added to the names of the record columns, e.g.
        with record_prefix='foo.bar.' a record field 'field' becomes the
        column 'foo.bar.field'.
errors : {'raise', 'ignore'}, default 'raise'
* 'ignore' : will ignore KeyError if keys listed in meta are not
always present
* 'raise' : will raise KeyError if keys listed in meta are not
always present
.. versionadded:: 0.20.0
sep : string, default '.'
Nested records will generate names separated by sep,
e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
.. versionadded:: 0.20.0
Returns
-------
frame : DataFrame
Examples
--------
>>> from pandas.io.json import json_normalize
>>> data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}},
... {'name': {'given': 'Mose', 'family': 'Regner'}},
... {'id': 2, 'name': 'Faye Raker'}]
>>> json_normalize(data)
id name name.family name.first name.given name.last
0 1.0 NaN NaN Coleen NaN Volk
1 NaN NaN Regner NaN Mose NaN
2 2.0 Faye Raker NaN NaN NaN NaN
>>> data = [{'state': 'Florida',
... 'shortname': 'FL',
... 'info': {
... 'governor': 'Rick Scott'
... },
... 'counties': [{'name': 'Dade', 'population': 12345},
... {'name': 'Broward', 'population': 40000},
... {'name': 'Palm Beach', 'population': 60000}]},
... {'state': 'Ohio',
... 'shortname': 'OH',
... 'info': {
... 'governor': 'John Kasich'
... },
... 'counties': [{'name': 'Summit', 'population': 1234},
... {'name': 'Cuyahoga', 'population': 1337}]}]
>>> result = json_normalize(data, 'counties', ['state', 'shortname',
... ['info', 'governor']])
>>> result
name population info.governor state shortname
0 Dade 12345 Rick Scott Florida FL
1 Broward 40000 Rick Scott Florida FL
2 Palm Beach 60000 Rick Scott Florida FL
3 Summit 1234 John Kasich Ohio OH
4 Cuyahoga 1337 John Kasich Ohio OH
>>> data = {'A': [1, 2]}
>>> json_normalize(data, 'A', record_prefix='Prefix.')
Prefix.0
0 1
1 2
"""
def _pull_field(js, spec):
result = js
if isinstance(spec, list):
for field in spec:
result = result[field]
else:
result = result[spec]
return result
if isinstance(data, list) and not data:
return DataFrame()
# A bit of a hackjob
if isinstance(data, dict):
data = [data]
if record_path is None:
if any([isinstance(x, dict) for x in y.values()] for y in data):
# naive normalization, this is idempotent for flat records
# and potentially will inflate the data considerably for
# deeply nested structures:
            # {VeryLong: {b: 1, c: 2}} -> {VeryLong.b: 1, VeryLong.c: 2}
#
# TODO: handle record value which are lists, at least error
# reasonably
data = nested_to_record(data, sep=sep)
return DataFrame(data)
elif not isinstance(record_path, list):
record_path = [record_path]
if meta is None:
meta = []
elif not isinstance(meta, list):
meta = [meta]
meta = [m if isinstance(m, list) else [m] for m in meta]
# Disastrously inefficient for now
records = []
lengths = []
meta_vals = defaultdict(list)
if not isinstance(sep, str):
sep = str(sep)
meta_keys = [sep.join(val) for val in meta]
def _recursive_extract(data, path, seen_meta, level=0):
if isinstance(data, dict):
data = [data]
if len(path) > 1:
for obj in data:
for val, key in zip(meta, meta_keys):
if level + 1 == len(val):
seen_meta[key] = _pull_field(obj, val[-1])
_recursive_extract(obj[path[0]], path[1:],
seen_meta, level=level + 1)
else:
for obj in data:
recs = _pull_field(obj, path[0])
# For repeating the metadata later
lengths.append(len(recs))
for val, key in zip(meta, meta_keys):
if level + 1 > len(val):
meta_val = seen_meta[key]
else:
try:
meta_val = _pull_field(obj, val[level:])
except KeyError as e:
if errors == 'ignore':
meta_val = np.nan
else:
raise KeyError("Try running with "
"errors='ignore' as key "
"{err} is not always present"
.format(err=e))
meta_vals[key].append(meta_val)
records.extend(recs)
_recursive_extract(data, record_path, {}, level=0)
result = DataFrame(records)
if record_prefix is not None:
result = result.rename(
columns=lambda x: "{p}{c}".format(p=record_prefix, c=x))
# Data types, a problem
for k, v in meta_vals.items():
if meta_prefix is not None:
k = meta_prefix + k
if k in result:
raise ValueError('Conflicting metadata name {name}, '
'need distinguishing prefix '.format(name=k))
        # forcing dtype to object to avoid the metadata being cast to string
result[k] = np.array(v, dtype=object).repeat(lengths)
return result
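# Illustrative sketch (editor addition): when a meta key is missing from some
# records, errors='ignore' fills the gap with NaN instead of raising KeyError.
def _demo_json_normalize_missing_meta():
    data = [{'name': 'A', 'info': {'id': 1}, 'kids': [{'k': 1}, {'k': 2}]},
            {'name': 'B', 'kids': [{'k': 3}]}]  # second record has no 'info'
    return json_normalize(data, 'kids', ['name', ['info', 'id']],
                          errors='ignore')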
| bsd-3-clause |
Nikea/pyXPCS | pyxpcs/minimize.py | 1 | 5523 | # Licensed as BSD by Yuriy Chushkin of the ESRF on 2014-08-06
################################################################################
# Copyright (c) 2014, the European Synchrotron Radiation Facility #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are met: #
# #
# * Redistributions of source code must retain the above copyright notice, #
# this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright notice, #
# this list of conditions and the following disclaimer in the documentation #
# and/or other materials provided with the distribution. #
# #
# * Neither the name of the European Synchrotron Radiation Facility nor the #
# names of its contributors may be used to endorse or promote products #
# derived from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" #
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE #
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE #
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR #
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF #
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS #
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN #
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) #
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
################################################################################
from array import *
#from scipy import *
from scipy.optimize import leastsq
import scipy.io.array_import
from scipy import factorial
from numpy import *
import matplotlib.pylab as pyl
def residuals(p,y,x,yerr):
err = (y-peval(x,p))/yerr
return err
#########################################################
def residuals_duri_global(p, y, x,yerr,qval):
print p
#not the squared ones, squaring will be handled by leastsq
err = 0
xplot=pyl.array(x,typecode='d')
for i,q in enumerate(qval):
q= float(q)
y_calc=peval_duri_global(x,p,q)
# y_calcplot=pyl.array(y_calc,typecode='d')
err+=(y[i,:]-y_calc)**2
# err+=y[i,:]-y_calc
# pyl.figure(2)
# yplot=pyl.array(y[i,:],typecode='d')
# pyl.subplot(211)
# pyl.semilogx(xplot,y_calcplot-yplot,'-')
# pyl.hold(False)
# pyl.subplot(212)
# pyl.semilogx(xplot,yplot,'o',xplot,y_calcplot,'-')
# pyl.hold(False)
err = sqrt(err)
return err
#########################################################
def peval_duri_global(x,p,q):
global func
return eval(func)
#########################################################
def peval(x,p):
global func
return eval(func)
#########################################################
def def_func(npar,c0):
global func
for i in range(npar):
par_old='p'+str(i)
par_new='p['+str(i)+']'
func= func.replace(par_old,par_new)
nconst=len(c0)
for i in range(nconst):
name='c'+str(i)
value=str(c0[i])
func=func.replace(name,value)
print func
return func
#########################################################
def fitting(x,y,f,p0,c0,yerr='none',qval='none'):
if yerr=='none':
yerr=y*0+y/y
global func
func=f
npar=len(p0)
func=def_func(npar,c0)
    print 'fitting with function: ', func
print 'no of parameters: ', len(p0)
# plsq = leastsq(residuals, p0, args=(y,x,yerr), col_deriv=1, maxfev=20000)
if 'duri' in func:
plsq= leastsq(residuals_duri_global, p0, args=(y,x,yerr,qval), col_deriv=0, ftol=1e-4, maxfev=2000000)
else:
plsq = leastsq(residuals, p0, args=(y,x,yerr), col_deriv=0, maxfev=20000)
if npar==1:
final_par = array([plsq[0]])
else:
final_par = plsq[0]
if 'duri' in func:
yfit=0*y
for i,q in enumerate(qval):
q=float(q)
yfit[i,:]=pyl.array(peval_duri_global(x,final_par,q),typecode='d')
else:
yfit=pyl.array(peval(x,final_par),typecode='d')
return yfit, final_par, func
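#########################################################
# Illustrative usage sketch (editor addition, hypothetical data): fit a single
# exponential decay. The model string uses p<i> for free parameters and c<i>
# for fixed constants, which def_func() substitutes before evaluation.
def _demo_fit_single_exponential():
    x = arange(1, 100.)
    y = 0.3*exp(-x/20.) + 1.0
    # model p0*exp(-x/p1)+c0 with the baseline c0 held fixed at 1.0
    yfit, pars, used_func = fitting(x, y, 'p0*exp(-x/p1)+c0', [0.1, 10.0], [1.0])
    return yfit, pars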
def func_duri(x,q,gamma0,delta,alfa,n=101):
gn=resize(0*x,[n,len(x)])
alfa=abs(cos(alfa))
for k in range(n):
P_k=(exp(-abs(gamma0)*x)*(abs(gamma0)*x)**k)/factorial(k)
gn[k,:]= P_k*exp(-(q*abs(delta)*k**abs(alfa))**2)
g1=sum(gn,axis=0)
return g1
#####from yuriy
#out=leastsq(errfunc,p0,args=(xdata,ydata,b),full_output=1)
# p1[i,1:]=out[0]
# covar=out[1]
# err[i,:]=sqrt(diag(covar,k=0))
| bsd-3-clause |
michaelaye/planet4 | planet4/io.py | 1 | 17380 | import configparser
import datetime as dt
import logging
import os
import shutil
from pathlib import Path
from urllib.error import URLError
import matplotlib.image as mplimg
import pandas as pd
import pkg_resources as pr
from . import stats
from .exceptions import NoFilesFoundError
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
pkg_name = __name__.split('.')[0]
configpath = Path.home() / ".{}.ini".format(pkg_name)
LOGGER = logging.getLogger(__name__)
def get_config():
"""Read the configfile and return config dict.
Returns
-------
dict
Dictionary with the content of the configpath file.
"""
if not configpath.exists():
raise IOError("Config file {} not found.".format(str(configpath)))
else:
config = configparser.ConfigParser()
config.read(str(configpath))
return config
def set_database_path(dbfolder):
"""Use to write the database path into the config.
Parameters
----------
dbfolder : str or pathlib.Path
Path to where planet4 will store clustering results by default.
"""
try:
d = get_config()
except IOError:
d = configparser.ConfigParser()
d['planet4_db'] = {}
d['planet4_db']['path'] = dbfolder
with configpath.open('w') as f:
d.write(f)
print("Saved database path into {}.".format(configpath))
def get_data_root():
d = get_config()
data_root = Path(d['planet4_db']['path']).expanduser()
data_root.mkdir(exist_ok=True, parents=True)
return data_root
def get_ground_projection_root():
d = get_config()
gp_root = Path(d['ground_projection']['path'])
gp_root.mkdir(exist_ok=True)
return gp_root
if not configpath.exists():
print("No configuration file {} found.\n".format(configpath))
savepath = input(
"Please provide the path where you want to store planet4 results:")
set_database_path(savepath)
else:
data_root = get_data_root()
def dropbox():
return Path.home() / 'Dropbox'
def p4data():
return dropbox() / 'data' / 'planet4'
def analysis_folder():
name = 'p4_analysis'
if p4data().exists():
path = p4data() / name
else:
path = dropbox() / name
return path
def check_and_pad_id(imgid):
"Does NOT work with pd.Series item."
if imgid is None:
return None
imgid_template = "APF0000000"
if len(imgid) < len(imgid_template):
imgid = imgid_template[:-len(imgid)] + imgid
return imgid
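# Illustrative sketch (editor addition): short Planet Four ids are left-padded
# onto the APF0000000 template, full ids pass through unchanged.
def _demo_check_and_pad_id():
    assert check_and_pad_id('123') == 'APF0000123'
    assert check_and_pad_id('APF0000123') == 'APF0000123'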
def get_subframe(url):
"""Download image if not there yet and return numpy array.
Takes a data record (called 'line'), picks out the image_url.
First checks if the name of that image is already stored in
the image path. If not, it grabs it from the server.
Then uses matplotlib.image to read the image into a numpy-array
and finally returns it.
"""
targetpath = data_root / 'images' / os.path.basename(url)
targetpath.parent.mkdir(exist_ok=True)
if not targetpath.exists():
LOGGER.info("Did not find image in cache. Downloading ...")
try:
path = urlretrieve(url)[0]
except URLError:
msg = "Cannot receive subframe image. No internet?"
LOGGER.error(msg)
return None
LOGGER.debug("Done.")
shutil.move(path, str(targetpath))
else:
LOGGER.debug("Found image in cache.")
im = mplimg.imread(targetpath)
return im
class P4DBName(object):
def __init__(self, fname):
self.p = Path(fname)
date = str(self.name)[:10]
self.date = dt.datetime(*[int(i) for i in date.split('-')])
def __getattr__(self, name):
"looking up things in the Path object if not in `self`."
return getattr(self.p, name)
def get_latest_file(filenames):
fnames = list(filenames)
if len(fnames) == 0:
raise NoFilesFoundError
retval = P4DBName(fnames[0])
dtnow = retval.date
for fname in fnames[1:]:
dt_to_check = P4DBName(fname).date
if dt_to_check > dtnow:
dtnow = dt_to_check
retval = P4DBName(fname)
return retval.p
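# Illustrative sketch (editor addition, hypothetical file names): the newest
# database file is chosen by the date encoded in the first 10 characters
# (YYYY-MM-DD) of each file name.
def _demo_get_latest_file():
    candidates = [Path('2018-01-01_planet_four_classifications_queryable_cleaned.h5'),
                  Path('2018-02-15_planet_four_classifications_queryable_cleaned.h5')]
    return get_latest_file(candidates)  # -> the 2018-02-15 file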
def get_latest_cleaned_db(datadir=None):
datadir = data_root if datadir is None else Path(datadir)
h5files = list(datadir.glob('201*_queryable_cleaned*.h5'))
if len(h5files) == 0:
LOGGER.error("No files found. Searching in %s", str(datadir))
raise NoFilesFoundError(f"No files found. Searching in {str(datadir)}")
return get_latest_file(h5files)
def get_latest_season23_dbase(datadir=None):
if datadir is None:
datadir = data_root
h5files = list(datadir.glob('201*_queryable_cleaned_seasons2and3.h5'))
return get_latest_file(h5files)
def get_test_database():
fname = pr.resource_filename('planet4', 'data/test_db.csv')
return pd.read_csv(fname)
def get_latest_tutorial_data(datadir=None):
if datadir is None:
datadir = data_root
    # assuming tutorial files live in date-named subfolders (YYYY-...)
    tut_files = datadir.glob('*/*_tutorials.h5')
    tut_files = [i for i in tut_files if i.parent.name[:4].isdigit()]
if not tut_files:
raise NoFilesFoundError
return pd.read_hdf(str(get_latest_file(tut_files)), 'df')
def common_gold_ids():
# read the common gold_ids to check
with open('../data/gold_standard_commons.txt') as f:
gold_ids = f.read()
gold_ids = gold_ids.split('\n')
del gold_ids[-1] # last one is empty
return gold_ids
def get_image_names_from_db(dbfname):
"""Return arrary of HiRISE image_names from database file.
Parameters
----------
dbfname : pathlib.Path or str
Path to database file to be used.
Returns
-------
numpy.ndarray
Array of unique image names.
"""
path = Path(dbfname)
if path.suffix in ['.hdf', '.h5']:
with pd.HDFStore(str(dbfname)) as store:
return store.select_column('df', 'image_name').unique()
elif path.suffix == '.csv':
return pd.read_csv(dbfname).image_id.unique()
def get_latest_marked():
return pd.read_hdf(str(get_latest_cleaned_db()), 'df',
where='marking!=None')
def get_image_id_from_fname(fname):
"Return image_id from beginning of Path(fname).name"
fname = Path(fname)
name = fname.name
return name.split('_')[0]
def get_image_ids_in_folder(folder, extension='.csv'):
fnames = Path(folder).glob('*' + extension)
return [get_image_id_from_fname(i) for i in fnames]
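# Illustrative sketch (editor addition, hypothetical file name): the image_id is
# whatever precedes the first underscore in the file name.
def _demo_get_image_id_from_fname():
    return get_image_id_from_fname('/tmp/APF0000xyz_blotches.csv')  # -> 'APF0000xyz'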
class PathManager(object):
"""Manage file paths and folders related to the analysis pipeline.
Level definitions:
* L0 : Raw output of Planet Four
* L1A : Clustering of Blotches and Fans on their own
* L1B : Clustered blotches and fans combined into final fans, final blotches, and fnotches that
need to have a cut applied for the decision between fans or blotches.
* L1C : Derived database where a cut has been applied for fnotches to become either fan or
blotch.
Parameters
----------
id_ : str, optional
The data item id that is used to determine sub-paths. Can be set after
init.
datapath : str or pathlib.Path, optional
the base path from where to manage all derived paths. No default assumed
to prevent errors.
suffix : {'.hdf', '.h5', '.csv'}
The suffix that controls the reader function to be used.
obsid : str, optional
HiRISE obsid (i.e. P4 image_name), added as a folder inside path.
Can be set after init.
extra_path : str, pathlib.Path, optional
Any extra path element that needs to be added to the standard path.
Attributes
----------
cut_dir : pathlib.Path
Defined in `get_cut_folder`.
"""
def __init__(self, id_='', datapath='clustering', suffix='.csv', obsid='', cut=0.5,
extra_path=''):
self.id = id_
self.cut = cut
self._obsid = obsid
self.extra_path = extra_path
if datapath is None:
# take default path if none given
self._datapath = Path(data_root) / 'clustering'
elif Path(datapath).is_absolute():
# if given datapath is absolute, take only that:
self._datapath = Path(datapath)
else:
# if it is relative, add it to data_root
self._datapath = Path(data_root) / datapath
self.suffix = suffix
# point reader to correct function depending on required suffix
if suffix in ['.hdf', '.h5']:
self.reader = pd.read_hdf
elif suffix == '.csv':
self.reader = pd.read_csv
# making sure to warn the user here if the data isn't where it's expected to be
if id_ != '':
if not self.path_so_far.exists():
raise FileNotFoundError(f"{self.path_so_far} does not exist.")
@property
def id(self):
return self._id
@id.setter
def id(self, value):
if value is not None:
self._id = check_and_pad_id(value)
@property
def clustering_logfile(self):
return self.fanfile.parent / 'clustering_settings.yaml'
@property
def obsid(self):
        if self._obsid == '':
            if self.id != '':
LOGGER.debug("Entering obsid search for known image_id.")
db = DBManager()
data = db.get_image_id_markings(self.id)
try:
obsid = data.image_name.iloc[0]
except IndexError:
raise IndexError("obsid access broken. Did you forget to use the `obsid` keyword"
" at initialization?")
LOGGER.debug("obsid found: %s", obsid)
self._obsid = obsid
return self._obsid
@obsid.setter
def obsid(self, value):
self._obsid = value
@property
def obsid_results_savefolder(self):
subfolder = 'p4_catalog' if self.datapath is None else self.datapath
savefolder = analysis_folder() / subfolder
savefolder.mkdir(exist_ok=True, parents=True)
return savefolder
@property
def obsid_final_fans_path(self):
return self.obsid_results_savefolder / f"{self.obsid}_fans.csv"
@property
def obsid_final_blotches_path(self):
return self.obsid_results_savefolder / f"{self.obsid}_blotches.csv"
@property
def datapath(self):
return self._datapath
@property
def path_so_far(self):
p = self.datapath
p /= self.extra_path
p /= self.obsid
return p
@property
def L1A_folder(self):
"Subfolder name for the clustered data before fnotching."
return 'L1A'
@property
def L1B_folder(self):
"Subfolder name for the fnotched data, before cut is applied."
return 'L1B'
@property
def L1C_folder(self):
"subfolder name for the final catalog after applying `cut`."
return 'L1C_cut_{:.1f}'.format(self.cut)
def get_path(self, marking, specific=''):
p = self.path_so_far
# now add the image_id
try:
p /= self.id
except TypeError:
logging.warning("self.id not set. Storing in obsid level.")
id_ = self.id if self.id != '' else self.obsid
# add the specific sub folder
p /= specific
if specific != '':
p /= f"{id_}_{specific}_{marking}{self.suffix}"
else:
# prepend the data level to file name if given.
p /= f"{id_}_{marking}{self.suffix}"
return p
def get_obsid_paths(self, level):
"""get all existing paths for a given data level.
Parameters
----------
level : {'L1A', 'L1B', 'L1C'}
"""
folder = self.path_so_far
# cast to upper case for the lazy... ;)
level = level.upper()
image_id_paths = [item for item in folder.glob('*') if item.is_dir()]
bucket = []
for p in image_id_paths:
try:
bucket.append(next(p.glob(f"{level}*")))
except StopIteration:
continue
return bucket
def get_df(self, fpath):
return self.reader(str(fpath))
@property
def fanfile(self):
return self.get_path('fans', self.L1A_folder)
@property
def fandf(self):
return self.get_df(self.fanfile)
@property
def reduced_fanfile(self):
return self.get_path('fans', self.L1B_folder)
@property
def reduced_fandf(self):
return self.get_df(self.reduced_fanfile)
@property
def final_fanfile(self):
return self.get_path('fans', self.L1C_folder)
@property
def final_fandf(self):
return self.get_df(self.final_fanfile)
@property
def blotchfile(self):
return self.get_path('blotches', self.L1A_folder)
@property
def blotchdf(self):
return self.get_df(self.blotchfile)
@property
def reduced_blotchfile(self):
return self.get_path('blotches', self.L1B_folder)
@property
def reduced_blotchdf(self):
return self.get_df(self.reduced_blotchfile)
@property
def final_blotchfile(self):
return self.get_path('blotches', self.L1C_folder)
@property
def final_blotchdf(self):
return self.get_df(self.final_blotchfile)
@property
def fnotchfile(self):
return self.get_path('fnotches', self.L1B_folder)
@property
def fnotchdf(self):
# the fnotchfile has an index, so i need to read that here:
return pd.read_csv(self.fnotchfile, index_col=0)
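# Illustrative usage sketch (editor addition, hypothetical obsid and image_id):
# how PathManager assembles the L1A/L1B/L1C file paths. Only paths are built
# here, no files are read.
def _demo_path_manager_paths():
    pm = PathManager(obsid='ESP_012345_0950', datapath='clustering')
    pm.id = 'APF0000xyz'
    # L1A per-marking clustering, L1B fnotched data, L1C catalog after the cut
    return pm.fanfile, pm.reduced_blotchfile, pm.final_fanfile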
class DBManager(object):
"""Access class for database activities.
Provides easy access to often used data items.
Parameters
----------
dbname : str, optional
Path to database file to be used. Default: use get_latest_cleaned_db() to
find it.
Attributes
----------
image_names
image_ids
n_image_ids
n_image_names
obsids : Alias to image_ids
season2and3_image_names
"""
def __init__(self, dbname=None):
"""Initialize DBManager class.
Parameters
----------
dbname : <str>
Filename of database file to use. Default: Latest produced full
database.
"""
if dbname is None:
self.dbname = str(get_latest_cleaned_db())
else:
self.dbname = str(dbname)
def __repr__(self):
s = "Database root: {}\n".format(Path(self.dbname).parent)
s += "Database name: {}\n".format(Path(self.dbname).name)
return s
@property
def orig_csv(self):
p = Path(self.dbname)
return p.parent / (p.name[:38] + '.csv')
def set_latest_with_dupes_db(self, datadir=None):
datadir = data_root if datadir is None else Path(datadir)
h5files = datadir.glob('201*_queryable.h5')
dbname = get_latest_file(h5files)
print("Setting {} as dbname.".format(dbname.name))
self.dbname = str(dbname)
@property
def image_names(self):
"""Return list of unique obsids used in database.
See also
--------
get_image_names_from_db
"""
return get_image_names_from_db(self.dbname)
@property
def image_ids(self):
"Return list of unique image_ids in database."
with pd.HDFStore(self.dbname) as store:
return store.select_column('df', 'image_id').unique()
@property
def n_image_ids(self):
return len(self.image_ids)
@property
def n_image_names(self):
return len(self.image_names)
@property
def obsids(self):
"Alias to self.image_names."
return self.image_names
def get_all(self, datadir=None):
return pd.read_hdf(str(self.dbname), 'df')
def get_obsid_markings(self, obsid):
"Return marking data for given HiRISE obsid."
return pd.read_hdf(self.dbname, 'df', where='image_name=' + obsid)
def get_image_name_markings(self, image_name):
"Alias for get_obsid_markings."
return self.get_obsid_markings(image_name)
def get_image_id_markings(self, image_id):
"Return marking data for one Planet4 image_id"
image_id = check_and_pad_id(image_id)
return pd.read_hdf(self.dbname, 'df', where='image_id=' + image_id)
def get_data_for_obsids(self, obsids):
bucket = []
for obsid in obsids:
bucket.append(self.get_obsid_markings(obsid))
return pd.concat(bucket, ignore_index=True)
def get_classification_id_data(self, class_id):
"Return data for one classification_id"
return pd.read_hdf(self.dbname, 'df',
where="classification_id=='{}'".format(class_id))
@property
def season2and3_image_names(self):
"numpy.array : List of image_names for season 2 and 3."
image_names = self.image_names
metadf = pd.DataFrame(pd.Series(image_names).astype(
'str'), columns=['image_name'])
stats.define_season_column(metadf)
return metadf[(metadf.season > 1) & (metadf.season < 4)].image_name.unique()
def get_general_filter(self, f):
return pd.read_hdf(self.dbname, 'df', where=f)
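# Illustrative usage sketch (editor addition, hypothetical ids): typical
# read-only queries against the latest cleaned database. Requires a configured
# planet4 database path; nothing here runs at import time.
def _demo_dbmanager_queries(image_id='APF0000xyz', obsid='ESP_012345_0950'):
    db = DBManager()
    tile_markings = db.get_image_id_markings(image_id)
    obsid_markings = db.get_obsid_markings(obsid)
    return tile_markings.shape, obsid_markings.shape, db.n_image_names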
| isc |
juancq/shape-deform | gplib.py | 1 | 47610 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
"""The :mod:`gp` module provides the methods and classes to perform
Genetic Programming with DEAP. It essentially contains the classes to
build a Genetic Program Tree, and the functions to evaluate it.
This module supports both strongly and loosely typed GP.
"""
import copy
import math
import random
import re
import sys
import warnings
from collections import defaultdict, deque
from functools import partial, wraps
from inspect import isclass
from operator import eq, lt
from deap import tools # Needed by HARM-GP
######################################
# GP Data structure #
######################################
# Define the name of type for any types.
__type__ = object
class PrimitiveTree(list):
"""Tree specifically formatted for optimization of genetic
programming operations. The tree is represented with a
list where the nodes are appended in a depth-first order.
The nodes appended to the tree are required to
have an attribute *arity* which defines the arity of the
    primitive. An arity of 0 is expected from terminal nodes.
"""
def __init__(self, content):
list.__init__(self, content)
def __deepcopy__(self, memo):
new = self.__class__(self)
new.__dict__.update(copy.deepcopy(self.__dict__, memo))
return new
def __setitem__(self, key, val):
# Check for most common errors
# Does NOT check for STGP constraints
if isinstance(key, slice):
if key.start >= len(self):
raise IndexError("Invalid slice object (try to assign a %s"
" in a tree of size %d). Even if this is allowed by the"
" list object slice setter, this should not be done in"
" the PrimitiveTree context, as this may lead to an"
" unpredictable behavior for searchSubtree or evaluate."
% (key, len(self)))
total = val[0].arity
for node in val[1:]:
total += node.arity - 1
if total != 0:
raise ValueError("Invalid slice assignation : insertion of"
" an incomplete subtree is not allowed in PrimitiveTree."
" A tree is defined as incomplete when some nodes cannot"
" be mapped to any position in the tree, considering the"
" primitives' arity. For instance, the tree [sub, 4, 5,"
" 6] is incomplete if the arity of sub is 2, because it"
" would produce an orphan node (the 6).")
elif val.arity != self[key].arity:
raise ValueError("Invalid node replacement with a node of a"
" different arity.")
list.__setitem__(self, key, val)
def js_str(self):
"""Return the expression in a human readable string.
"""
string = ""
stack = []
for node in self:
stack.append((node, []))
while len(stack[-1][1]) == stack[-1][0].arity:
prim, args = stack.pop()
if type(prim) is Primitive:
string = prim.d_format(*args)
else:
string = prim.format(*args)
if len(stack) == 0:
break # If stack is empty, all nodes should have been seen
stack[-1][1].append(string)
return string
def __str__(self):
"""Return the expression in a human readable string.
"""
string = ""
stack = []
for node in self:
stack.append((node, []))
while len(stack[-1][1]) == stack[-1][0].arity:
prim, args = stack.pop()
string = prim.format(*args)
if len(stack) == 0:
break # If stack is empty, all nodes should have been seen
stack[-1][1].append(string)
return string
@classmethod
def from_string(cls, string, pset):
"""Try to convert a string expression into a PrimitiveTree given a
PrimitiveSet *pset*. The primitive set needs to contain every primitive
present in the expression.
:param string: String representation of a Python expression.
:param pset: Primitive set from which primitives are selected.
:returns: PrimitiveTree populated with the deserialized primitives.
"""
tokens = re.split("[ \t\n\r\f\v(),]", string)
expr = []
ret_types = deque()
for token in tokens:
if token == '':
continue
if len(ret_types) != 0:
type_ = ret_types.popleft()
else:
type_ = None
if token in pset.mapping:
primitive = pset.mapping[token]
if type_ is not None and not issubclass(primitive.ret, type_):
raise TypeError("Primitive {} return type {} does not "
"match the expected one: {}."
.format(primitive, primitive.ret, type_))
expr.append(primitive)
if isinstance(primitive, Primitive):
ret_types.extendleft(reversed(primitive.args))
else:
try:
token = eval(token)
except NameError:
raise TypeError("Unable to evaluate terminal: {}.".format(token))
if type_ is None:
type_ = type(token)
if not issubclass(type(token), type_):
raise TypeError("Terminal {} type {} does not "
"match the expected one: {}."
.format(token, type(token), type_))
expr.append(Terminal(token, False, type_))
return cls(expr)
@property
def height(self):
"""Return the height of the tree, or the depth of the
deepest node.
"""
stack = [0]
max_depth = 0
for elem in self:
depth = stack.pop()
max_depth = max(max_depth, depth)
stack.extend([depth + 1] * elem.arity)
return max_depth
@property
def root(self):
"""Root of the tree, the element 0 of the list.
"""
return self[0]
def searchSubtree(self, begin):
"""Return a slice object that corresponds to the
range of values that defines the subtree which has the
element with index *begin* as its root.
"""
end = begin + 1
total = self[begin].arity
while total > 0:
total += self[end].arity - 1
end += 1
return slice(begin, end)
class Primitive(object):
"""Class that encapsulates a primitive and when called with arguments it
returns the Python code to call the primitive with the arguments.
>>> pr = Primitive("mul", (int, int), int)
>>> pr.format(1, 2)
'mul(1, 2)'
"""
__slots__ = ('name', 'arity', 'args', 'ret', 'seq')
def __init__(self, name, args, ret):
self.name = name
self.arity = len(args)
self.args = args
self.ret = ret
args = ", ".join(map("{{{0}}}".format, range(self.arity)))
self.seq = "{name}({args})".format(name=self.name, args=args)
def format(self, *args):
return self.seq.format(*args)
def d_format(self, *args):
if self.arity == 2:
return "({arg1} {name} {arg2})".format(name=self.name, arg1=args[0], arg2=args[1])
else:
return self.seq.format(*args)
def __eq__(self, other):
if type(self) is type(other):
return all(getattr(self, slot) == getattr(other, slot)
for slot in self.__slots__)
else:
return NotImplemented
class Terminal(object):
"""Class that encapsulates terminal primitive in expression. Terminals can
be values or 0-arity functions.
"""
__slots__ = ('name', 'value', 'ret', 'conv_fct')
def __init__(self, terminal, symbolic, ret):
self.ret = ret
self.value = terminal
self.name = str(terminal)
self.conv_fct = str if symbolic else repr
@property
def arity(self):
return 0
def format(self):
return self.conv_fct(self.value)
def __eq__(self, other):
if type(self) is type(other):
return all(getattr(self, slot) == getattr(other, slot)
for slot in self.__slots__)
else:
return NotImplemented
class Ephemeral(Terminal):
"""Class that encapsulates a terminal which value is set when the
object is created. To mutate the value, a new object has to be
generated. This is an abstract base class. When subclassing, a
staticmethod 'func' must be defined.
"""
def __init__(self):
Terminal.__init__(self, self.func(), symbolic=False, ret=self.ret)
@staticmethod
def func():
"""Return a random value used to define the ephemeral state.
"""
raise NotImplementedError
class PrimitiveSetTyped(object):
"""Class that contains the primitives that can be used to solve a
Strongly Typed GP problem. The set also defined the researched
function return type, and input arguments type and number.
"""
def __init__(self, name, in_types, ret_type, prefix="ARG"):
self.terminals = defaultdict(list)
self.primitives = defaultdict(list)
self.arguments = []
# setting "__builtins__" to None avoid the context
# being polluted by builtins function when evaluating
# GP expression.
self.context = {"__builtins__": None}
self.mapping = dict()
self.terms_count = 0
self.prims_count = 0
self.name = name
self.ret = ret_type
self.ins = in_types
for i, type_ in enumerate(in_types):
arg_str = "{prefix}{index}".format(prefix=prefix, index=i)
self.arguments.append(arg_str)
term = Terminal(arg_str, True, type_)
self._add(term)
self.terms_count += 1
def renameArguments(self, **kargs):
"""Rename function arguments with new names from *kargs*.
"""
for i, old_name in enumerate(self.arguments):
if old_name in kargs:
new_name = kargs[old_name]
self.arguments[i] = new_name
self.mapping[new_name] = self.mapping[old_name]
self.mapping[new_name].value = new_name
del self.mapping[old_name]
def _add(self, prim):
def addType(dict_, ret_type):
if not ret_type in dict_:
new_list = []
for type_, list_ in dict_.items():
if issubclass(type_, ret_type):
for item in list_:
if not item in new_list:
new_list.append(item)
dict_[ret_type] = new_list
addType(self.primitives, prim.ret)
addType(self.terminals, prim.ret)
self.mapping[prim.name] = prim
if isinstance(prim, Primitive):
for type_ in prim.args:
addType(self.primitives, type_)
addType(self.terminals, type_)
dict_ = self.primitives
else:
dict_ = self.terminals
for type_ in dict_:
if issubclass(prim.ret, type_):
dict_[type_].append(prim)
def addPrimitive(self, primitive, in_types, ret_type, name=None):
"""Add a primitive to the set.
:param primitive: callable object or a function.
        :param in_types: list of the primitive's argument types
:param ret_type: type returned by the primitive.
:param name: alternative name for the primitive instead
of its __name__ attribute.
"""
if name is None:
name = primitive.__name__
prim = Primitive(name, in_types, ret_type)
assert name not in self.context or \
self.context[name] is primitive, \
"Primitives are required to have a unique name. " \
"Consider using the argument 'name' to rename your "\
"second '%s' primitive." % (name,)
self._add(prim)
self.context[prim.name] = primitive
self.prims_count += 1
def addTerminal(self, terminal, ret_type, name=None):
"""Add a terminal to the set. Terminals can be named
        using the optional *name* argument. This should be used
        to define a named constant (e.g. pi), to speed up evaluation
        when the object takes long to build, when the object does not
        have a __repr__ function that returns the code needed to
        rebuild it, or when the object class is not a Python built-in.
:param terminal: Object, or a function with no arguments.
:param ret_type: Type of the terminal.
:param name: defines the name of the terminal in the expression.
"""
symbolic = False
if name is None and callable(terminal):
name = terminal.__name__
assert name not in self.context, \
"Terminals are required to have a unique name. " \
"Consider using the argument 'name' to rename your "\
"second %s terminal." % (name,)
if name is not None:
self.context[name] = terminal
terminal = name
symbolic = True
elif terminal in (True, False):
# To support True and False terminals with Python 2.
self.context[str(terminal)] = terminal
prim = Terminal(terminal, symbolic, ret_type)
self._add(prim)
self.terms_count += 1
def addEphemeralConstant(self, name, ephemeral, ret_type):
"""Add an ephemeral constant to the set. An ephemeral constant
is a no argument function that returns a random value. The value
of the constant is constant for a Tree, but may differ from one
Tree to another.
:param name: name used to refers to this ephemeral type.
:param ephemeral: function with no arguments returning a random value.
:param ret_type: type of the object returned by *ephemeral*.
"""
module_gp = globals()
if not name in module_gp:
class_ = type(name, (Ephemeral,), {'func': staticmethod(ephemeral),
'ret': ret_type})
module_gp[name] = class_
else:
class_ = module_gp[name]
if issubclass(class_, Ephemeral):
if class_.func is not ephemeral:
raise Exception("Ephemerals with different functions should "
"be named differently, even between psets.")
elif class_.ret is not ret_type:
raise Exception("Ephemerals with the same name and function "
"should have the same type, even between psets.")
else:
raise Exception("Ephemerals should be named differently "
"than classes defined in the gp module.")
self._add(class_)
self.terms_count += 1
def addADF(self, adfset):
"""Add an Automatically Defined Function (ADF) to the set.
:param adfset: PrimitiveSetTyped containing the primitives with which
the ADF can be built.
"""
prim = Primitive(adfset.name, adfset.ins, adfset.ret)
self._add(prim)
self.prims_count += 1
@property
def terminalRatio(self):
"""Return the ratio of the number of terminals on the number of all
kind of primitives.
"""
return self.terms_count / float(self.terms_count + self.prims_count)
class PrimitiveSet(PrimitiveSetTyped):
"""Class same as :class:`~deap.gp.PrimitiveSetTyped`, except there is no
definition of type.
"""
def __init__(self, name, arity, prefix="ARG"):
args = [__type__] * arity
PrimitiveSetTyped.__init__(self, name, args, __type__, prefix)
self.js = None
def addPrimitive(self, primitive, arity, name=None, js=None):
"""Add primitive *primitive* with arity *arity* to the set.
If a name *name* is provided, it will replace the attribute __name__
attribute to represent/identify the primitive.
"""
assert arity > 0, "arity should be >= 1"
args = [__type__] * arity
self.js = js
PrimitiveSetTyped.addPrimitive(self, primitive, args, __type__, name)
def addTerminal(self, terminal, name=None):
"""Add a terminal to the set."""
PrimitiveSetTyped.addTerminal(self, terminal, __type__, name)
def addEphemeralConstant(self, name, ephemeral):
"""Add an ephemeral constant to the set."""
PrimitiveSetTyped.addEphemeralConstant(self, name, ephemeral, __type__)
######################################
# GP Tree compilation functions #
######################################
def compile(expr, pset):
"""Compile the expression *expr*.
:param expr: Expression to compile. It can either be a PrimitiveTree,
a string of Python code or any object that when
converted into string produced a valid Python code
expression.
:param pset: Primitive set against which the expression is compile.
:returns: a function if the primitive set has 1 or more arguments,
or return the results produced by evaluating the tree.
"""
code = str(expr)
if len(pset.arguments) > 0:
# This section is a stripped version of the lambdify
# function of SymPy 0.6.6.
args = ",".join(arg for arg in pset.arguments)
code = "lambda {args}: {code}".format(args=args, code=code)
try:
return eval(code, pset.context, {})
except MemoryError:
_, _, traceback = sys.exc_info()
raise MemoryError, ("DEAP : Error in tree evaluation :"
" Python cannot evaluate a tree higher than 90. "
"To avoid this problem, you should use bloat control on your "
"operators. See the DEAP documentation for more information. "
"DEAP will now abort."), traceback
def compileADF(expr, psets):
"""Compile the expression represented by a list of trees. The first
element of the list is the main tree, and the following elements are
automatically defined functions (ADF) that can be called by the first
tree.
:param expr: Expression to compile. It can either be a PrimitiveTree,
a string of Python code or any object that when
converted into string produced a valid Python code
expression.
:param psets: List of primitive sets. Each set corresponds to an ADF
while the last set is associated with the expression
and should contain reference to the preceding ADFs.
:returns: a function if the main primitive set has 1 or more arguments,
or return the results produced by evaluating the tree.
"""
adfdict = {}
func = None
for pset, subexpr in reversed(zip(psets, expr)):
pset.context.update(adfdict)
func = compile(subexpr, pset)
adfdict.update({pset.name: func})
return func
######################################
# GP Program generation functions #
######################################
def genFull(pset, min_, max_, type_=None):
"""Generate an expression where each leaf has a the same depth
between *min* and *max*.
:param pset: Primitive set from which primitives are selected.
:param min_: Minimum height of the produced trees.
:param max_: Maximum Height of the produced trees.
:param type_: The type that should return the tree when called, when
:obj:`None` (default) the type of :pset: (pset.ret)
is assumed.
:returns: A full tree with all leaves at the same depth.
"""
def condition(height, depth):
"""Expression generation stops when the depth is equal to height."""
return depth == height
return generate(pset, min_, max_, condition, type_)
def genGrow(pset, min_, max_, type_=None):
"""Generate an expression where each leaf might have a different depth
between *min* and *max*.
:param pset: Primitive set from which primitives are selected.
:param min_: Minimum height of the produced trees.
:param max_: Maximum Height of the produced trees.
:param type_: The type that should return the tree when called, when
:obj:`None` (default) the type of :pset: (pset.ret)
is assumed.
:returns: A grown tree with leaves at possibly different depths.
"""
def condition(height, depth):
"""Expression generation stops when the depth is equal to height
        or when it is randomly determined that a node should be a terminal.
"""
return depth == height or \
(depth >= min_ and random.random() < pset.terminalRatio)
return generate(pset, min_, max_, condition, type_)
def genHalfAndHalf(pset, min_, max_, type_=None):
"""Generate an expression with a PrimitiveSet *pset*.
Half the time, the expression is generated with :func:`~deap.gp.genGrow`,
the other half, the expression is generated with :func:`~deap.gp.genFull`.
:param pset: Primitive set from which primitives are selected.
:param min_: Minimum height of the produced trees.
:param max_: Maximum Height of the produced trees.
:param type_: The type that should return the tree when called, when
:obj:`None` (default) the type of :pset: (pset.ret)
is assumed.
:returns: Either, a full or a grown tree.
"""
method = random.choice((genGrow, genFull))
return method(pset, min_, max_, type_)
def genRamped(pset, min_, max_, type_=None):
"""
.. deprecated:: 1.0
The function has been renamed. Use :func:`~deap.gp.genHalfAndHalf` instead.
"""
warnings.warn("gp.genRamped has been renamed. Use genHalfAndHalf instead.",
FutureWarning)
return genHalfAndHalf(pset, min_, max_, type_)
def generate(pset, min_, max_, condition, type_=None):
"""Generate a Tree as a list of list. The tree is build
from the root to the leaves, and it stop growing when the
condition is fulfilled.
:param pset: Primitive set from which primitives are selected.
:param min_: Minimum height of the produced trees.
:param max_: Maximum Height of the produced trees.
:param condition: The condition is a function that takes two arguments,
the height of the tree to build and the current
depth in the tree.
:param type_: The type that should return the tree when called, when
:obj:`None` (default) the type of :pset: (pset.ret)
is assumed.
:returns: A grown tree with leaves at possibly different depths
              depending on the condition function.
"""
if type_ is None:
type_ = pset.ret
expr = []
height = random.randint(min_, max_)
stack = [(0, type_)]
while len(stack) != 0:
depth, type_ = stack.pop()
if condition(height, depth):
try:
term = random.choice(pset.terminals[type_])
except IndexError:
_, _, traceback = sys.exc_info()
raise IndexError, "The gp.generate function tried to add "\
"a terminal of type '%s', but there is "\
"none available." % (type_,), traceback
if isclass(term):
term = term()
expr.append(term)
else:
try:
prim = random.choice(pset.primitives[type_])
except IndexError:
_, _, traceback = sys.exc_info()
raise IndexError, "The gp.generate function tried to add "\
"a primitive of type '%s', but there is "\
"none available." % (type_,), traceback
expr.append(prim)
for arg in reversed(prim.args):
stack.append((depth + 1, arg))
print 'in gen: ', expr
return expr
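# Illustrative sketch (editor addition): generate random expressions and wrap
# them in PrimitiveTree to inspect their height. ``pset`` is assumed to be an
# untyped PrimitiveSet such as the one sketched after compile() above.
def _demo_generate(pset):
    full_tree = PrimitiveTree(genFull(pset, min_=2, max_=2))
    grown_tree = PrimitiveTree(genGrow(pset, min_=1, max_=3))
    # genFull puts every leaf at the same depth, genGrow may stop earlier
    return full_tree.height, grown_tree.height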
######################################
# GP Crossovers #
######################################
def cxOnePoint(ind1, ind2):
"""Randomly select in each individual and exchange each subtree with the
point as root between each individual.
:param ind1: First tree participating in the crossover.
:param ind2: Second tree participating in the crossover.
:returns: A tuple of two trees.
"""
if len(ind1) < 2 or len(ind2) < 2:
# No crossover on single node tree
return ind1, ind2
# List all available primitive types in each individual
types1 = defaultdict(list)
types2 = defaultdict(list)
if ind1.root.ret == __type__:
# Not STGP optimization
types1[__type__] = xrange(1, len(ind1))
types2[__type__] = xrange(1, len(ind2))
common_types = [__type__]
else:
for idx, node in enumerate(ind1[1:], 1):
types1[node.ret].append(idx)
for idx, node in enumerate(ind2[1:], 1):
types2[node.ret].append(idx)
common_types = set(types1.keys()).intersection(set(types2.keys()))
if len(common_types) > 0:
type_ = random.choice(list(common_types))
index1 = random.choice(types1[type_])
index2 = random.choice(types2[type_])
slice1 = ind1.searchSubtree(index1)
slice2 = ind2.searchSubtree(index2)
ind1[slice1], ind2[slice2] = ind2[slice2], ind1[slice1]
return ind1, ind2
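# Illustrative sketch (editor addition): cxOnePoint operates directly on two
# PrimitiveTree instances; the same untyped ``pset`` assumption as above applies.
def _demo_cx_one_point(pset):
    ind1 = PrimitiveTree(genFull(pset, min_=2, max_=2))
    ind2 = PrimitiveTree(genGrow(pset, min_=1, max_=3))
    child1, child2 = cxOnePoint(ind1, ind2)
    return child1, child2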
def cxOnePointLeafBiased(ind1, ind2, termpb):
"""Randomly select crossover point in each individual and exchange each
subtree with the point as root between each individual.
:param ind1: First typed tree participating in the crossover.
:param ind2: Second typed tree participating in the crossover.
    :param termpb: The probability of choosing a terminal node (leaf).
:returns: A tuple of two typed trees.
When the nodes are strongly typed, the operator makes sure the
second node type corresponds to the first node type.
The parameter *termpb* sets the probability to choose between a terminal
or non-terminal crossover point. For instance, as defined by Koza, non-
terminal primitives are selected for 90% of the crossover points, and
terminals for 10%, so *termpb* should be set to 0.1.
"""
if len(ind1) < 2 or len(ind2) < 2:
# No crossover on single node tree
return ind1, ind2
    # Determine whether we keep terminals or primitives for each individual
terminal_op = partial(eq, 0)
primitive_op = partial(lt, 0)
arity_op1 = terminal_op if random.random() < termpb else primitive_op
arity_op2 = terminal_op if random.random() < termpb else primitive_op
# List all available primitive or terminal types in each individual
types1 = defaultdict(list)
types2 = defaultdict(list)
for idx, node in enumerate(ind1[1:], 1):
if arity_op1(node.arity):
types1[node.ret].append(idx)
for idx, node in enumerate(ind2[1:], 1):
if arity_op2(node.arity):
types2[node.ret].append(idx)
common_types = set(types1.keys()).intersection(set(types2.keys()))
if len(common_types) > 0:
# Set does not support indexing
type_ = random.sample(common_types, 1)[0]
index1 = random.choice(types1[type_])
index2 = random.choice(types2[type_])
slice1 = ind1.searchSubtree(index1)
slice2 = ind2.searchSubtree(index2)
ind1[slice1], ind2[slice2] = ind2[slice2], ind1[slice1]
return ind1, ind2
######################################
# GP Mutations #
######################################
def mutUniform(individual, expr, pset):
"""Randomly select a point in the tree *individual*, then replace the
subtree at that point as a root by the expression generated using method
:func:`expr`.
:param individual: The tree to be mutated.
:param expr: A function object that can generate an expression when
called.
:returns: A tuple of one tree.
"""
index = random.randrange(len(individual))
slice_ = individual.searchSubtree(index)
type_ = individual[index].ret
individual[slice_] = expr(pset=pset, type_=type_)
return individual,
def mutNodeReplacement(individual, pset):
"""Replaces a randomly chosen primitive from *individual* by a randomly
chosen primitive with the same number of arguments from the :attr:`pset`
attribute of the individual.
:param individual: The normal or typed tree to be mutated.
:returns: A tuple of one tree.
"""
if len(individual) < 2:
return individual,
index = random.randrange(1, len(individual))
node = individual[index]
if node.arity == 0: # Terminal
term = random.choice(pset.terminals[node.ret])
if isclass(term):
term = term()
individual[index] = term
else: # Primitive
prims = [p for p in pset.primitives[node.ret] if p.args == node.args]
individual[index] = random.choice(prims)
return individual,
def mutEphemeral(individual, mode):
"""This operator works on the constants of the tree *individual*. In
*mode* ``"one"``, it will change the value of one of the individual
ephemeral constants by calling its generator function. In *mode*
``"all"``, it will change the value of **all** the ephemeral constants.
:param individual: The normal or typed tree to be mutated.
:param mode: A string to indicate to change ``"one"`` or ``"all"``
ephemeral constants.
:returns: A tuple of one tree.
"""
if mode not in ["one", "all"]:
raise ValueError("Mode must be one of \"one\" or \"all\"")
ephemerals_idx = [index
for index, node in enumerate(individual)
if isinstance(node, Ephemeral)]
if len(ephemerals_idx) > 0:
if mode == "one":
ephemerals_idx = (random.choice(ephemerals_idx),)
for i in ephemerals_idx:
individual[i] = type(individual[i])()
return individual,
def mutInsert(individual, pset):
"""Inserts a new branch at a random position in *individual*. The subtree
at the chosen position is used as child node of the created subtree, in
that way, it is really an insertion rather than a replacement. Note that
the original subtree will become one of the children of the new primitive
inserted, but not perforce the first (its position is randomly selected if
the new primitive has more than one child).
:param individual: The normal or typed tree to be mutated.
:returns: A tuple of one tree.
"""
index = random.randrange(len(individual))
node = individual[index]
slice_ = individual.searchSubtree(index)
choice = random.choice
# As we want to keep the current node as children of the new one,
# it must accept the return value of the current node
primitives = [p for p in pset.primitives[node.ret] if node.ret in p.args]
if len(primitives) == 0:
return individual,
new_node = choice(primitives)
new_subtree = [None] * len(new_node.args)
position = choice([i for i, a in enumerate(new_node.args) if a == node.ret])
for i, arg_type in enumerate(new_node.args):
if i != position:
term = choice(pset.terminals[arg_type])
if isclass(term):
term = term()
new_subtree[i] = term
new_subtree[position:position + 1] = individual[slice_]
new_subtree.insert(0, new_node)
individual[slice_] = new_subtree
return individual,
def mutShrink(individual):
"""This operator shrinks the *individual* by chosing randomly a branch and
replacing it with one of the branch's arguments (also randomly chosen).
:param individual: The tree to be shrinked.
:returns: A tuple of one tree.
"""
# We don't want to "shrink" the root
if len(individual) < 3 or individual.height <= 1:
return individual,
iprims = []
for i, node in enumerate(individual[1:], 1):
if isinstance(node, Primitive) and node.ret in node.args:
iprims.append((i, node))
if len(iprims) != 0:
index, prim = random.choice(iprims)
arg_idx = random.choice([i for i, type_ in enumerate(prim.args) if type_ == prim.ret])
rindex = index + 1
for _ in range(arg_idx + 1):
rslice = individual.searchSubtree(rindex)
subtree = individual[rslice]
rindex += len(subtree)
slice_ = individual.searchSubtree(index)
individual[slice_] = subtree
return individual,
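# --- Illustrative usage sketch (not part of the library API) -----------------
# A minimal, hedged example of how the mutation operators above are typically
# registered on a DEAP toolbox. The names ``toolbox`` and ``pset`` are
# assumptions supplied by the caller; ``genFull`` is assumed to be the
# expression generator defined earlier in this module.
def _example_register_mutations(toolbox, pset):
    """Illustrative only: wire the mutation operators above into a toolbox."""
    # mutUniform needs an expression generator for the replacement subtree.
    toolbox.register("expr_mut", genFull, min_=0, max_=2)
    toolbox.register("mutate_uniform", mutUniform, expr=toolbox.expr_mut,
                     pset=pset)
    toolbox.register("mutate_node", mutNodeReplacement, pset=pset)
    toolbox.register("mutate_ephemeral", mutEphemeral, mode="one")
    toolbox.register("mutate_insert", mutInsert, pset=pset)
    toolbox.register("mutate_shrink", mutShrink)
    return toolbox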
######################################
# GP bloat control decorators #
######################################
def staticLimit(key, max_value):
"""Implement a static limit on some measurement on a GP tree, as defined
by Koza in [Koza1989]. It may be used to decorate both crossover and
mutation operators. When an invalid (over the limit) child is generated,
it is simply replaced by one of its parents, randomly selected.
    This operator can be used to avoid memory errors occurring when the tree
gets higher than 90 levels (as Python puts a limit on the call stack
depth), because it can ensure that no tree higher than this limit will ever
be accepted in the population, except if it was generated at initialization
time.
    :param key: The function to use in order to get the wanted value. For
instance, on a GP tree, ``operator.attrgetter('height')`` may
be used to set a depth limit, and ``len`` to set a size limit.
:param max_value: The maximum value allowed for the given measurement.
:returns: A decorator that can be applied to a GP operator using \
:func:`~deap.base.Toolbox.decorate`
.. note::
If you want to reproduce the exact behavior intended by Koza, set
*key* to ``operator.attrgetter('height')`` and *max_value* to 17.
.. [Koza1989] J.R. Koza, Genetic Programming - On the Programming of
Computers by Means of Natural Selection (MIT Press,
Cambridge, MA, 1992)
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
keep_inds = [copy.deepcopy(ind) for ind in args]
new_inds = list(func(*args, **kwargs))
for i, ind in enumerate(new_inds):
if key(ind) > max_value:
new_inds[i] = random.choice(keep_inds)
return new_inds
return wrapper
return decorator
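# --- Illustrative usage sketch (not part of the library API) -----------------
# A minimal, hedged example of applying :func:`staticLimit` through
# ``Toolbox.decorate``, using the Koza-style settings mentioned in the
# docstring (tree height capped at 17). A ``toolbox`` with registered "mate"
# and "mutate" aliases is an assumption supplied by the caller.
def _example_static_limit(toolbox):
    """Illustrative only: cap tree height for crossover and mutation."""
    import operator
    height_limit = staticLimit(key=operator.attrgetter("height"), max_value=17)
    toolbox.decorate("mate", height_limit)
    toolbox.decorate("mutate", height_limit)
    return toolbox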
######################################
# GP bloat control algorithms #
######################################
def harm(population, toolbox, cxpb, mutpb, ngen,
alpha, beta, gamma, rho, nbrindsmodel=-1, mincutoff=20,
stats=None, halloffame=None, verbose=__debug__):
"""Implement bloat control on a GP evolution using HARM-GP, as defined in
[Gardner2015]. It is implemented in the form of an evolution algorithm
(similar to :func:`~deap.algorithms.eaSimple`).
:param population: A list of individuals.
:param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
operators.
:param cxpb: The probability of mating two individuals.
:param mutpb: The probability of mutating an individual.
    :param ngen: The number of generations.
:param alpha: The HARM *alpha* parameter.
:param beta: The HARM *beta* parameter.
:param gamma: The HARM *gamma* parameter.
:param rho: The HARM *rho* parameter.
:param nbrindsmodel: The number of individuals to generate in order to
model the natural distribution. -1 is a special
value which uses the equation proposed in
[Gardner2015] to set the value of this parameter :
max(2000, len(population))
:param mincutoff: The absolute minimum value for the cutoff point. It is
used to ensure that HARM does not shrink the population
too much at the beginning of the evolution. The default
value is usually fine.
:param stats: A :class:`~deap.tools.Statistics` object that is updated
inplace, optional.
:param halloffame: A :class:`~deap.tools.HallOfFame` object that will
contain the best individuals, optional.
:param verbose: Whether or not to log the statistics.
:returns: The final population
    :returns: A :class:`~deap.tools.Logbook` with the statistics of the
evolution
This function expects the :meth:`toolbox.mate`, :meth:`toolbox.mutate`,
:meth:`toolbox.select` and :meth:`toolbox.evaluate` aliases to be
registered in the toolbox.
.. note::
The recommended values for the HARM-GP parameters are *alpha=0.05*,
*beta=10*, *gamma=0.25*, *rho=0.9*. However, these parameters can be
adjusted to perform better on a specific problem (see the relevant
paper for tuning information). The number of individuals used to
model the natural distribution and the minimum cutoff point are less
important, their default value being effective in most cases.
.. [Gardner2015] M.-A. Gardner, C. Gagne, and M. Parizeau, Controlling
Code Growth by Dynamically Shaping the Genotype Size Distribution,
Genetic Programming and Evolvable Machines, 2015,
DOI 10.1007/s10710-015-9242-8
"""
def _genpop(n, pickfrom=[], acceptfunc=lambda s: True, producesizes=False):
# Generate a population of n individuals, using individuals in
        # *pickfrom* if possible, with an *acceptfunc* acceptance function.
# If *producesizes* is true, also return a list of the produced
# individuals sizes.
# This function is used 1) to generate the natural distribution
        # (in this case, pickfrom and acceptfunc should be left at their
# default values) and 2) to generate the final population, in which
# case pickfrom should be the natural population previously generated
# and acceptfunc a function implementing the HARM-GP algorithm.
producedpop = []
producedpopsizes = []
while len(producedpop) < n:
if len(pickfrom) > 0:
# If possible, use the already generated
# individuals (more efficient)
aspirant = pickfrom.pop()
if acceptfunc(len(aspirant)):
producedpop.append(aspirant)
if producesizes:
producedpopsizes.append(len(aspirant))
else:
opRandom = random.random()
if opRandom < cxpb:
# Crossover
aspirant1, aspirant2 = toolbox.mate(*map(toolbox.clone,
toolbox.select(population, 2)))
del aspirant1.fitness.values, aspirant2.fitness.values
if acceptfunc(len(aspirant1)):
producedpop.append(aspirant1)
if producesizes:
producedpopsizes.append(len(aspirant1))
if len(producedpop) < n and acceptfunc(len(aspirant2)):
producedpop.append(aspirant2)
if producesizes:
producedpopsizes.append(len(aspirant2))
else:
aspirant = toolbox.clone(toolbox.select(population, 1)[0])
if opRandom - cxpb < mutpb:
# Mutation
aspirant = toolbox.mutate(aspirant)[0]
del aspirant.fitness.values
if acceptfunc(len(aspirant)):
producedpop.append(aspirant)
if producesizes:
producedpopsizes.append(len(aspirant))
if producesizes:
return producedpop, producedpopsizes
else:
return producedpop
halflifefunc = lambda x: (x * float(alpha) + beta)
if nbrindsmodel == -1:
nbrindsmodel = max(2000, len(population))
logbook = tools.Logbook()
logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in population if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
if halloffame is not None:
halloffame.update(population)
record = stats.compile(population) if stats else {}
logbook.record(gen=0, nevals=len(invalid_ind), **record)
if verbose:
        print(logbook.stream)
# Begin the generational process
for gen in range(1, ngen + 1):
        # Estimate the population's natural distribution of sizes
naturalpop, naturalpopsizes = _genpop(nbrindsmodel, producesizes=True)
naturalhist = [0] * (max(naturalpopsizes) + 3)
for indsize in naturalpopsizes:
# Kernel density estimation application
naturalhist[indsize] += 0.4
naturalhist[indsize - 1] += 0.2
naturalhist[indsize + 1] += 0.2
naturalhist[indsize + 2] += 0.1
if indsize - 2 >= 0:
naturalhist[indsize - 2] += 0.1
# Normalization
naturalhist = [val * len(population) / nbrindsmodel for val in naturalhist]
# Cutoff point selection
sortednatural = sorted(naturalpop, key=lambda ind: ind.fitness)
cutoffcandidates = sortednatural[int(len(population) * rho - 1):]
# Select the cutoff point, with an absolute minimum applied
# to avoid weird cases in the first generations
cutoffsize = max(mincutoff, len(min(cutoffcandidates, key=len)))
# Compute the target distribution
targetfunc = lambda x: (gamma * len(population) * math.log(2) /
halflifefunc(x)) * math.exp(-math.log(2) *
(x - cutoffsize) / halflifefunc(x))
targethist = [naturalhist[binidx] if binidx <= cutoffsize else
targetfunc(binidx) for binidx in range(len(naturalhist))]
# Compute the probabilities distribution
probhist = [t / n if n > 0 else t for n, t in zip(naturalhist, targethist)]
probfunc = lambda s: probhist[s] if s < len(probhist) else targetfunc(s)
acceptfunc = lambda s: random.random() <= probfunc(s)
# Generate offspring using the acceptance probabilities
# previously computed
offspring = _genpop(len(population), pickfrom=naturalpop,
acceptfunc=acceptfunc, producesizes=False)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# Update the hall of fame with the generated individuals
if halloffame is not None:
halloffame.update(offspring)
# Replace the current population by the offspring
population[:] = offspring
# Append the current generation statistics to the logbook
record = stats.compile(population) if stats else {}
logbook.record(gen=gen, nevals=len(invalid_ind), **record)
if verbose:
            print(logbook.stream)
return population, logbook
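# --- Illustrative usage sketch (not part of the library API) -----------------
# A hedged example of driving the HARM-GP loop above with the parameter values
# recommended in the docstring. The toolbox (with "mate", "mutate", "select",
# "evaluate" aliases and a "population" factory) and the optional statistics
# objects are assumptions supplied by the caller.
def _example_harm(toolbox, stats=None, hof=None):
    """Illustrative only: run HARM-GP with the recommended parameters."""
    pop = toolbox.population(n=300)
    pop, logbook = harm(pop, toolbox, cxpb=0.5, mutpb=0.1, ngen=40,
                        alpha=0.05, beta=10, gamma=0.25, rho=0.9,
                        stats=stats, halloffame=hof)
    return pop, logbook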
def graph(expr):
"""Construct the graph of a tree expression. The tree expression must be
valid. It returns in order a node list, an edge list, and a dictionary of
the per node labels. The node are represented by numbers, the edges are
tuples connecting two nodes (number), and the labels are values of a
dictionary for which keys are the node numbers.
:param expr: A tree expression to convert into a graph.
:returns: A node list, an edge list, and a dictionary of labels.
The returned objects can be used directly to populate a
`pygraphviz <http://networkx.lanl.gov/pygraphviz/>`_ graph::
import pygraphviz as pgv
# [...] Execution of code that produce a tree expression
nodes, edges, labels = graph(expr)
g = pgv.AGraph()
g.add_nodes_from(nodes)
g.add_edges_from(edges)
g.layout(prog="dot")
for i in nodes:
n = g.get_node(i)
n.attr["label"] = labels[i]
g.draw("tree.pdf")
    or a `NetworkX <http://networkx.github.com/>`_ graph::
import matplotlib.pyplot as plt
import networkx as nx
# [...] Execution of code that produce a tree expression
nodes, edges, labels = graph(expr)
g = nx.Graph()
g.add_nodes_from(nodes)
g.add_edges_from(edges)
pos = nx.graphviz_layout(g, prog="dot")
nx.draw_networkx_nodes(g, pos)
nx.draw_networkx_edges(g, pos)
nx.draw_networkx_labels(g, pos, labels)
plt.show()
.. note::
We encourage you to use `pygraphviz
<http://networkx.lanl.gov/pygraphviz/>`_ as the nodes might be plotted
        out of order when using `NetworkX <http://networkx.github.com/>`_.
"""
nodes = range(len(expr))
edges = list()
labels = dict()
stack = []
for i, node in enumerate(expr):
if stack:
edges.append((stack[-1][0], i))
stack[-1][1] -= 1
labels[i] = node.name if isinstance(node, Primitive) else node.value
stack.append([i, node.arity])
while stack and stack[-1][1] == 0:
stack.pop()
return nodes, edges, labels
if __name__ == "__main__":
import doctest
doctest.testmod()
| mit |
devanshdalal/scikit-learn | sklearn/ensemble/tests/test_bagging.py | 43 | 28175 | """
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.model_selection import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston, load_iris, make_hastie_10_2
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
# Check classification for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
# Check classification for various parameter settings on sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVC, self).fit(X, y)
self.data_type_ = type(X)
return self
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
for f in ['predict', 'predict_proba', 'predict_log_proba', 'decision_function']:
# Trained on sparse format
sparse_classifier = BaggingClassifier(
base_estimator=CustomSVC(decision_function_shape='ovr'),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = getattr(sparse_classifier, f)(X_test_sparse)
# Trained on dense format
dense_classifier = BaggingClassifier(
base_estimator=CustomSVC(decision_function_shape='ovr'),
random_state=1,
**params
).fit(X_train, y_train)
dense_results = getattr(dense_classifier, f)(X_test)
assert_array_equal(sparse_results, dense_results)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([t == sparse_type for t in types])
def test_regression():
# Check regression for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [0.5, 1.0],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyRegressor(),
DecisionTreeRegressor(),
KNeighborsRegressor(),
SVR()]:
for params in grid:
BaggingRegressor(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
# Check regression for various parameter settings on sparse input.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
class CustomSVR(SVR):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVR, self).fit(X, y)
self.data_type_ = type(X)
return self
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
assert_array_equal(sparse_results, dense_results)
def test_bootstrap_samples():
# Test that bootstrapping samples generate non-perfect base estimators.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=False,
random_state=rng).fit(X_train, y_train)
assert_equal(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=True,
random_state=rng).fit(X_train, y_train)
assert_greater(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
def test_bootstrap_features():
# Test that bootstrapping features may generate duplicate features.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_equal(boston.data.shape[1], np.unique(features).shape[0])
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=True,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_greater(boston.data.shape[1], np.unique(features).shape[0])
def test_probability():
# Predict probabilities.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
random_state=rng).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
random_state=rng,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BaggingClassifier(base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingClassifier(base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_oob_score_regression():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=50,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_single_estimator():
# Check singleton ensembles.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
clf2 = KNeighborsRegressor().fit(X_train, y_train)
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
# Test that it gives proper exception on deficient input.
X, y = iris.data, iris.target
base = DecisionTreeClassifier()
# Test max_samples
assert_raises(ValueError,
BaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples="foobar").fit, X, y)
# Test max_features
assert_raises(ValueError,
BaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features="foobar").fit, X, y)
# Test support of decision_function
assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function'))
def test_parallel_classification():
# Check parallel classification.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
# predict_proba
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict_proba(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y3)
# decision_function
ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
decisions1 = ensemble.decision_function(X_test)
ensemble.set_params(n_jobs=2)
decisions2 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions2)
ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'),
n_jobs=1,
random_state=0).fit(X_train, y_train)
decisions3 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
# Check parallel regression.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_gridsearch():
# Check that bagging ensembles can be grid-searched.
# Transform iris into a binary classification task
X, y = iris.data, iris.target
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
# Check base_estimator and its default values.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, Perceptron))
# Regression
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(SVR(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, SVR))
def test_bagging_with_pipeline():
estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(iris.data, iris.target)
assert_true(isinstance(estimator[0].steps[-1][1].random_state,
int))
class DummyZeroEstimator(BaseEstimator):
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def predict(self, X):
return self.classes_[np.zeros(X.shape[0], dtype=int)]
def test_bagging_sample_weight_unsupported_but_passed():
estimator = BaggingClassifier(DummyZeroEstimator())
rng = check_random_state(0)
estimator.fit(iris.data, iris.target).predict(iris.data)
assert_raises(ValueError, estimator.fit, iris.data, iris.target,
sample_weight=rng.randint(10, size=(iris.data.shape[0])))
def test_warm_start(random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
    # Test if a warm-started second fit with smaller n_estimators raises an error.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# modify X to nonsense values, this should not change anything
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators does not",
clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
# Check using oob_score and warm_start simultaneously fails
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
X, y = make_hastie_10_2(n_samples=2000, random_state=1)
clf = BaggingClassifier(n_estimators=50, oob_score=True)
clf.fit(X, y)
clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
clf.fit(X, y)
assert_raises(AttributeError, getattr, clf, "oob_score_")
def test_oob_score_consistency():
# Make sure OOB scores are identical when random_state, estimator, and
# training data are fixed and fitting is done twice
X, y = make_hastie_10_2(n_samples=200, random_state=1)
bagging = BaggingClassifier(KNeighborsClassifier(), max_samples=0.5,
max_features=0.5, oob_score=True,
random_state=1)
assert_equal(bagging.fit(X, y).oob_score_, bagging.fit(X, y).oob_score_)
def test_estimators_samples():
# Check that format of estimators_samples_ is correct and that results
# generated at fit time can be identically reproduced at a later time
# using data saved in object attributes.
X, y = make_hastie_10_2(n_samples=200, random_state=1)
bagging = BaggingClassifier(LogisticRegression(), max_samples=0.5,
max_features=0.5, random_state=1,
bootstrap=False)
bagging.fit(X, y)
# Get relevant attributes
estimators_samples = bagging.estimators_samples_
estimators_features = bagging.estimators_features_
estimators = bagging.estimators_
# Test for correct formatting
assert_equal(len(estimators_samples), len(estimators))
assert_equal(len(estimators_samples[0]), len(X))
assert_equal(estimators_samples[0].dtype.kind, 'b')
# Re-fit single estimator to test for consistent sampling
estimator_index = 0
estimator_samples = estimators_samples[estimator_index]
estimator_features = estimators_features[estimator_index]
estimator = estimators[estimator_index]
X_train = (X[estimator_samples])[:, estimator_features]
y_train = y[estimator_samples]
orig_coefs = estimator.coef_
estimator.fit(X_train, y_train)
new_coefs = estimator.coef_
assert_array_almost_equal(orig_coefs, new_coefs)
def test_max_samples_consistency():
# Make sure validated max_samples and original max_samples are identical
# when valid integer max_samples supplied by user
max_samples = 100
X, y = make_hastie_10_2(n_samples=2*max_samples, random_state=1)
bagging = BaggingClassifier(KNeighborsClassifier(),
max_samples=max_samples,
max_features=0.5, random_state=1)
bagging.fit(X, y)
assert_equal(bagging._max_samples, max_samples)
| bsd-3-clause |
rmcgibbo/mdtraj | mdtraj/formats/mol2.py | 5 | 9925 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Kyle A. Beauchamp
# Contributors: Robert McGibbon, John D. Chodera
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
#
# Portions of this code originate from the OpenMM molecular simulation
# toolkit, copyright (c) 2012 Stanford University and Peter Eastman. Those
# portions are distributed under the following terms:
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################
"""Load an md.Topology from tripos mol2 files.
"""
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import numpy as np
import itertools
import re
from mdtraj.utils import import_
from mdtraj.utils.six.moves import cStringIO as StringIO
from mdtraj.formats.registry import FormatRegistry
from mdtraj.core import element as elem
__all__ = ['load_mol2', "mol2_to_dataframes"]
@FormatRegistry.register_loader('.mol2')
def load_mol2(filename):
"""Load a TRIPOS mol2 file from disk.
Parameters
----------
filename : str
        Path to the mol2 file on disk.
Returns
-------
traj : md.Trajectory
        The resulting trajectory, as an md.Trajectory object.
Notes
-----
This function should work on GAFF and sybyl style MOL2 files, but has
been primarily tested on GAFF mol2 files.
This function does NOT accept multi-structure MOL2 files!!!
The elements are guessed using GAFF atom types or via the atype string.
Examples
--------
>>> traj = md.load_mol2('mysystem.mol2')
"""
from mdtraj.core.trajectory import Trajectory
from mdtraj.core.topology import Topology, Single, Double, Triple, Aromatic, Amide
atoms, bonds = mol2_to_dataframes(filename)
atoms_mdtraj = atoms[["name", "resName"]].copy()
atoms_mdtraj["serial"] = atoms.index
    # Figure out one-letter element names
    # If this is a GAFF mol2, this line should work without issues
atoms_mdtraj["element"] = atoms.atype.map(gaff_elements)
# If this is a sybyl mol2, there should be NAN (null) values
if atoms_mdtraj.element.isnull().any():
# If this is a sybyl mol2, I think this works generally.
# Argument x is being passed as a list with only one element.
def to_element(x):
if isinstance(x, (list, tuple)):
assert len(x) == 1
x = x[0]
            if '.' in x:  # orbital hybridizations in SYBYL
return x.split('.')[0]
try:
# check if we can convert the whole str to an Element,
# if not, we only pass the first letter.
from mdtraj.core.element import Element
Element.getBySymbol(x)
except KeyError:
return x[0]
return x
atoms_mdtraj["element"] = atoms.atype.apply(to_element)
# Check if elements inferred from atoms.atype are valid
# If not, try to infer elements from atoms.name
try:
atoms_mdtraj['element'].apply(elem.get_by_symbol)
except KeyError:
try:
atoms_mdtraj["element"] = atoms.name.apply(to_element)
atoms_mdtraj['element'].apply(elem.get_by_symbol)
except KeyError:
raise KeyError('Invalid element passed to atoms DataFrame')
atoms_mdtraj['resSeq'] = atoms['code']
atoms_mdtraj["chainID"] = np.ones(len(atoms), 'int')
bond_type_map = {
'1': Single,
'2': Double,
'3': Triple,
'am': Amide,
'ar': Aromatic
}
if bonds is not None:
bonds_mdtraj = bonds[["id0", "id1"]].values
offset = bonds_mdtraj.min() # Should this just be 1???
bonds_mdtraj -= offset
# Create the bond augment information
n_bonds = bonds_mdtraj.shape[0]
bond_augment = np.zeros([n_bonds, 2], dtype=float)
# Add bond type information
bond_augment[:, 0] = [float(bond_type_map[str(bond_value)]) for bond_value in bonds["bond_type"].values]
# Add Bond "order" information, this is not known from Mol2 files
bond_augment[:, 1] = [0.0 for _ in range(n_bonds)]
# Augment array, dtype is cast to minimal representation of float
bonds_mdtraj = np.append(bonds_mdtraj, bond_augment, axis=-1)
else:
bonds_mdtraj = None
top = Topology.from_dataframe(atoms_mdtraj, bonds_mdtraj)
xyzlist = np.array([atoms[["x", "y", "z"]].values])
xyzlist /= 10.0 # Convert from angstrom to nanometer
traj = Trajectory(xyzlist, top)
return traj
def mol2_to_dataframes(filename):
"""Convert a GAFF (or sybyl) mol2 file to a pair of pandas dataframes.
Parameters
----------
filename : str
        Name of the mol2 file
Returns
-------
atoms_frame : pd.DataFrame
DataFrame containing atom information
bonds_frame : pd.DataFrame
DataFrame containing bond information
Notes
-----
These dataframes may contain force field information as well as the
information necessary for constructing the coordinates and molecular
topology. This function has been tested for GAFF and sybyl-style
mol2 files but has been primarily tested on GAFF mol2 files.
This function does NOT accept multi-structure MOL2 files!!!
See Also
--------
If you just need the coordinates and bonds, use load_mol2(filename)
to get a Trajectory object.
"""
pd = import_('pandas')
with open(filename) as f:
data = dict((key, list(grp)) for key, grp in itertools.groupby(f, _parse_mol2_sections))
# Mol2 can have "status bits" at the end of the bond lines. We don't care
    # about these, but they interfere with using pd.read_table because it looks
# like one line has too many columns. So we just regex out the offending
# text.
status_bit_regex = r"BACKBONE|DICT|INTERRES|\|"
data["@<TRIPOS>BOND\n"] = [re.sub(status_bit_regex, lambda _: "", s)
for s in data["@<TRIPOS>BOND\n"]]
if len(data["@<TRIPOS>BOND\n"]) > 1:
csv = StringIO()
csv.writelines(data["@<TRIPOS>BOND\n"][1:])
csv.seek(0)
bonds_frame = pd.read_table(csv, names=["bond_id", "id0", "id1", "bond_type"],
                                    index_col=0, header=None, sep=r"\s+", engine='python')
else:
bonds_frame = None
csv = StringIO()
csv.writelines(data["@<TRIPOS>ATOM\n"][1:])
csv.seek(0)
    atoms_frame = pd.read_csv(csv, sep=r"\s+", engine='python', header=None)
ncols = atoms_frame.shape[1]
names=["serial", "name", "x", "y", "z", "atype", "code", "resName", "charge", "status"]
atoms_frame.columns = names[:ncols]
return atoms_frame, bonds_frame
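# --- Illustrative usage sketch (not part of the library API) -----------------
# A small, hedged example of inspecting the two dataframes returned above.
# The file name "molecule.mol2" is a placeholder; the column names assumed
# here are the ones assigned in ``mol2_to_dataframes``.
def _example_mol2_to_dataframes():
    """Illustrative only: peek at the atom and bond tables of a mol2 file."""
    atoms, bonds = mol2_to_dataframes("molecule.mol2")
    print(atoms[["name", "atype", "x", "y", "z"]].head())
    if bonds is not None:
        print(bonds[["id0", "id1", "bond_type"]].head())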
def _parse_mol2_sections(x):
"""Helper function for parsing a section in a MOL2 file."""
if x.startswith('@<TRIPOS>'):
_parse_mol2_sections.key = x
return _parse_mol2_sections.key
gaff_elements = {
'br': 'Br',
'c': 'C',
'c1': 'C',
'c2': 'C',
'c3': 'C',
'ca': 'C',
'cc': 'C',
'cd': 'C',
'ce': 'C',
'cf': 'C',
'cg': 'C',
'ch': 'C',
'cl': 'Cl',
'cp': 'C',
'cq': 'C',
'cu': 'C',
'cv': 'C',
'cx': 'C',
'cy': 'C',
'cz': 'C',
'f': 'F',
'h1': 'H',
'h2': 'H',
'h3': 'H',
'h4': 'H',
'h5': 'H',
'ha': 'H',
'hc': 'H',
'hn': 'H',
'ho': 'H',
'hp': 'H',
'hs': 'H',
'hw': 'H',
'hx': 'H',
'i': 'I',
'n': 'N',
'n1': 'N',
'n2': 'N',
'n3': 'N',
'n4': 'N',
'na': 'N',
'nb': 'N',
'nc': 'N',
'nd': 'N',
'ne': 'N',
'nf': 'N',
'nh': 'N',
'no': 'N',
'o': 'O',
'oh': 'O',
'os': 'O',
'ow': 'O',
'p2': 'P',
'p3': 'P',
'p4': 'P',
'p5': 'P',
'pb': 'P',
'px': 'P',
'py': 'P',
's': 'S',
's2': 'S',
's4': 'S',
's6': 'S',
'sh': 'S',
'ss': 'S',
'sx': 'S',
'sy': 'S'}
| lgpl-2.1 |
alvarofierroclavero/scikit-learn | sklearn/preprocessing/tests/test_function_transformer.py | 176 | 2169 | from nose.tools import assert_equal
import numpy as np
from sklearn.preprocessing import FunctionTransformer
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
def _func(X, *args, **kwargs):
args_store.append(X)
args_store.extend(args)
kwargs_store.update(kwargs)
return func(X)
return _func
def test_delegate_to_func():
# (args|kwargs)_store will hold the positional and keyword arguments
# passed to the function inside the FunctionTransformer.
args_store = []
kwargs_store = {}
X = np.arange(10).reshape((5, 2))
np.testing.assert_array_equal(
FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
X,
'transform should have returned X unchanged',
)
    # The function should only have received X.
assert_equal(
args_store,
[X],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
# reset the argument stores.
args_store[:] = [] # python2 compatible inplace list clear.
kwargs_store.clear()
y = object()
np.testing.assert_array_equal(
FunctionTransformer(
_make_func(args_store, kwargs_store),
pass_y=True,
).transform(X, y),
X,
'transform should have returned X unchanged',
)
    # The function should have received X and y.
assert_equal(
args_store,
[X, y],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
def test_np_log():
X = np.arange(10).reshape((5, 2))
# Test that the numpy.log example still works.
np.testing.assert_array_equal(
FunctionTransformer(np.log1p).transform(X),
np.log1p(X),
)
| bsd-3-clause |
AlexanderFabisch/scikit-learn | examples/svm/plot_separating_hyperplane_unbalanced.py | 329 | 1850 | """
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating plane with a plain SVC and then plot
(dashed) the separating hyperplane with automatically correction for
unbalanced classes.
.. currentmodule:: sklearn.linear_model
.. note::
This example will also work by replacing ``SVC(kernel="linear")``
with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter
of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour
    such as that of an SVC with a linear kernel.
For example try instead of the ``SVC``::
clf = SGDClassifier(n_iter=100, alpha=0.01)
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
#from sklearn.linear_model import SGDClassifier
# we create 40 separable points
rng = np.random.RandomState(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * rng.randn(n_samples_1, 2),
0.5 * rng.randn(n_samples_2, 2) + [2, 2]]
y = [0] * (n_samples_1) + [1] * (n_samples_2)
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_[0] / w[1]
# get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)
ww = wclf.coef_[0]
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_[0] / ww[1]
# plot separating hyperplanes and samples
h0 = plt.plot(xx, yy, 'k-', label='no weights')
h1 = plt.plot(xx, wyy, 'k--', label='with weights')
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.legend()
plt.axis('tight')
plt.show()
| bsd-3-clause |
etkirsch/scikit-learn | sklearn/preprocessing/label.py | 137 | 27165 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
        if len(diff):
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
utils.multiclass.type_of_target. Possible type are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
        'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The ``indicator_matrix_`` attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
contain 0 and 1, represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
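# --- Illustrative usage sketch (not part of the public API) ------------------
# A brief, hedged example of the behaviour documented in
# ``LabelBinarizer.inverse_transform``: for multiclass targets the class with
# the greatest score wins, so decision_function-style scores can be decoded
# directly. The score values below are made up for illustration.
def _example_inverse_transform_scores():
    """Illustrative only: recover class labels from per-class scores."""
    lb = LabelBinarizer()
    lb.fit([1, 2, 6, 4, 2])                       # classes_ == [1, 2, 4, 6]
    scores = np.array([[-1.2, 0.3, -0.5, 2.1],    # largest score -> class 6
                       [0.7, -0.1, -0.3, -2.0]])  # largest score -> class 1
    return lb.inverse_transform(scores)           # array([6, 1])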
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
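# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream scikit-learn module) of the
# neg_label/pos_label and sparse_output options documented above. The helper
# name is ours; only ``label_binarize`` itself is assumed.
def _example_label_binarize_labels():
    dense = label_binarize([1, 6], classes=[1, 2, 4, 6],
                           neg_label=-1, pos_label=1)
    # dense == array([[ 1, -1, -1, -1],
    #                 [-1, -1, -1,  1]])
    sparse = label_binarize([1, 6], classes=[1, 2, 4, 6], sparse_output=True)
    # sparse is a scipy.sparse CSR matrix holding the 0/1 indicator.
    return dense, sparse.toarray()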
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
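# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream scikit-learn module): the
# dense branch of _inverse_binarize_multiclass simply takes the per-row
# argmax, so ties resolve to the first (lowest-index) class. The helper name
# is ours; it assumes ``np`` imported at the top of this file.
def _example_inverse_binarize_multiclass():
    classes = np.array(['a', 'b', 'c'])
    scores = np.array([[0.1, 0.9, 0.0],
                       [0.5, 0.5, 0.2]])   # tie between 'a' and 'b'
    return _inverse_binarize_multiclass(scores, classes)   # -> ['b', 'a']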
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
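# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream scikit-learn module): a
# MultiLabelBinarizer round trip from label sets to an indicator matrix and
# back. The helper name is ours; only the class defined above is assumed.
def _example_multilabel_binarizer_roundtrip():
    mlb = MultiLabelBinarizer()
    yt = mlb.fit_transform([(1, 2), (3,), (1, 3)])
    # yt == array([[1, 1, 0],
    #              [0, 0, 1],
    #              [1, 0, 1]])   with mlb.classes_ == array([1, 2, 3])
    return mlb.inverse_transform(yt)   # -> [(1, 2), (3,), (1, 3)]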
| bsd-3-clause |
mingwpy/scipy | scipy/stats/_multivariate.py | 35 | 69253 | #
# Author: Joris Vankerschaver 2013
#
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.linalg
from scipy.misc import doccer
from scipy.special import gammaln, psi, multigammaln
from scipy._lib._util import check_random_state
__all__ = ['multivariate_normal', 'dirichlet', 'wishart', 'invwishart']
_LOG_2PI = np.log(2 * np.pi)
_LOG_2 = np.log(2)
_LOG_PI = np.log(np.pi)
def _process_parameters(dim, mean, cov):
"""
Infer dimensionality from mean or covariance matrix, ensure that
mean and covariance are full vector resp. matrix.
"""
# Try to infer dimensionality
if dim is None:
if mean is None:
if cov is None:
dim = 1
else:
cov = np.asarray(cov, dtype=float)
if cov.ndim < 2:
dim = 1
else:
dim = cov.shape[0]
else:
mean = np.asarray(mean, dtype=float)
dim = mean.size
else:
if not np.isscalar(dim):
raise ValueError("Dimension of random variable must be a scalar.")
# Check input sizes and return full arrays for mean and cov if necessary
if mean is None:
mean = np.zeros(dim)
mean = np.asarray(mean, dtype=float)
if cov is None:
cov = 1.0
cov = np.asarray(cov, dtype=float)
if dim == 1:
mean.shape = (1,)
cov.shape = (1, 1)
if mean.ndim != 1 or mean.shape[0] != dim:
raise ValueError("Array 'mean' must be a vector of length %d." % dim)
if cov.ndim == 0:
cov = cov * np.eye(dim)
elif cov.ndim == 1:
cov = np.diag(cov)
elif cov.ndim == 2 and cov.shape != (dim, dim):
rows, cols = cov.shape
if rows != cols:
msg = ("Array 'cov' must be square if it is two dimensional,"
" but cov.shape = %s." % str(cov.shape))
else:
msg = ("Dimension mismatch: array 'cov' is of shape %s,"
" but 'mean' is a vector of length %d.")
msg = msg % (str(cov.shape), len(mean))
raise ValueError(msg)
elif cov.ndim > 2:
raise ValueError("Array 'cov' must be at most two-dimensional,"
" but cov.ndim = %d" % cov.ndim)
return dim, mean, cov
def _process_quantiles(x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
def _squeeze_output(out):
"""
Remove single-dimensional entries from array and convert to scalar,
if necessary.
"""
out = out.squeeze()
if out.ndim == 0:
out = out[()]
return out
def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
"""
Determine which eigenvalues are "small" given the spectrum.
This is for compatibility across various linear algebra functions
that should agree about whether or not a Hermitian matrix is numerically
singular and what is its numerical matrix rank.
This is designed to be compatible with scipy.linalg.pinvh.
Parameters
----------
spectrum : 1d ndarray
Array of eigenvalues of a Hermitian matrix.
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
Returns
-------
eps : float
Magnitude cutoff for numerical negligibility.
"""
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = spectrum.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
eps = cond * np.max(abs(spectrum))
return eps
def _pinv_1d(v, eps=1e-5):
"""
A helper function for computing the pseudoinverse.
Parameters
----------
v : iterable of numbers
This may be thought of as a vector of eigenvalues or singular values.
eps : float
Values with magnitude no greater than eps are considered negligible.
Returns
-------
v_pinv : 1d float ndarray
A vector of pseudo-inverted numbers.
"""
return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
class _PSD(object):
"""
Compute coordinated functions of a symmetric positive semidefinite matrix.
This class addresses two issues. Firstly it allows the pseudoinverse,
the logarithm of the pseudo-determinant, and the rank of the matrix
to be computed using one call to eigh instead of three.
Secondly it allows these functions to be computed in a way
that gives mutually compatible results.
All of the functions are computed with a common understanding as to
which of the eigenvalues are to be considered negligibly small.
The functions are designed to coordinate with scipy.linalg.pinvh()
but not necessarily with np.linalg.det() or with np.linalg.matrix_rank().
Parameters
----------
M : array_like
Symmetric positive semidefinite matrix (2-D).
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
lower : bool, optional
Whether the pertinent array data is taken from the lower
or upper triangle of M. (Default: lower)
check_finite : bool, optional
Whether to check that the input matrices contain only finite
numbers. Disabling may give a performance gain, but may result
in problems (crashes, non-termination) if the inputs do contain
infinities or NaNs.
allow_singular : bool, optional
Whether to allow a singular matrix. (Default: True)
Notes
-----
The arguments are similar to those of scipy.linalg.pinvh().
"""
def __init__(self, M, cond=None, rcond=None, lower=True,
check_finite=True, allow_singular=True):
# Compute the symmetric eigendecomposition.
# Note that eigh takes care of array conversion, chkfinite,
# and assertion that the matrix is square.
s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite)
eps = _eigvalsh_to_eps(s, cond, rcond)
if np.min(s) < -eps:
raise ValueError('the input matrix must be positive semidefinite')
d = s[s > eps]
if len(d) < len(s) and not allow_singular:
raise np.linalg.LinAlgError('singular matrix')
s_pinv = _pinv_1d(s, eps)
U = np.multiply(u, np.sqrt(s_pinv))
# Initialize the eagerly precomputed attributes.
self.rank = len(d)
self.U = U
self.log_pdet = np.sum(np.log(d))
# Initialize an attribute to be lazily computed.
self._pinv = None
@property
def pinv(self):
if self._pinv is None:
self._pinv = np.dot(self.U, self.U.T)
return self._pinv
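# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream scipy module): _PSD derives a
# mutually consistent rank, pseudo log-determinant and pseudoinverse from one
# eigendecomposition, here of a rank-1 (singular) covariance matrix. The
# helper name is ours; only ``np`` imported above is assumed.
def _example_psd_singular():
    M = np.array([[1.0, 1.0],
                  [1.0, 1.0]])              # positive semidefinite, rank 1
    psd = _PSD(M, allow_singular=True)
    # psd.rank == 1, psd.log_pdet == log(2) (the one non-negligible
    # eigenvalue), and psd.pinv equals the Moore-Penrose pseudoinverse M / 4.
    return psd.rank, psd.log_pdet, psd.pinv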
_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
Whether to allow a singular covariance matrix. (Default: False)
"""
_doc_callparams_note = \
"""Setting the parameter `mean` to `None` is equivalent to having `mean`
be the zero-vector. The parameter `cov` can be a scalar, in which case
the covariance matrix is the identity times that value, a vector of
diagonal entries for the covariance matrix, or a two-dimensional
array_like.
"""
_doc_random_state = """\
random_state : None or int or np.random.RandomState instance, optional
If int or RandomState, use it for drawing the random variates.
If None (or np.random), the global np.random state is used.
Default is None.
"""
_doc_frozen_callparams = ""
_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
docdict_params = {
'_doc_default_callparams': _doc_default_callparams,
'_doc_callparams_note': _doc_callparams_note,
'_doc_random_state': _doc_random_state
}
docdict_noparams = {
'_doc_default_callparams': _doc_frozen_callparams,
'_doc_callparams_note': _doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multi_rv_generic(object):
"""
Class which encapsulates common functionality between all multivariate
distributions.
"""
def __init__(self, seed=None):
super(multi_rv_generic, self).__init__()
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the RandomState object for generating random variates.
This can be either None or an existing RandomState object.
If None (or np.random), use the RandomState singleton used by np.random.
If already a RandomState instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def _get_random_state(self, random_state):
if random_state is not None:
return check_random_state(random_state)
else:
return self._random_state
class multi_rv_frozen(object):
"""
Class which encapsulates common functionality between all frozen
multivariate distributions.
"""
@property
def random_state(self):
return self._dist._random_state
@random_state.setter
def random_state(self, seed):
self._dist._random_state = check_random_state(seed)
class multivariate_normal_gen(multi_rv_generic):
r"""
A multivariate normal random variable.
The `mean` keyword specifies the mean. The `cov` keyword specifies the
covariance matrix.
Methods
-------
``pdf(x, mean=None, cov=1, allow_singular=False)``
Probability density function.
``logpdf(x, mean=None, cov=1, allow_singular=False)``
Log of the probability density function.
``rvs(mean=None, cov=1, size=1, random_state=None)``
Draw random samples from a multivariate normal distribution.
``entropy()``
Compute the differential entropy of the multivariate normal.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" multivariate normal
random variable:
rv = multivariate_normal(mean=None, cov=1, allow_singular=False)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_doc_callparams_note)s
The covariance matrix `cov` must be a (symmetric) positive
semi-definite matrix. The determinant and inverse of `cov` are computed
as the pseudo-determinant and pseudo-inverse, respectively, so
that `cov` does not need to have full rank.
The probability density function for `multivariate_normal` is
.. math::
f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}}
\exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right),
where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix,
and :math:`k` is the dimension of the space where :math:`x` takes values.
.. versionadded:: 0.14.0
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import multivariate_normal
>>> x = np.linspace(0, 5, 10, endpoint=False)
>>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y
array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129,
0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349])
>>> fig1 = plt.figure()
>>> ax = fig1.add_subplot(111)
>>> ax.plot(x, y)
The input quantiles can be any shape of array, as long as the last
axis labels the components. This allows us for instance to
display the frozen pdf for a non-isotropic random variable in 2D as
follows:
>>> x, y = np.mgrid[-1:1:.01, -1:1:.01]
>>> pos = np.dstack((x, y))
>>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
>>> fig2 = plt.figure()
>>> ax2 = fig2.add_subplot(111)
>>> ax2.contourf(x, y, rv.pdf(pos))
"""
def __init__(self, seed=None):
super(multivariate_normal_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, docdict_params)
def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""
Create a frozen multivariate normal distribution.
See `multivariate_normal_frozen` for more information.
"""
return multivariate_normal_frozen(mean, cov,
allow_singular=allow_singular,
seed=seed)
def _logpdf(self, x, mean, prec_U, log_det_cov, rank):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
prec_U : ndarray
A decomposition such that np.dot(prec_U, prec_U.T)
is the precision matrix, i.e. inverse of the covariance matrix.
log_det_cov : float
Logarithm of the determinant of the covariance matrix
rank : int
Rank of the covariance matrix.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
dev = x - mean
maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)
return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)
def logpdf(self, x, mean, cov, allow_singular=False):
"""
Log of the multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, mean, cov = _process_parameters(None, mean, cov)
x = _process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)
return _squeeze_output(out)
def pdf(self, x, mean, cov, allow_singular=False):
"""
Multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, mean, cov = _process_parameters(None, mean, cov)
x = _process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank))
return _squeeze_output(out)
def rvs(self, mean=None, cov=1, size=1, random_state=None):
"""
Draw random samples from a multivariate normal distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
Notes
-----
%(_doc_callparams_note)s
"""
dim, mean, cov = _process_parameters(None, mean, cov)
random_state = self._get_random_state(random_state)
out = random_state.multivariate_normal(mean, cov, size)
return _squeeze_output(out)
def entropy(self, mean=None, cov=1):
"""
Compute the differential entropy of the multivariate normal.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
Notes
-----
%(_doc_callparams_note)s
"""
dim, mean, cov = _process_parameters(None, mean, cov)
_, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
return 0.5 * logdet
multivariate_normal = multivariate_normal_gen()
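# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream scipy module): calling the
# generic object directly and "freezing" it give the same density. The helper
# name is ours; only ``np`` and the ``multivariate_normal`` instance above are
# assumed (the frozen class is defined just below and resolves at call time).
def _example_multivariate_normal_frozen_vs_direct():
    mean = np.array([0.5, -0.2])
    cov = np.array([[2.0, 0.3],
                    [0.3, 0.5]])
    x = np.array([0.0, 0.0])
    rv = multivariate_normal(mean, cov)            # frozen distribution
    direct = multivariate_normal.pdf(x, mean, cov)
    assert np.allclose(rv.pdf(x), direct)
    return direct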
class multivariate_normal_frozen(multi_rv_frozen):
def __init__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""
Create a frozen multivariate normal distribution.
Parameters
----------
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
If this flag is True then tolerate a singular
covariance matrix (default False).
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
Examples
--------
When called with the default parameters, this will create a 1D random
variable with mean 0 and covariance 1:
>>> from scipy.stats import multivariate_normal
>>> r = multivariate_normal()
>>> r.mean
array([ 0.])
>>> r.cov
array([[1.]])
"""
self.dim, self.mean, self.cov = _process_parameters(None, mean, cov)
self.cov_info = _PSD(self.cov, allow_singular=allow_singular)
self._dist = multivariate_normal_gen(seed)
def logpdf(self, x):
x = _process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.mean, self.cov_info.U,
self.cov_info.log_pdet, self.cov_info.rank)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.cov, size, random_state)
def entropy(self):
"""
Computes the differential entropy of the multivariate normal.
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
"""
log_pdet = self.cov_info.log_pdet
rank = self.cov_info.rank
return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
method = multivariate_normal_gen.__dict__[name]
method_frozen = multivariate_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__, docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, docdict_params)
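# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream scipy module): for a full-rank
# covariance the frozen entropy, 0.5 * (rank * (log(2*pi) + 1) + log_pdet),
# agrees with the generic entropy computed from slogdet above. The helper
# name is ours; only ``np`` and ``multivariate_normal`` are assumed.
def _example_multivariate_normal_entropy_consistency():
    cov = np.array([[2.0, 0.3],
                    [0.3, 0.5]])
    h_generic = multivariate_normal.entropy(mean=[0.0, 0.0], cov=cov)
    h_frozen = multivariate_normal(mean=[0.0, 0.0], cov=cov).entropy()
    assert np.allclose(h_generic, h_frozen)
    return h_generic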
_dirichlet_doc_default_callparams = """\
alpha : array_like
The concentration parameters. The number of entries determines the
dimensionality of the distribution.
"""
_dirichlet_doc_frozen_callparams = ""
_dirichlet_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
dirichlet_docdict_params = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams,
'_doc_random_state': _doc_random_state
}
dirichlet_docdict_noparams = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams,
'_doc_random_state': _doc_random_state
}
def _dirichlet_check_parameters(alpha):
alpha = np.asarray(alpha)
if np.min(alpha) <= 0:
raise ValueError("All parameters must be greater than 0")
elif alpha.ndim != 1:
raise ValueError("Parameter vector 'a' must be one dimensional, "
"but a.shape = %s." % (alpha.shape, ))
return alpha
def _dirichlet_check_input(alpha, x):
x = np.asarray(x)
if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:
raise ValueError("Vector 'x' must have either the same number "
"of entries as, or one entry fewer than, "
"parameter vector 'a', but alpha.shape = %s "
"and x.shape = %s." % (alpha.shape, x.shape))
if x.shape[0] != alpha.shape[0]:
xk = np.array([1 - np.sum(x, 0)])
if xk.ndim == 1:
x = np.append(x, xk)
elif xk.ndim == 2:
x = np.vstack((x, xk))
else:
raise ValueError("The input must be one dimensional or a two "
"dimensional matrix containing the entries.")
if np.min(x) <= 0:
raise ValueError("Each entry in 'x' must be greater than zero.")
if np.max(x) > 1:
raise ValueError("Each entry in 'x' must be smaller or equal one.")
if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any():
raise ValueError("The input vector 'x' must lie within the normal "
"simplex. but np.sum(x, 0) = %s." % np.sum(x, 0))
return x
def _lnB(alpha):
r"""
Internal helper function to compute the log of the useful quotient
.. math::
        B(\alpha) = \frac{\prod_{i=1}^{K}\Gamma(\alpha_i)}{\Gamma\left(\sum_{i=1}^{K}\alpha_i\right)}
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
B : scalar
Helper quotient, internal use only
"""
return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))
class dirichlet_gen(multi_rv_generic):
r"""
A Dirichlet random variable.
The `alpha` keyword specifies the concentration parameters of the
distribution.
.. versionadded:: 0.15.0
Methods
-------
``pdf(x, alpha)``
Probability density function.
``logpdf(x, alpha)``
Log of the probability density function.
``rvs(alpha, size=1, random_state=None)``
Draw random samples from a Dirichlet distribution.
``mean(alpha)``
The mean of the Dirichlet distribution
``var(alpha)``
The variance of the Dirichlet distribution
``entropy(alpha)``
Compute the differential entropy of the multivariate normal.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix
concentration parameters, returning a "frozen" Dirichlet
random variable:
rv = dirichlet(alpha)
- Frozen object with the same methods but holding the given
concentration parameters fixed.
Notes
-----
    Each :math:`\alpha` entry must be positive. The distribution has support
    only on the simplex defined by
.. math::
\sum_{i=1}^{K} x_i \le 1
The probability density function for `dirichlet` is
.. math::
f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1}
where
.. math::
\mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)}
{\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)}
    and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)` are the
    concentration parameters, and :math:`K` is the dimension of the space
where :math:`x` takes values.
Note that the dirichlet interface is somewhat inconsistent.
The array returned by the rvs function is transposed
with respect to the format expected by the pdf and logpdf.
"""
def __init__(self, seed=None):
super(dirichlet_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params)
def __call__(self, alpha, seed=None):
return dirichlet_frozen(alpha, seed=seed)
def _logpdf(self, x, alpha):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
%(_dirichlet_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
lnB = _lnB(alpha)
return - lnB + np.sum((np.log(x.T) * (alpha - 1)).T, 0)
def logpdf(self, x, alpha):
"""
Log of the Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = self._logpdf(x, alpha)
return _squeeze_output(out)
def pdf(self, x, alpha):
"""
The Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray
The probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = np.exp(self._logpdf(x, alpha))
return _squeeze_output(out)
def mean(self, alpha):
"""
Compute the mean of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
mu : scalar
Mean of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
out = alpha / (np.sum(alpha))
return _squeeze_output(out)
def var(self, alpha):
"""
Compute the variance of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
v : scalar
Variance of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1))
return out
def entropy(self, alpha):
"""
Compute the differential entropy of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
lnB = _lnB(alpha)
K = alpha.shape[0]
out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
(alpha - 1) * scipy.special.psi(alpha))
return _squeeze_output(out)
def rvs(self, alpha, size=1, random_state=None):
"""
Draw random samples from a Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
size : int, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
"""
alpha = _dirichlet_check_parameters(alpha)
random_state = self._get_random_state(random_state)
return random_state.dirichlet(alpha, size=size)
dirichlet = dirichlet_gen()
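# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream scipy module) of the dirichlet
# interface. As the class docstring notes, rvs returns samples with the
# components along the last axis, i.e. transposed relative to what pdf and
# logpdf expect. The helper name is ours; only ``np`` and ``dirichlet`` are
# assumed.
def _example_dirichlet_usage():
    alpha = np.array([1.0, 2.0, 3.0])
    x = np.array([0.2, 0.3, 0.5])             # a point on the simplex
    density = dirichlet.pdf(x, alpha)
    mean = dirichlet.mean(alpha)               # == alpha / alpha.sum()
    samples = dirichlet.rvs(alpha, size=4)     # shape (4, 3): one row per draw
    return density, mean, samples.shape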
class dirichlet_frozen(multi_rv_frozen):
def __init__(self, alpha, seed=None):
self.alpha = _dirichlet_check_parameters(alpha)
self._dist = dirichlet_gen(seed)
def logpdf(self, x):
return self._dist.logpdf(x, self.alpha)
def pdf(self, x):
return self._dist.pdf(x, self.alpha)
def mean(self):
return self._dist.mean(self.alpha)
def var(self):
return self._dist.var(self.alpha)
def entropy(self):
return self._dist.entropy(self.alpha)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.alpha, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']:
method = dirichlet_gen.__dict__[name]
method_frozen = dirichlet_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, dirichlet_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params)
_wishart_doc_default_callparams = """\
df : int
Degrees of freedom, must be greater than or equal to dimension of the
scale matrix
scale : array_like
Symmetric positive definite scale matrix of the distribution
"""
_wishart_doc_callparams_note = ""
_wishart_doc_frozen_callparams = ""
_wishart_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
wishart_docdict_params = {
'_doc_default_callparams': _wishart_doc_default_callparams,
'_doc_callparams_note': _wishart_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
wishart_docdict_noparams = {
'_doc_default_callparams': _wishart_doc_frozen_callparams,
'_doc_callparams_note': _wishart_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class wishart_gen(multi_rv_generic):
r"""
A Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal precision matrix (the inverse of the covariance
matrix).
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from a Wishart distribution.
``entropy()``
Compute the differential entropy of the Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" Wishart random
variable:
rv = wishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
invwishart, chi2
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The Wishart distribution is often denoted
.. math::
W_p(\nu, \Sigma)
where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the
:math:`p \times p` scale matrix.
The probability density function for `wishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then
its PDF is given by:
.. math::
f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} }
|\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )}
\exp\left( -tr(\Sigma^{-1} S) / 2 \right)
If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then
:math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart).
If the scale matrix is 1-dimensional and equal to one, then the Wishart
distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)`
distribution.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate
Generator", Applied Statistics, vol. 21, pp. 341-345, 1972.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import wishart, chi2
>>> x = np.linspace(1e-5, 8, 100)
>>> w = wishart.pdf(x, df=3, scale=1); w[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> c = chi2.pdf(x, 3); c[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> plt.plot(x, w)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super(wishart_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""
Create a frozen Wishart distribution.
See `wishart_frozen` for more information.
"""
return wishart_frozen(df, scale, seed)
def _process_parameters(self, df, scale):
if scale is None:
scale = 1.0
scale = np.asarray(scale, dtype=float)
if scale.ndim == 0:
scale = scale[np.newaxis,np.newaxis]
elif scale.ndim == 1:
scale = np.diag(scale)
elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:
raise ValueError("Array 'scale' must be square if it is two"
" dimensional, but scale.scale = %s."
% str(scale.shape))
elif scale.ndim > 2:
raise ValueError("Array 'scale' must be at most two-dimensional,"
" but scale.ndim = %d" % scale.ndim)
dim = scale.shape[0]
if df is None:
df = dim
elif not np.isscalar(df):
raise ValueError("Degrees of freedom must be a scalar.")
elif df < dim:
raise ValueError("Degrees of freedom cannot be less than dimension"
" of scale matrix, but df = %d" % df)
return dim, df, scale
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x * np.eye(dim)[:, :, np.newaxis]
if x.ndim == 1:
if dim == 1:
x = x[np.newaxis, np.newaxis, :]
else:
x = np.diag(x)[:, :, np.newaxis]
elif x.ndim == 2:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square if they are two"
" dimensional, but x.shape = %s."
% str(x.shape))
x = x[:, :, np.newaxis]
elif x.ndim == 3:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square in the first two"
" dimensions if they are three dimensional"
", but x.shape = %s." % str(x.shape))
elif x.ndim > 3:
raise ValueError("Quantiles must be at most two-dimensional with"
" an additional dimension for multiple"
"components, but x.ndim = %d" % x.ndim)
# Now we have 3-dim array; should have shape [dim, dim, *]
if not x.shape[0:2] == (dim, dim):
raise ValueError('Quantiles have incompatible dimensions: should'
' be %s, got %s.' % ((dim, dim), x.shape[0:2]))
return x
def _process_size(self, size):
size = np.asarray(size)
if size.ndim == 0:
size = size[np.newaxis]
elif size.ndim > 1:
raise ValueError('Size must be an integer or tuple of integers;'
' thus must have dimension <= 1.'
                             ' Got size = %s.' % str(tuple(size)))
n = size.prod()
shape = tuple(size)
return n, shape
def _logpdf(self, x, dim, df, scale, log_det_scale, C):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
# log determinant of x
# Note: x has components along the last axis, so that x.T has
        # components along the 0-th axis. Then since det(A) = det(A'), this
# gives us a 1-dim vector of determinants
# Retrieve tr(scale^{-1} x)
log_det_x = np.zeros(x.shape[-1])
scale_inv_x = np.zeros(x.shape)
tr_scale_inv_x = np.zeros(x.shape[-1])
for i in range(x.shape[-1]):
_, log_det_x[i] = self._cholesky_logdet(x[:,:,i])
scale_inv_x[:,:,i] = scipy.linalg.cho_solve((C, True), x[:,:,i])
tr_scale_inv_x[i] = scale_inv_x[:,:,i].trace()
# Log PDF
out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -
(0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +
multigammaln(0.5*df, dim)))
return out
def logpdf(self, x, df, scale):
"""
Log of the Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
# Cholesky decomposition of scale, get log(det(scale))
C, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale, C)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""
Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
return df * scale
def mean(self, df, scale):
"""
Mean of the Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out)
def _mode(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
if df >= dim + 1:
out = (df-dim-1) * scale
else:
out = None
return out
def mode(self, df, scale):
"""
Mode of the Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float or None
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _var(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
var = scale**2
diag = scale.diagonal() # 1 x dim array
var += np.outer(diag, diag)
var *= df
return var
def var(self, df, scale):
"""
Variance of the Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out)
def _standard_rvs(self, n, shape, dim, df, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
random_state : np.random.RandomState instance
RandomState used for drawing the random variates.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
# Random normal variates for off-diagonal elements
n_tril = dim * (dim-1) // 2
covariances = random_state.normal(
size=n*n_tril).reshape(shape+(n_tril,))
# Random chi-square variates for diagonal elements
variances = np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5
for i in range(dim)]].reshape((dim,) + shape[::-1]).T
# Create the A matri(ces) - lower triangular
A = np.zeros(shape + (dim, dim))
# Input the covariances
size_idx = tuple([slice(None,None,None)]*len(shape))
tril_idx = np.tril_indices(dim, k=-1)
A[size_idx + tril_idx] = covariances
# Input the variances
diag_idx = np.diag_indices(dim)
A[size_idx + diag_idx] = variances
return A
def _rvs(self, n, shape, dim, df, C, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Calculate the matrices A, which are actually lower triangular
# Cholesky factorizations of a matrix B such that B ~ W(df, I)
A = self._standard_rvs(n, shape, dim, df, random_state)
# Calculate SA = C A A' C', where SA ~ W(df, scale)
# Note: this is the product of a (lower) (lower) (lower)' (lower)'
# or, denoting B = AA', it is C B C' where C is the lower
# triangular Cholesky factorization of the scale matrix.
# this appears to conflict with the instructions in [1]_, which
# suggest that it should be D' B D where D is the lower
# triangular factorization of the scale matrix. However, it is
# meant to refer to the Bartlett (1933) representation of a
# Wishart random variate as L A A' L' where L is lower triangular
# so it appears that understanding D' to be upper triangular
# is either a typo in or misreading of [1]_.
for index in np.ndindex(shape):
CA = np.dot(C, A[index])
A[index] = np.dot(CA, CA.T)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""
Draw random samples from a Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Cholesky decomposition of scale
C = scipy.linalg.cholesky(scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def _entropy(self, dim, df, log_det_scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'entropy' instead.
"""
return (
0.5 * (dim+1) * log_det_scale +
0.5 * dim * (dim+1) * _LOG_2 +
multigammaln(0.5*df, dim) -
0.5 * (df - dim - 1) * np.sum(
[psi(0.5*(df + 1 - (i+1))) for i in range(dim)]
) +
0.5 * df * dim
)
def entropy(self, df, scale):
"""
Compute the differential entropy of the Wishart.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Wishart distribution
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
_, log_det_scale = self._cholesky_logdet(scale)
return self._entropy(dim, df, log_det_scale)
def _cholesky_logdet(self, scale):
"""
Compute Cholesky decomposition and determine (log(det(scale)).
Parameters
----------
scale : ndarray
Scale matrix.
Returns
-------
c_decomp : ndarray
The Cholesky decomposition of `scale`.
logdet : scalar
The log of the determinant of `scale`.
Notes
-----
This computation of ``logdet`` is equivalent to
``np.linalg.slogdet(scale)``. It is ~2x faster though.
"""
c_decomp = scipy.linalg.cholesky(scale, lower=True)
logdet = 2 * np.sum(np.log(c_decomp.diagonal()))
return c_decomp, logdet
wishart = wishart_gen()
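# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream scipy module): with a 1x1
# scale equal to one the Wishart density coincides with chi2(df), and the mean
# of W_p(df, scale) is df * scale. The helper name is ours; only ``np``,
# ``wishart`` and scipy.stats.chi2 are assumed.
def _example_wishart_basics():
    from scipy.stats import chi2   # imported lazily to avoid a cycle at load
    x = 2.5
    w = wishart.pdf(x, df=3, scale=1)
    assert np.allclose(w, chi2.pdf(x, 3))
    scale = np.array([[2.0, 0.3],
                      [0.3, 0.5]])
    assert np.allclose(wishart.mean(df=4, scale=scale), 4 * scale)
    return w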
class wishart_frozen(multi_rv_frozen):
"""
Create a frozen Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
"""
def __init__(self, df, scale, seed=None):
self._dist = wishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale)
self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale, self.C)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out)
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out)
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
return self._dist._entropy(self.dim, self.df, self.log_det_scale)
# Set frozen generator docstrings from corresponding docstrings in
# Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']:
method = wishart_gen.__dict__[name]
method_frozen = wishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
from numpy import asarray_chkfinite, asarray
from scipy.linalg.misc import LinAlgError
from scipy.linalg.lapack import get_lapack_funcs
def _cho_inv_batch(a, check_finite=True):
"""
Invert the matrices a_i, using a Cholesky factorization of A, where
a_i resides in the last two dimensions of a and the other indices describe
the index i.
Overwrites the data in a.
Parameters
----------
a : array
Array of matrices to invert, where the matrices themselves are stored
in the last two dimensions.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : array
Array of inverses of the matrices ``a_i``.
See also
--------
scipy.linalg.cholesky : Cholesky factorization of a matrix
"""
if check_finite:
a1 = asarray_chkfinite(a)
else:
a1 = asarray(a)
if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]:
raise ValueError('expected square matrix in last two dimensions')
potrf, potri = get_lapack_funcs(('potrf','potri'), (a1,))
tril_idx = np.tril_indices(a.shape[-2], k=-1)
triu_idx = np.triu_indices(a.shape[-2], k=1)
for index in np.ndindex(a1.shape[:-2]):
# Cholesky decomposition
a1[index], info = potrf(a1[index], lower=True, overwrite_a=False,
clean=False)
if info > 0:
raise LinAlgError("%d-th leading minor not positive definite"
% info)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Inversion
a1[index], info = potri(a1[index], lower=True, overwrite_c=False)
if info > 0:
raise LinAlgError("the inverse could not be computed")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
                                 ' potri' % -info)
# Make symmetric (dpotri only fills in the lower triangle)
a1[index][triu_idx] = a1[index][tril_idx]
return a1
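# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream scipy module): _cho_inv_batch
# inverts a stack of symmetric positive definite matrices in place, matching
# np.linalg.inv applied matrix by matrix. The helper name is ours; only ``np``
# imported above is assumed.
def _example_cho_inv_batch():
    rng = np.random.RandomState(0)
    a = rng.randn(5, 3, 3)
    spd = np.einsum('nij,nkj->nik', a, a) + 3 * np.eye(3)   # batch of SPD
    expected = np.linalg.inv(spd)                           # stacked inverse
    inverted = _cho_inv_batch(spd.copy())                   # overwrites input
    assert np.allclose(inverted, expected)
    return inverted.shape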
class invwishart_gen(wishart_gen):
r"""
An inverse Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal covariance matrix.
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from an inverse Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" inverse Wishart
random variable:
rv = invwishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
wishart
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The inverse Wishart distribution is often denoted
.. math::
W_p^{-1}(\nu, \Psi)
where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the
:math:`p \times p` scale matrix.
The probability density function for `invwishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`,
then its PDF is given by:
.. math::
f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} }
|S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)}
\exp\left( -tr(\Sigma S^{-1}) / 2 \right)
If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then
:math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart).
If the scale matrix is 1-dimensional and equal to one, then the inverse
    Wishart distribution :math:`W_1^{-1}(\nu, 1)` collapses to the
inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}`
and scale = :math:`\frac{1}{2}`.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications in
Statistics - Simulation and Computation, vol. 14.2, pp.511-514, 1985.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import invwishart, invgamma
>>> x = np.linspace(0.01, 1, 100)
>>> iw = invwishart.pdf(x, df=6, scale=1)
>>> iw[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> ig = invgamma.pdf(x, 6/2., scale=1./2)
>>> ig[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> plt.plot(x, iw)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super(invwishart_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""
Create a frozen inverse Wishart distribution.
See `invwishart_frozen` for more information.
"""
return invwishart_frozen(df, scale, seed)
def _logpdf(self, x, dim, df, scale, log_det_scale):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function.
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
log_det_x = np.zeros(x.shape[-1])
#scale_x_inv = np.zeros(x.shape)
x_inv = np.copy(x).T
if dim > 1:
_cho_inv_batch(x_inv) # works in-place
else:
x_inv = 1./x_inv
tr_scale_x_inv = np.zeros(x.shape[-1])
for i in range(x.shape[-1]):
C, lower = scipy.linalg.cho_factor(x[:,:,i], lower=True)
log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))
#scale_x_inv[:,:,i] = scipy.linalg.cho_solve((C, True), scale).T
tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()
# Log PDF
out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -
(0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -
multigammaln(0.5*df, dim))
return out
def logpdf(self, x, df, scale):
"""
Log of the inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
_, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""
Inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
if df > dim + 1:
out = scale / (df - dim - 1)
else:
out = None
return out
def mean(self, df, scale):
"""
Mean of the inverse Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus one.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float or None
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _mode(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
return scale / (df + dim + 1)
def mode(self, df, scale):
"""
Mode of the inverse Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out)
def _var(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
if df > dim + 3:
var = (df - dim + 1) * scale**2
diag = scale.diagonal() # 1 x dim array
var += (df - dim - 1) * np.outer(diag, diag)
var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)
else:
var = None
return var
def var(self, df, scale):
"""
Variance of the inverse Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus three.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _rvs(self, n, shape, dim, df, C, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Get random draws A such that A ~ W(df, I)
A = super(invwishart_gen, self)._standard_rvs(n, shape, dim,
df, random_state)
# Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)
eye = np.eye(dim)
trtrs = get_lapack_funcs(('trtrs'), (A,))
for index in np.ndindex(A.shape[:-2]):
# Calculate CA
CA = np.dot(C, A[index])
# Get (C A)^{-1} via triangular solver
if dim > 1:
CA, info = trtrs(CA, eye, lower=True)
if info > 0:
raise LinAlgError("Singular matrix.")
if info < 0:
raise ValueError('Illegal value in %d-th argument of'
' internal trtrs' % -info)
else:
CA = 1. / CA
# Get SA
A[index] = np.dot(CA.T, CA)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""
Draw random samples from an inverse Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Invert the scale
eye = np.eye(dim)
L, lower = scipy.linalg.cho_factor(scale, lower=True)
inv_scale = scipy.linalg.cho_solve((L, lower), eye)
# Cholesky decomposition of inverted scale
C = scipy.linalg.cholesky(inv_scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
invwishart = invwishart_gen()
class invwishart_frozen(multi_rv_frozen):
def __init__(self, df, scale, seed=None):
"""
Create a frozen inverse Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
"""
self._dist = invwishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale
)
# Get the determinant via Cholesky factorization
C, lower = scipy.linalg.cho_factor(self.scale, lower=True)
self.log_det_scale = 2 * np.sum(np.log(C.diagonal()))
# Get the inverse using the Cholesky factorization
eye = np.eye(self.dim)
self.inv_scale = scipy.linalg.cho_solve((C, lower), eye)
# Get the Cholesky factorization of the inverse scale
self.C = scipy.linalg.cholesky(self.inv_scale, lower=True)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out)
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
# Set frozen generator docstrings from corresponding docstrings in
# inverse Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']:
method = invwishart_gen.__dict__[name]
    method_frozen = invwishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
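# Illustrative usage sketch (not part of the original module); the numbers
# below are arbitrary:
#
#   rv = invwishart(df=6, scale=np.eye(2))
#   samples = rv.rvs(size=5, random_state=0)   # array of shape (5, 2, 2)
#   density = rv.pdf(np.eye(2))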
| bsd-3-clause |
stuart-knock/bokeh | bokeh/cli/utils.py | 42 | 8119 | from __future__ import absolute_import, print_function
from collections import OrderedDict
from six.moves.urllib import request as urllib2
import io
import pandas as pd
from .. import charts
from . import help_messages as hm
def keep_source_input_sync(filepath, callback, start=0):
""" Monitor file at filepath checking for new lines (similar to
tail -f) and calls callback on every new line found.
Args:
filepath (str): path to the series data file (
i.e.: /source/to/my/data.csv)
callback (callable): function to be called with the a DataFrame
created from the new lines found from file at filepath
starting byte start
start (int): specifies where to start reading from the file at
filepath.
Default: 0
Returns:
DataFrame created from data read from filepath
"""
if filepath is None:
msg = "No Input! Please specify --source_filename or --buffer t"
raise IOError(msg)
if filepath.lower().startswith('http'):
# Create a request for the given URL.
while True:
request = urllib2.Request(filepath)
data = get_data_from_url(request, start)
f = io.BytesIO(data)
f.seek(start)
line = f.readline() # See note below
if not line:
continue # No data, try again
callback(line)
start = len(data)
else:
f = open(filepath, 'r')
f.seek(start)
while True:
line = f.readline() # See note below
if not line:
continue # No data, try again
callback(line)
source = pd.read_csv(filepath)
return source
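# Illustrative usage sketch (not part of the original module); the file path
# and callback below are hypothetical:
#
#   def on_new_line(line):
#       print(line)
#
#   keep_source_input_sync('/tmp/series.csv', on_new_line, start=0)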
# Try to get the response. This will raise a urllib2.URLError if there is a
# problem (e.g., invalid URL).
# Reference:
# - http://stackoverflow.com/questions/5209087/python-seek-in-http-response-stream
# - http://stackoverflow.com/questions/1971240/python-seek-on-remote-file-using-http
def get_data_from_url(request, start=0, length=0):
""" Read from request after adding headers to retrieve data from byte
specified in start.
request (urllib2.Request): request object related to the data to read
start (int, optional): byte to start reading from.
Default: 0
length: length of the data range to read from start. If 0 it reads
until the end of the stream.
Default: 0
Returns:
String read from request
"""
# Add the header to specify the range to download.
if start and length:
request.add_header("Range", "bytes=%d-%d" % (start, start + length - 1))
elif start:
request.add_header("Range", "bytes=%s-" % start)
response = urllib2.urlopen(request)
# If a content-range header is present, partial retrieval worked.
if "content-range" in response.headers:
print("Partial retrieval successful.")
# The header contains the string 'bytes', followed by a space, then the
# range in the format 'start-end', followed by a slash and then the total
        # size of the page (or an asterisk if the total size is unknown). Let's get
# the range and total size from this.
_range, total = response.headers['content-range'].split(' ')[-1].split('/')
# Print a message giving the range information.
if total == '*':
print("Bytes %s of an unknown total were retrieved." % _range)
else:
print("Bytes %s of a total of %s were retrieved." % (_range, total))
# # No header, so partial retrieval was unsuccessful.
# else:
# print "Unable to use partial retrieval."
data = response.read()
return data
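# Illustrative usage sketch (not part of the original module); the URL below
# is hypothetical:
#
#   req = urllib2.Request('http://example.com/data.csv')
#   chunk = get_data_from_url(req, start=1024, length=4096)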
def parse_output_config(output):
"""Parse the output specification string and return the related chart
output attribute.
Attr:
output (str): String with the syntax convention specified for the
cli output option is as follows: <output_type>://<type_arg>
Valid values:
output_type: file or server
type_arg:
file_path if output_type is file
serve path if output_type is server
Returns:
dictionary containing the output arguments to pass to a chart object
"""
output_type, output_options = output.split('://')
if output_type == 'file':
return {'filename': output_options}
elif output_type == 'server':
# TODO: check if server configuration is as flexible as with plotting
# interface and add support for url/name if so.
out_opt = output_options.split("@")
attrnames = ['server', 'url', 'name']
        # unpack server output parameters in order to pass them to the plot
        # creation function
        kws = dict((attrn, val) for attrn, val in zip(attrnames, out_opt))
return {'server': kws['server']}
else:
msg = "Unknown output type %s found. Please use: file|server"
print (msg % output_type)
return {}
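# Examples of the mapping implemented above (illustrative, not part of the
# original module):
#
#   parse_output_config('file://out.html')  -> {'filename': 'out.html'}
#   parse_output_config('server://myplot')  -> {'server': 'myplot'}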
def get_chart_params(title, output, show_legend=False):
"""Parse output type and output options and return related chart
    parameters. For example: returns the filename if output_type is file,
    or the server name if output_type is server.
Args:
title (str): the title of your plot.
output (str): selected output. Follows the following convention:
<output_type>://<type_arg> where output_type can be
`file` (in that case type_arg specifies the file path) or
`server` (in that case type_arg specify the server name).
Returns:
dictionary containing the arguments to pass to a chart object
related to title and output options
"""
params = {'title': title, 'legend': show_legend}
output_params = parse_output_config(output)
if output_params:
params.update(output_params)
return params
def get_data_series(series, source, indexes):
"""Generate an OrderedDict from the source series excluding index
and all series not specified in series.
Args:
series (list(str)): list of strings specifying the names of the
series to keep from source
source (DataFrame): pandas DataFrame with the data series to be
plotted
indexes (lst(str)): name of the series of source to be used as index.
Returns:
OrderedDict with the data series from source
"""
series = define_series(series, source, indexes)
# generate charts data
data_series = OrderedDict()
for i, colname in enumerate(series+indexes):
try:
data_series[colname] = source[colname]
except KeyError:
raise KeyError(hm.ERR_MSG_SERIES_NOT_FOUND % (colname, source.keys()))
return data_series
def define_series(series, source, indexes):
"""If series is empty returns source_columns excluding the column
where column == index. Otherwise returns the series.split(',')
Args:
series (str): string that contains the names of the
series to keep from source, separated by `,`
source (DataFrame): pandas DataFrame with the data series to be
plotted
indexes (lst(str)): name of the series of source to be used as index.
Returns:
list of the names (as str) of the series except index
"""
if not series:
return [c for c in source.columns if c not in indexes]
else:
return series.split(',')
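# For example (illustrative): with a source DataFrame whose columns are
# ['t', 'a', 'b'] and indexes=['t'], define_series('', source, ['t']) returns
# ['a', 'b'], while define_series('a,b', source, ['t']) splits the string and
# returns ['a', 'b'] directly.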
def get_charts_mapping():
"""Return a dict with chart classes names (lower case) as keys and
their related class as values.
Returns:
dict mapping chart classes names to chart classes
"""
mapping = {}
for (clsname, cls) in charts.__dict__.items():
try:
# TODO: We may need to restore the objects filtering
# when charts creators (or builders registration) is added
# to the charts API
mapping[clsname.lower()] = cls
except TypeError:
pass
return mapping
| bsd-3-clause |
SciTools/cartopy | examples/miscellanea/un_flag.py | 2 | 7533 | """
UN Flag
-------
A demonstration of the power of Matplotlib combined with cartopy's Azimuthal
Equidistant projection to reproduce the UN flag.
"""
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
from matplotlib.patches import PathPatch
import matplotlib.path
import matplotlib.ticker
from matplotlib.transforms import BboxTransform, Bbox
import numpy as np
# When drawing the flag, we can either use white filled land, or be a little
# more fancy and use the Natural Earth shaded relief imagery.
filled_land = True
def olive_path():
"""
Return a Matplotlib path representing a single olive branch from the
UN Flag. The path coordinates were extracted from the SVG at
https://commons.wikimedia.org/wiki/File:Flag_of_the_United_Nations.svg.
"""
olives_verts = np.array(
[[0, 2, 6, 9, 30, 55, 79, 94, 104, 117, 134, 157, 177,
188, 199, 207, 191, 167, 149, 129, 109, 87, 53, 22, 0, 663,
245, 223, 187, 158, 154, 150, 146, 149, 154, 158, 181, 184, 197,
181, 167, 153, 142, 129, 116, 119, 123, 127, 151, 178, 203, 220,
237, 245, 663, 280, 267, 232, 209, 205, 201, 196, 196, 201, 207,
211, 224, 219, 230, 220, 212, 207, 198, 195, 176, 197, 220, 239,
259, 277, 280, 663, 295, 293, 264, 250, 247, 244, 240, 240, 243,
244, 249, 251, 250, 248, 242, 245, 233, 236, 230, 228, 224, 222,
234, 249, 262, 275, 285, 291, 295, 296, 295, 663, 294, 293, 292,
289, 294, 277, 271, 269, 268, 265, 264, 264, 264, 272, 260, 248,
245, 243, 242, 240, 243, 245, 247, 252, 256, 259, 258, 257, 258,
267, 285, 290, 294, 297, 294, 663, 285, 285, 277, 266, 265, 265,
265, 277, 266, 268, 269, 269, 269, 268, 268, 267, 267, 264, 248,
235, 232, 229, 228, 229, 232, 236, 246, 266, 269, 271, 285, 285,
663, 252, 245, 238, 230, 246, 245, 250, 252, 255, 256, 256, 253,
249, 242, 231, 214, 208, 208, 227, 244, 252, 258, 262, 262, 261,
262, 264, 265, 252, 663, 185, 197, 206, 215, 223, 233, 242, 237,
237, 230, 220, 202, 185, 663],
[8, 5, 3, 0, 22, 46, 46, 46, 35, 27, 16, 10, 18,
22, 28, 38, 27, 26, 33, 41, 52, 52, 52, 30, 8, 595,
77, 52, 61, 54, 53, 52, 53, 55, 55, 57, 65, 90, 106,
96, 81, 68, 58, 54, 51, 50, 51, 50, 44, 34, 43, 48,
61, 77, 595, 135, 104, 102, 83, 79, 76, 74, 74, 79, 84,
90, 109, 135, 156, 145, 133, 121, 100, 77, 62, 69, 67, 80,
92, 113, 135, 595, 198, 171, 156, 134, 129, 124, 120, 123, 126,
129, 138, 149, 161, 175, 188, 202, 177, 144, 116, 110, 105, 99,
108, 116, 126, 136, 147, 162, 173, 186, 198, 595, 249, 255, 261,
267, 241, 222, 200, 192, 183, 175, 175, 175, 175, 199, 221, 240,
245, 250, 256, 245, 233, 222, 207, 194, 180, 172, 162, 153, 154,
171, 184, 202, 216, 233, 249, 595, 276, 296, 312, 327, 327, 327,
327, 308, 284, 262, 240, 240, 239, 239, 242, 244, 247, 265, 277,
290, 293, 296, 300, 291, 282, 274, 253, 236, 213, 235, 252, 276,
595, 342, 349, 355, 357, 346, 326, 309, 303, 297, 291, 290, 297,
304, 310, 321, 327, 343, 321, 305, 292, 286, 278, 270, 276, 281,
287, 306, 328, 342, 595, 379, 369, 355, 343, 333, 326, 318, 328,
340, 349, 366, 373, 379, 595]]).T
olives_codes = np.array([1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 79, 1, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 79, 1, 4, 4, 4, 4, 4, 4, 2, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 79, 1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 79, 1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 4,
4, 4, 4, 4, 4, 79, 1, 4, 4, 4, 4, 4, 4, 4, 4, 4,
2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 79, 1, 4, 4, 4, 4, 4, 4, 4, 4,
4, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 79, 1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 79], dtype=np.uint8)
return matplotlib.path.Path(olives_verts, olives_codes)
def main():
blue = '#4b92db'
# We're drawing a flag with a 3:5 aspect ratio.
fig = plt.figure(figsize=[7.5, 4.5], facecolor=blue)
# Put a blue background on the figure.
blue_background = PathPatch(matplotlib.path.Path.unit_rectangle(),
transform=fig.transFigure, color=blue,
zorder=-1)
fig.patches.append(blue_background)
# Set up the Azimuthal Equidistant and Plate Carree projections
# for later use.
az_eq = ccrs.AzimuthalEquidistant(central_latitude=90)
pc = ccrs.PlateCarree()
# Pick a suitable location for the map (which is in an Azimuthal
# Equidistant projection).
ax = fig.add_axes([0.25, 0.24, 0.5, 0.54], projection=az_eq)
# The background patch is not needed in this example.
ax.patch.set_facecolor('none')
# The Axes frame produces the outer meridian line.
for spine in ax.spines.values():
spine.update({'edgecolor': 'white', 'linewidth': 2})
# We want the map to go down to -60 degrees latitude.
ax.set_extent([-180, 180, -60, 90], ccrs.PlateCarree())
# Importantly, we want the axes to be circular at the -60 latitude
# rather than cartopy's default behaviour of zooming in and becoming
# square.
_, patch_radius = az_eq.transform_point(0, -60, pc)
circular_path = matplotlib.path.Path.circle(0, patch_radius)
ax.set_boundary(circular_path)
if filled_land:
ax.add_feature(
cfeature.LAND, facecolor='white', edgecolor='none')
else:
ax.stock_img()
gl = ax.gridlines(crs=pc, linewidth=2, color='white', linestyle='-')
# Meridians every 45 degrees, and 4 parallels.
gl.xlocator = matplotlib.ticker.FixedLocator(np.arange(-180, 181, 45))
parallels = np.arange(-30, 70, 30)
gl.ylocator = matplotlib.ticker.FixedLocator(parallels)
# Now add the olive branches around the axes. We do this in normalised
# figure coordinates
olive_leaf = olive_path()
olives_bbox = Bbox.null()
olives_bbox.update_from_path(olive_leaf)
# The first olive branch goes from left to right.
olive1_axes_bbox = Bbox([[0.45, 0.15], [0.725, 0.75]])
olive1_trans = BboxTransform(olives_bbox, olive1_axes_bbox)
    # The second olive branch goes from right to left (mirroring the first).
olive2_axes_bbox = Bbox([[0.55, 0.15], [0.275, 0.75]])
olive2_trans = BboxTransform(olives_bbox, olive2_axes_bbox)
olive1 = PathPatch(olive_leaf, facecolor='white', edgecolor='none',
transform=olive1_trans + fig.transFigure)
olive2 = PathPatch(olive_leaf, facecolor='white', edgecolor='none',
transform=olive2_trans + fig.transFigure)
fig.patches.append(olive1)
fig.patches.append(olive2)
plt.show()
if __name__ == '__main__':
main()
| lgpl-3.0 |
Winand/pandas | asv_bench/benchmarks/panel_ctor.py | 7 | 2304 | from .pandas_vb_common import *
from datetime import timedelta
class Constructors1(object):
goal_time = 0.2
def setup(self):
self.data_frames = {}
self.start = datetime(1990, 1, 1)
self.end = datetime(2012, 1, 1)
for x in range(100):
self.end += timedelta(days=1)
self.dr = np.asarray(date_range(self.start, self.end))
self.df = DataFrame({'a': ([0] * len(self.dr)), 'b': ([1] * len(self.dr)), 'c': ([2] * len(self.dr)), }, index=self.dr)
self.data_frames[x] = self.df
def time_panel_from_dict_all_different_indexes(self):
Panel.from_dict(self.data_frames)
class Constructors2(object):
goal_time = 0.2
def setup(self):
self.data_frames = {}
for x in range(100):
self.dr = np.asarray(DatetimeIndex(start=datetime(1990, 1, 1), end=datetime(2012, 1, 1), freq='D'))
self.df = DataFrame({'a': ([0] * len(self.dr)), 'b': ([1] * len(self.dr)), 'c': ([2] * len(self.dr)), }, index=self.dr)
self.data_frames[x] = self.df
def time_panel_from_dict_equiv_indexes(self):
Panel.from_dict(self.data_frames)
class Constructors3(object):
goal_time = 0.2
def setup(self):
self.dr = np.asarray(DatetimeIndex(start=datetime(1990, 1, 1), end=datetime(2012, 1, 1), freq='D'))
self.data_frames = {}
for x in range(100):
self.df = DataFrame({'a': ([0] * len(self.dr)), 'b': ([1] * len(self.dr)), 'c': ([2] * len(self.dr)), }, index=self.dr)
self.data_frames[x] = self.df
def time_panel_from_dict_same_index(self):
Panel.from_dict(self.data_frames)
class Constructors4(object):
goal_time = 0.2
def setup(self):
self.data_frames = {}
self.start = datetime(1990, 1, 1)
self.end = datetime(2012, 1, 1)
for x in range(100):
if (x == 50):
self.end += timedelta(days=1)
self.dr = np.asarray(date_range(self.start, self.end))
self.df = DataFrame({'a': ([0] * len(self.dr)), 'b': ([1] * len(self.dr)), 'c': ([2] * len(self.dr)), }, index=self.dr)
self.data_frames[x] = self.df
def time_panel_from_dict_two_different_indexes(self):
Panel.from_dict(self.data_frames)
| bsd-3-clause |
jdorvi/CSHORE | other/print_folders.py | 1 | 1423 | # coding: utf-8
#Extracts all .63.bz2 files in directory folder starting at current directory
import os
import pandas as pd
DATAFILES_DIRECTORY = "P:/02/LakeOntario/Storm/"
def main(datafiles_directory):
""" doc string """
os.chdir(datafiles_directory)
stormlist = []
for root, dirs, files in os.walk("."):
if os.path.exists(root+"/fort.63") is True:
if len(root) == 10:
print(root.strip(".").strip("\\"))
stormlist.append(root.strip(".").strip("\\"))
stormlist.sort()
stormlist1 = stormlist[0:30]
stormlist2 = stormlist[30:60]
stormlist3 = stormlist[60:90]
stormlist4 = stormlist[90:120]
stormlist5 = stormlist[120:150]
stormlist1.insert(0, len(stormlist1))
stormlist2.insert(0, len(stormlist2))
stormlist3.insert(0, len(stormlist3))
stormlist4.insert(0, len(stormlist4))
stormlist5.insert(0, len(stormlist5))
storms1 = pd.Series(stormlist1)
storms2 = pd.Series(stormlist2)
storms3 = pd.Series(stormlist3)
storms4 = pd.Series(stormlist4)
storms5 = pd.Series(stormlist5)
storms1.to_csv('stormslist1.txt', index=False)
storms2.to_csv('stormslist2.txt', index=False)
storms3.to_csv('stormslist3.txt', index=False)
storms4.to_csv('stormslist4.txt', index=False)
storms5.to_csv('stormslist5.txt', index=False)
if __name__ == '__main__':
main(DATAFILES_DIRECTORY)
| apache-2.0 |
matsushitayuki/space-modeling | denoise_setup.py | 1 | 1756 | import numpy as np
import matplotlib.pyplot as plt
from OD_setup import *
def R_gene(N,n):
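    # Build the list of binary patch-extraction operators: each n x N matrix
    # pulls one sqrt(n) x sqrt(n) patch out of a vectorized sqrt(N) x sqrt(N)
    # image (N and n are assumed to be perfect squares).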
R_0 = np.zeros((n,N))
p = int(N**0.5 - n**0.5 + 1)
R_init = [int(N**0.5*i + j) for i in range(p) for j in range(p)]
R_list = []
for r in R_init:
R = np.array(R_0)
R_element = [int(r + N**0.5*i + j) for i in range(int(n**0.5)) for j in range(int((n**0.5)))]
for i in range(n):
R[i,R_element[i]] = 1
R_list.append(R)
return R_list
def alpha_line(lamda,x,D,T,e):
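    # Shrink the sparsity weight lamda geometrically until the sparse code
    # alpha reconstructs x within squared-error tolerance e.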
alpha = alpha_ADMM(lamda,x,D,T)
while np.linalg.norm(x - D@alpha)**2 > e:
lamda = 0.8*lamda
alpha = alpha_ADMM(lamda,x,D,T)
return alpha
def alpha_patch(lamda,X,D,T,e,R_list):
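    # Sparse-code every patch r@X of the image X against the dictionary D.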
alpha_list = []
i = 1
for r in R_list:
x = r@X
alpha_r = alpha_line(lamda,x,D,T,e)
alpha_list.append(alpha_r)
i += 1
return np.array(alpha_list)
def det_X(mu,X,D,alpha_list,R_list,inv):
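    # Update the image estimate by averaging the patch reconstructions;
    # `inv` is assumed to hold the precomputed inverse of (mu*I + sum_r R^T R).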
RDalpha = np.zeros((R_list[1].shape[1],1))
for i in range(len(R_list)):
RDalpha += R_list[i].T @ D @ alpha_list[i]
X = inv @ (mu*X + RDalpha)
return X
def denoise_show(x_list):
for i in range(len(x_list)):
x_t = x_list[i].reshape(int(x_list[i].shape[0]**0.5),int(x_list[i].shape[0]**0.5))
plt.subplot(1,3,i+1)
plt.imshow(x_t)
plt.gray()
def R(X,i,j,n):
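    # Extract the n x n patch of X whose top-left corner is at (i, j).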
return X[i:i+n,j:j+n]
def R_T(x,i,j,N):
n = x.shape[0]
C = np.zeros((N,N))
C[i:i+n,j:j+n] = x
return C
def R_T_R(N,n):
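    # Count how many patches cover each pixel of an N x N image (the diagonal
    # of sum_r R^T R), returned flattened as an N**2 x 1 column vector.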
A = np.ones((N,N))
B = np.zeros((N,N))
for i in range(N-n+1):
for j in range(N-n+1):
RA = R(A,i,j,n)
R_T_RA = R_T(RA,i,j,N)
B += R_T_RA
B = B.reshape(N**2,1)
return B
| gpl-3.0 |
mrgloom/h2o-3 | h2o-py/tests/testdir_algos/kmeans/pyunit_emptyclusKmeans.py | 3 | 1902 | import sys
sys.path.insert(1, "../../../")
import h2o
import random
import numpy as np
from sklearn import preprocessing
from sklearn.cluster import KMeans
def emptyclusKmeans(ip,port):
# Connect to a pre-existing cluster
# connect to localhost:54321
#Log.info("Importing ozone.csv data...\n")
ozone_sci = np.loadtxt(h2o.locate("smalldata/glm_test/ozone.csv"), delimiter=',', skiprows=1)
ozone_h2o = h2o.import_file(path=h2o.locate("smalldata/glm_test/ozone.csv"))
ncent = 10
nempty = random.randint(1,ncent/2)
initial_centers = [[41,190,67,7.4],
[36,118,72,8],
[12,149,74,12.6],
[18,313,62,11.5],
[23,299,65,8.6],
[19,99,59,13.8],
[8,19,61,20.1],
[16,256,69,9.7],
[11,290,66,9.2],
[14,274,68,10.9]]
for i in random.sample(range(0,ncent-1), nempty):
initial_centers[i] = [100*i for z in range(1,len(initial_centers[0])+1)]
initial_centers_h2o = h2o.H2OFrame(initial_centers)
initial_centers_sci = np.asarray(initial_centers)
#Log.info("Initial cluster centers:")
print "H2O initial centers:"
initial_centers_h2o.show()
print "scikit initial centers:"
print initial_centers_sci
# H2O can handle empty clusters and so can scikit
#Log.info("Check that H2O can handle badly initialized centers")
km_sci = KMeans(n_clusters=ncent, init=initial_centers_sci, n_init=1)
km_sci.fit(preprocessing.scale(ozone_sci))
print "scikit final centers"
print km_sci.cluster_centers_
km_h2o = h2o.kmeans(x=ozone_h2o, k=ncent, user_points=initial_centers_h2o, standardize=True)
print "H2O final centers"
print km_h2o.centers()
if __name__ == "__main__":
h2o.run_test(sys.argv, emptyclusKmeans)
| apache-2.0 |
airbnb/airflow | airflow/providers/snowflake/transfers/snowflake_to_slack.py | 5 | 7089 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Iterable, Mapping, Optional, Union
from pandas import DataFrame
from tabulate import tabulate
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.slack.hooks.slack_webhook import SlackWebhookHook
from airflow.providers.snowflake.hooks.snowflake import SnowflakeHook
from airflow.utils.decorators import apply_defaults
class SnowflakeToSlackOperator(BaseOperator):
"""
Executes an SQL statement in Snowflake and sends the results to Slack. The results of the query are
rendered into the 'slack_message' parameter as a Pandas dataframe using a JINJA variable called '{{
results_df }}'. The 'results_df' variable name can be changed by specifying a different
'results_df_name' parameter. The Tabulate library is added to the JINJA environment as a filter to
allow the dataframe to be rendered nicely. For example, set 'slack_message' to {{ results_df |
tabulate(tablefmt="pretty", headers="keys") }} to send the results to Slack as an ascii rendered table.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SnowflakeToSlackOperator`
:param sql: The SQL statement to execute on Snowflake (templated)
:type sql: str
:param slack_message: The templated Slack message to send with the data returned from Snowflake.
You can use the default JINJA variable {{ results_df }} to access the pandas dataframe containing the
SQL results
:type slack_message: str
:param snowflake_conn_id: The Snowflake connection id
:type snowflake_conn_id: str
:param slack_conn_id: The connection id for Slack
:type slack_conn_id: str
:param results_df_name: The name of the JINJA template's dataframe variable, default is 'results_df'
:type results_df_name: str
:param parameters: The parameters to pass to the SQL query
:type parameters: Optional[Union[Iterable, Mapping]]
:param warehouse: The Snowflake virtual warehouse to use to run the SQL query
:type warehouse: Optional[str]
:param database: The Snowflake database to use for the SQL query
:type database: Optional[str]
:param schema: The schema to run the SQL against in Snowflake
:type schema: Optional[str]
:param role: The role to use when connecting to Snowflake
:type role: Optional[str]
:param slack_token: The token to use to authenticate to Slack. If this is not provided, the
'webhook_token' attribute needs to be specified in the 'Extra' JSON field against the slack_conn_id
:type slack_token: Optional[str]
"""
template_fields = ['sql', 'slack_message']
template_ext = ['.sql', '.jinja', '.j2']
times_rendered = 0
@apply_defaults
def __init__( # pylint: disable=too-many-arguments
self,
*,
sql: str,
slack_message: str,
snowflake_conn_id: str = 'snowflake_default',
slack_conn_id: str = 'slack_default',
results_df_name: str = 'results_df',
parameters: Optional[Union[Iterable, Mapping]] = None,
warehouse: Optional[str] = None,
database: Optional[str] = None,
schema: Optional[str] = None,
role: Optional[str] = None,
slack_token: Optional[str] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.snowflake_conn_id = snowflake_conn_id
self.sql = sql
self.parameters = parameters
self.warehouse = warehouse
self.database = database
self.schema = schema
self.role = role
self.slack_conn_id = slack_conn_id
self.slack_token = slack_token
self.slack_message = slack_message
self.results_df_name = results_df_name
def _get_query_results(self) -> DataFrame:
snowflake_hook = self._get_snowflake_hook()
self.log.info('Running SQL query: %s', self.sql)
df = snowflake_hook.get_pandas_df(self.sql, parameters=self.parameters)
return df
def _render_and_send_slack_message(self, context, df) -> None:
# Put the dataframe into the context and render the JINJA template fields
context[self.results_df_name] = df
self.render_template_fields(context)
slack_hook = self._get_slack_hook()
self.log.info('Sending slack message: %s', self.slack_message)
slack_hook.execute()
def _get_snowflake_hook(self) -> SnowflakeHook:
return SnowflakeHook(
snowflake_conn_id=self.snowflake_conn_id,
warehouse=self.warehouse,
database=self.database,
role=self.role,
schema=self.schema,
)
def _get_slack_hook(self) -> SlackWebhookHook:
return SlackWebhookHook(
http_conn_id=self.slack_conn_id, message=self.slack_message, webhook_token=self.slack_token
)
def render_template_fields(self, context, jinja_env=None) -> None:
# If this is the first render of the template fields, exclude slack_message from rendering since
# the snowflake results haven't been retrieved yet.
if self.times_rendered == 0:
fields_to_render: Iterable[str] = filter(lambda x: x != 'slack_message', self.template_fields)
else:
fields_to_render = self.template_fields
if not jinja_env:
jinja_env = self.get_template_env()
# Add the tabulate library into the JINJA environment
jinja_env.filters['tabulate'] = tabulate
self._do_render_template_fields(self, fields_to_render, context, jinja_env, set())
self.times_rendered += 1
def execute(self, context) -> None:
if not isinstance(self.sql, str):
raise AirflowException("Expected 'sql' parameter should be a string.")
if self.sql is None or self.sql.strip() == "":
raise AirflowException("Expected 'sql' parameter is missing.")
if self.slack_message is None or self.slack_message.strip() == "":
raise AirflowException("Expected 'slack_message' parameter is missing.")
df = self._get_query_results()
self._render_and_send_slack_message(context, df)
self.log.debug('Finished sending Snowflake data to Slack')
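# Illustrative usage sketch (not part of the original module); the SQL, the
# message template and the connection ids below are hypothetical:
#
#   send_report = SnowflakeToSlackOperator(
#       task_id='orders_to_slack',
#       sql='SELECT COUNT(*) AS n_orders FROM my_schema.orders',
#       slack_message='Orders today: {{ results_df.n_orders.iloc[0] }}',
#       snowflake_conn_id='snowflake_default',
#       slack_conn_id='slack_default',
#   )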
| apache-2.0 |
vlsd/nlsymb | sin_optim.py | 1 | 9685 | # this is written for python2.7
# will not work with python3.3
# TODO figure out why!?
import sys
import numpy as np
import sympy as sym
from sympy import Symbol as S
import nlsymb
# nlsymb = reload(nlsymb)
from nlsymb import Timer, LineSearch, np, colored
from nlsymb.sys import *
from nlsymb.lqr import *
# coming soon to a theatre near you
# DoublePlot
def DPlot(tj, s, fig=None, clear=False,
xlims=(-2.6, 0.2), ylims=(-1.6, 1.1), label="",
**kwargs):
import matplotlib.pyplot as plt
if fig is None:
fig = plt.figure()
# rect = 0.15, 0.1, 0.7, 0.3
axl = fig.add_subplot(121, aspect='equal', xlim=xlims, ylim=ylims,
xlabel="$x(m)$", ylabel="$y(m)$",
title='(a)')
axr = fig.add_subplot(122, aspect='equal', xlim=xlims, ylim=ylims,
xlabel=r"$\bar{x}$", ylabel=r"$\bar{y}$",
title='(b)')
xlist = np.linspace(*xlims, num=200)
bound = axl.fill_between(xlist, ylims[0], np.sin(xlist),
facecolor='grey', alpha=0.5)
bound = axr.fill_between(xlims, ylims[0], 0.0,
facecolor='grey', alpha=0.5)
philbl = axl.text(-6, -4, "$\phi(q)<0$")
psilbl = axr.text(-6, -4, r"$\bar{\phi}(\bar{q})<0$")
[axl, axr] = fig.get_axes()
tj.xtoq(s)
q = np.array(tj._q).T
qb = np.array(map(s.Psi, tj._q)).T
tj.xtonq(s)
z = np.array(tj._q).T
zb = np.array(map(s.Psi, tj._q)).T
axl.plot(q[0], q[1], 'b-', label='q' + label, **kwargs)
axl.plot(z[0], z[1], 'r--', label='z' + label, **kwargs)
axr.plot(qb[0], qb[1], 'b-', label='qb' + label, **kwargs)
axr.plot(zb[0], zb[1], 'r--', label='zb' + label, **kwargs)
fig.show()
# ax.redraw_in_frame()
return fig
# plots a trajectory on the given canvas
def TPlot(tj, s, fig=None, ax=None, init=False,
#xlims=(-3.1, 0.2), ylims=(-1.6, 1.1), label="",
xlims=(-3.1, 0.2), ylims=(-1.6, 1.1), label="",
**kwargs):
import matplotlib.pyplot as plt
if fig is None:
fig = plt.figure(figsize=(4, 4))
# rect = 0.15, 0.1, 0.7, 0.3
ax = fig.gca(aspect='equal', xlim=xlims, ylim=ylims,
xlabel="$x(m)$", ylabel="$y(m)$")
xlist = np.linspace(*xlims, num=200)
bound = ax.fill_between(xlist, ylims[0], np.sin(xlist),
facecolor='grey', alpha=0.5)
philbl = ax.text(-1, -1, "$\phi(q)<0$")
if ax is None:
ax = fig.gca()
if init is not False:
ax.set(aspect='equal', xlim=xlims, ylim=ylims,
xlabel="$x(m)$", ylabel="$y(m)$")
xlist = np.linspace(*xlims, num=200)
ax.fill_between(xlist, ylims[0], np.sin(xlist),
facecolor='grey', alpha=0.5)
ax.text(-1, -1, "$\phi(q)<0$")
tj.xtoq(s)
q = np.array(tj._q).T
tj.xtonq(s)
z = np.array(tj._q).T
ax.plot(z[0], z[1], '--', label='z' + label, **kwargs)
ax.plot(q[0], q[1], '-', label='q' + label, lw=1.5, **kwargs)
fig.show()
plt.draw()
return fig
def quickPlot():
fig = TPlot(ref)
# TPlot(itj, fig=fig)
for tj in trajectories:
tj.xtonq(s)
TPlot(tj, fig=fig)
return fig
if __name__ == "__main__":
import matplotlib.pyplot as plt
import time
import pickle
# the following lines are in order to be able to reload nlsymb
# in ipython
# dreload(nlsymb, excludes)
from IPython.lib.deepreload import reload as dreload
excludes = ['time', 'pickle', 'matplotlib.pyplot', 'sys',
'__builtin__', '__main__', 'numpy', 'scipy',
'matplotlib', 'os.path', 'sympy', 'scipy.integrate',
'scipy.interpolate', 'nlsymb.sympy', 'nlsymb.numpy',
'nlsymb.scipy', 'nlsymb.copy', 'copy', 'nlsymb.time',
'scipy.linalg', 'numpy.linalg']
# load the reference (target) trajectory
ref_fn = sys.argv[1]
ref_file = open(ref_fn, 'rb')
ref = pickle.load(ref_file)
ref.feasible = False # let's not assume feasibility
ref_file.close()
# ref.tlims might be all jacked, lemme fix it first
#ref.tlims = (min(ref._t), max(ref._t))
#tlims = ref.tlims
tlims = (0, 3)
ta, tb = tlims
"""
t = np.linspace(0, 10, 100)
x = map(ref.x, t)
u = map(ref.u, t)
"""
with Timer("whole program"):
with Timer("creating symbolic system"):
#s = FlatFloor2D(k=3)
s = SinFloor2D(k=3)
# ref.xtonq(s)
ref.interpolate()
ref.tlims = tlims
if len(sys.argv)>2:
# initial trajectory was passed to us, use it
init_file = open(sys.argv[2], 'rb')
itj = pickle.load(init_file)
init_file.close()
itj.feasible = False # let's not assume feasibility
if not hasattr(itj, 'jumps'):
itj.jumps=[]
else:
# make an initial guess trajectory
qinit = np.array([0.0, 1.0])
qdoti = np.array([0.0, 0.0])
xinit = np.concatenate((s.Psi(qinit),
np.dot(s.dPsi(qinit), qdoti)))
itj = Trajectory('x', 'u')
#tmid1 = (2*tlims[0] + tlims[1])/3
#tmid2 = (tlims[0] + 2*tlims[1])/3
itj.addpoint(tlims[0], x=xinit, u=np.array([0.0, 0.0]))
#itj.addpoint(tmid1, x=ref.x(tmid1), u=np.array([0.0, 0.0]))
#itj.addpoint(tmid2, x=ref.x(tmid2), u=np.array([0.0, 0.0]))
# itj.addpoint(tlims[0], x=ref.x(tlims[0])*1.1, u=ref.u(tlims[0]))
# itj.addpoint(1.5, x=ref.x(1.5), u=ref.u(1.5))
itj.addpoint(tlims[1], x=xinit, u=np.array([0.0, 0.0]))
itj.jumps=[]
itj.xtoq(s)
itj.interpolate()
nlsys = System(s.f, tlims=tlims, xinit=itj.x(tlims[0]),
dfdx=s.dfdx, dfdu=s.dfdu)
nlsys.phi = s.phi
nlsys.ref = ref
nlsys.delf = s.delf
Rcost = lambda t: np.diag([1, 1])
Qcost = lambda t: t*np.diag([100, 200, 1, 1])/tb
#Qcost = lambda t: t*np.diag([10, 10, 1, 1])
PTcost = np.diag([0,0,0,0])
#PTcost = Qcost(tb)
# zerocontrol = Controller(reference=ref)
# nlsys.set_u(zerocontrol)
trajectories = []
costs = []
gradcosts = []
with Timer("initial projection and descent direction"):
tj = nlsys.project(itj, lin=True)
trajectories.append(tj)
cost = nlsys.build_cost(R=Rcost, Q=Qcost, PT=PTcost)
q = lambda t: matmult(tj.x(t) - ref.x(t), Qcost(t))
r = lambda t: matmult(tj.u(t) - ref.u(t), Rcost(t))
qf = matmult(tj.x(tb) - ref.x(tb), PTcost)
descdir = GradDirection(tlims, tj.A, tj.B, jumps=tj.jumps,
q=q, r=r, qf=qf)
descdir.solve()
costs.append(cost(tj))
print("[initial cost]\t\t" +
colored("%f" % costs[-1], 'red'))
ddir = descdir.direction
ddircost = cost(ddir, tspace=True)
gradcosts.append(ddircost)
print("[descent direction]\t" + colored("%f" % ddircost, 'yellow'))
index = 0
ls = None
while ddircost > 1e-3 and index < 20:
index = index + 1
with Timer("line search "):
                if index != 1:
costs.append(cost(tj))
print("[cost]\t\t" + colored("%f" % costs[-1], 'blue'))
ddir = descdir.direction
ddircost = cost(ddir, tspace=True)
gradcosts.append(ddircost)
print("[descent direction]\t" +\
colored("%f" % ddircost, 'yellow'))
if ls is None:
alpha = max(1 / ddircost, 1e-3)
else:
alpha = ls.gamma * 10
ls = LineSearch(cost, cost.grad, alpha=alpha, beta=1e-8)
ls.x = tj
ls.p = descdir.direction
ls.search()
tj = tj + ls.gamma * descdir.direction
# print("cost of trajectory after descent: %f" % cost(tj))
with Timer("second projection"):
tj = nlsys.project(tj, tlims=tlims, lin=True)
trajectories.append(tj)
with Timer("saving trajectory to file"):
ofile = open('pkl/sin_plastic_opt_tj.p','wb')
pickle.dump(tj, ofile)
ofile.close()
cost = nlsys.build_cost(R=Rcost, Q=Qcost, PT=PTcost)
q = lambda t: matmult(tj.x(t) - ref.x(t), Qcost(t))
r = lambda t: matmult(tj.u(t) - ref.u(t), Rcost(t))
qf = matmult(tj.x(tb) - ref.x(tb), PTcost)
with Timer("descent direction"):
descdir = GradDirection(tlims, tj.A, tj.B, jumps=tj.jumps,
q=q, r=r, qf=qf)
descdir.solve()
# tjt = tj
# qref = [s.xtopq(ref.x(t)) for t in tjt._t]
# q0 = map(s.xtopq, trajectories[0]._x)
# qnu = map(s.xtopq, tjt._x)
# plt.plot([qq[0] for qq in q0],
# [np.sin(qq[0]) for qq in q0])
# plt.plot([qq[0] for qq in qref], [qq[1] for qq in qref])
# plt.plot([qq[0] for qq in q0], [qq[1] for qq in q0])
# plt.plot([qq[0] for qq in qnu], [qq[1] for qq in qnu])
# plt.axis('equal')
# plt.show()
| mit |
HolgerPeters/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 56 | 11274 | """
Testing Recursive feature elimination
"""
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater, assert_equal, assert_true
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Verifying that steps < 1 don't blow up.
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=.2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfecv_verbose_output():
# Check verbose=1 is producing an output.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
sys.stdout = StringIO()
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5, verbose=1)
rfecv.fit(X, y)
verbose_output = sys.stdout
verbose_output.seek(0)
assert_greater(len(verbose_output.readline()), 0)
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
def test_rfe_cv_n_jobs():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
rfecv = RFECV(estimator=SVC(kernel='linear'))
rfecv.fit(X, y)
rfecv_ranking = rfecv.ranking_
rfecv_grid_scores = rfecv.grid_scores_
rfecv.set_params(n_jobs=2)
rfecv.fit(X, y)
assert_array_almost_equal(rfecv.ranking_, rfecv_ranking)
assert_array_almost_equal(rfecv.grid_scores_, rfecv_grid_scores)
| bsd-3-clause |
nrhine1/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 150 | 3651 | """
Testing for mean shift clustering methods
"""
import numpy as np
import warnings
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
# Test estimate_bandwidth
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
# Test MeanShift algorithm
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_parallel():
ms1 = MeanShift(n_jobs=2)
ms1.fit(X)
ms2 = MeanShift()
ms2.fit(X)
    assert_array_equal(ms1.cluster_centers_, ms2.cluster_centers_)
    assert_array_equal(ms1.labels_, ms2.labels_)
def test_meanshift_predict():
# Test MeanShift.predict
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_meanshift_all_orphans():
# init away from the data, crash with a sensible warning
ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
msg = "No point was within bandwidth=0.1"
assert_raise_message(ValueError, msg, ms.fit, X,)
def test_unfitted():
# Non-regression: before fit, there should be not fitted attributes.
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
# Test the bin seeding technique which can be used in the mean shift
# algorithm
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
# we bail and use the whole data here.
with warnings.catch_warnings(record=True):
test_bins = get_bin_seeds(X, 0.01, 1)
assert_array_equal(test_bins, X)
# tight clusters around [0, 0] and [1, 1], only get two bins
X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]],
cluster_std=0.1, random_state=0)
test_bins = get_bin_seeds(X, 1)
assert_array_equal(test_bins, [[0, 0], [1, 1]])
| bsd-3-clause |
GenericMappingTools/gmt-python | pygmt/src/blockm.py | 1 | 5803 | """
blockm - Block average (x,y,z) data tables by mean or median estimation.
"""
import pandas as pd
from pygmt.clib import Session
from pygmt.helpers import (
GMTTempFile,
build_arg_string,
fmt_docstring,
kwargs_to_strings,
use_alias,
)
def _blockm(block_method, table, outfile, x, y, z, **kwargs):
r"""
Block average (x,y,z) data tables by mean or median estimation.
Reads arbitrarily located (x,y,z) triples [or optionally weighted
quadruples (x,y,z,w)] from a table and writes to the output a mean or
median (depending on ``block_method``) position and value for every
non-empty block in a grid region defined by the ``region`` and ``spacing``
parameters.
Parameters
----------
block_method : str
Name of the GMT module to call. Must be "blockmean" or "blockmedian".
Returns
-------
output : pandas.DataFrame or None
Return type depends on whether the ``outfile`` parameter is set:
- :class:`pandas.DataFrame` table with (x, y, z) columns if ``outfile``
is not set
- None if ``outfile`` is set (filtered output will be stored in file
set by ``outfile``)
"""
with GMTTempFile(suffix=".csv") as tmpfile:
with Session() as lib:
# Choose how data will be passed into the module
table_context = lib.virtualfile_from_data(
check_kind="vector", data=table, x=x, y=y, z=z
)
# Run blockm* on data table
with table_context as infile:
if outfile is None:
outfile = tmpfile.name
arg_str = " ".join([infile, build_arg_string(kwargs), "->" + outfile])
lib.call_module(module=block_method, args=arg_str)
# Read temporary csv output to a pandas table
if outfile == tmpfile.name: # if user did not set outfile, return pd.DataFrame
try:
column_names = table.columns.to_list()
result = pd.read_csv(tmpfile.name, sep="\t", names=column_names)
except AttributeError: # 'str' object has no attribute 'columns'
result = pd.read_csv(tmpfile.name, sep="\t", header=None, comment=">")
elif outfile != tmpfile.name: # return None if outfile set, output in outfile
result = None
return result
@fmt_docstring
@use_alias(
I="spacing",
R="region",
V="verbose",
a="aspatial",
f="coltypes",
i="incols",
r="registration",
)
@kwargs_to_strings(R="sequence")
def blockmean(table=None, outfile=None, *, x=None, y=None, z=None, **kwargs):
r"""
Block average (x,y,z) data tables by mean estimation.
Reads arbitrarily located (x,y,z) triples [or optionally weighted
quadruples (x,y,z,w)] and writes to the output a mean position and value
for every non-empty block in a grid region defined by the ``region`` and
``spacing`` parameters.
Takes a matrix, xyz triplets, or a file name as input.
Must provide either ``table`` or ``x``, ``y``, and ``z``.
Full option list at :gmt-docs:`blockmean.html`
{aliases}
Parameters
----------
table : str or {table-like}
Pass in (x, y, z) or (longitude, latitude, elevation) values by
providing a file name to an ASCII data table, a 2D
{table-classes}.
x/y/z : 1d arrays
Arrays of x and y coordinates and values z of the data points.
{I}
{R}
outfile : str
The file name for the output ASCII file.
{V}
{a}
{i}
{f}
{r}
Returns
-------
output : pandas.DataFrame or None
Return type depends on whether the ``outfile`` parameter is set:
- :class:`pandas.DataFrame` table with (x, y, z) columns if ``outfile``
is not set.
- None if ``outfile`` is set (filtered output will be stored in file
set by ``outfile``).
"""
return _blockm(
block_method="blockmean", table=table, outfile=outfile, x=x, y=y, z=z, **kwargs
)
@fmt_docstring
@use_alias(
I="spacing",
R="region",
V="verbose",
a="aspatial",
f="coltypes",
i="incols",
r="registration",
)
@kwargs_to_strings(R="sequence")
def blockmedian(table=None, outfile=None, *, x=None, y=None, z=None, **kwargs):
r"""
Block average (x,y,z) data tables by median estimation.
Reads arbitrarily located (x,y,z) triples [or optionally weighted
quadruples (x,y,z,w)] and writes to the output a median position and value
for every non-empty block in a grid region defined by the ``region`` and
``spacing`` parameters.
Takes a matrix, xyz triplets, or a file name as input.
Must provide either ``table`` or ``x``, ``y``, and ``z``.
Full option list at :gmt-docs:`blockmedian.html`
{aliases}
Parameters
----------
table : str or {table-like}
Pass in (x, y, z) or (longitude, latitude, elevation) values by
providing a file name to an ASCII data table, a 2D
{table-classes}.
x/y/z : 1d arrays
Arrays of x and y coordinates and values z of the data points.
{I}
{R}
outfile : str
The file name for the output ASCII file.
{V}
{a}
{f}
{i}
{r}
Returns
-------
output : pandas.DataFrame or None
Return type depends on whether the ``outfile`` parameter is set:
- :class:`pandas.DataFrame` table with (x, y, z) columns if ``outfile``
is not set.
- None if ``outfile`` is set (filtered output will be stored in file
set by ``outfile``).
"""
return _blockm(
block_method="blockmedian",
table=table,
outfile=outfile,
x=x,
y=y,
z=z,
**kwargs
)
| bsd-3-clause |
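A minimal usage sketch for the blockmean/blockmedian wrappers defined above, assuming pygmt and pandas are installed; the region, spacing, and random (x, y, z) points are illustrative values only, not taken from the module.
import numpy as np
import pandas as pd
import pygmt

rng = np.random.default_rng(0)
table = pd.DataFrame({
    "x": rng.uniform(0, 10, 200),
    "y": rng.uniform(0, 10, 200),
    "z": rng.normal(size=200),
})

# One mean (x, y, z) row per non-empty 1x1 block inside the region;
# with no outfile set, the result comes back as a pandas.DataFrame
blocked = pygmt.blockmean(table=table, region=[0, 10, 0, 10], spacing="1/1")
print(blocked.head())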
jmontgom10/Mimir_pyPol | 01a_buildIndex.py | 1 | 12298 | # -*- coding: utf-8 -*-
"""
Restructures the rawFileIndex from PRISM_pyBDP to contain ONLY the science
images, and break those up into individual groups based on changes in
1) OBJECT (object name)
2) FILTER (optical filter value)
3) EXPTIME (the exposure time of the images)
4) Pointing changes (more than 1.5 degrees of change is considered a new group)
Attempts to associate each group with a target in the 'targetList' variable on
the basis of the string in the OBJECT column of that group.
Saves the index file with a USE and GROUP_ID columns added to the table.
"""
#Import whatever modules will be used
import os
import sys
import datetime
import time
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.table import Table, Column
from astropy.coordinates import SkyCoord
import astropy.units as u
from scipy import stats
# Add the AstroImage class
import astroimage as ai
#==============================================================================
# *********************** CUSTOM USER CODE ************************************
# this is where the user specifies where the raw data is stored
# and some of the subdirectory structure to find the actual .FITS images
#==============================================================================
# Set the directory for the PPOL reduced data
PPOL_data = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\PPOL_Reduced\\201611\\notPreFlattened'
S3_dir = os.path.join(PPOL_data, 'S3_Astrometry')
# This is the location where all pyPol data will be saved
pyPol_data='C:\\Users\\Jordan\\FITS_data\\Mimir_data\\pyPol_Reduced\\201611\\'
if (not os.path.isdir(pyPol_data)):
os.mkdir(pyPol_data, 0o755)
# Set the filename for the reduced data indexFile
indexFile = os.path.join(pyPol_data, 'reducedFileIndex.csv')
# Compose a list of expected targets. All groups will be assigned to ONE of
# these targets within a given tolerance. If no match is found for a group
# within this list of targets, then an error will be raised.
targetList = [
'M78',
'NGC7023',
'NGC2023'
]
# Create a dictionary with known group name problems. The keys should be the
# name of the group as it is currently, and the value should be the name of the
# group as it *ought* to be.
problematicGroupNames = {'NGC723_H3': 'NGC7023_H3'}
# Force all the targets to be upper case to remove ambiguity
targetList = [t.upper() for t in targetList]
################################################################################
# Generate a list of files in the 'polarimetry' directories
# fileList = np.array(recursive_file_search(BDP_data, exten='.fits'))
fileList = np.array(os.listdir(S3_dir))
#Sort the fileList
fileNums = [''.join((os.path.basename(f).split('.'))[0:2]) for f in fileList]
fileNums = np.array([f.split('_')[0] for f in fileNums], dtype=np.int64)
sortInds = fileNums.argsort()
fileList = fileList[sortInds]
# Define a dictionary for translating HWP rotation into IPPA
HWPstepList = np.array([
0, 33, 67, 100,
133, 167, 200, 233,
267, 300, 333, 367,
400, 433, 467, 500
])
HWPlist = np.arange(16, dtype=int) + 1
IPPAlist = np.array(4*[0, 45, 90, 135])
HWPstep_to_HWP = dict(zip(HWPstepList, HWPlist))
HWPstep_to_IPPA = dict(zip(HWPstepList, IPPAlist))
# Test for image type
print('\nCategorizing files into TARGET, HWP, BAAB\n')
startTime = time.time()
# Begin by initalizing some arrays to store the image classifications
OBJECT = []
OBSTYPE = []
FILTER = []
TELRA = []
TELDEC = []
EXPTIME = []
HWP = []
IPPA = []
AB = []
NIGHT = []
MJD = []
percentage = 0
#Loop through each file in the fileList variable
numberOfFiles = len(fileList)
for iFile, filename in enumerate(fileList):
# Read in the file
thisFile = os.path.join(S3_dir, filename)
thisHDU = fits.open(thisFile)
thisHeader = thisHDU[0].header
# Grab the OBJECT header value
tmpOBJECT = thisHeader['OBJECT']
if len(tmpOBJECT) < 1:
tmpOBJECT = 'blank'
OBJECT.append(tmpOBJECT)
# Grab the OBSTYPE header value
OBSTYPE.append(thisHeader['OBSTYPE'])
    # Grab the FILTNME2 header value
FILTER.append(thisHeader['FILTNME2'])
# Grab the TELRA header value
try:
TELRA.append(thisHeader['TELRA'])
except:
TELRA.append(0)
try:
TELDEC.append(thisHeader['TELDEC'])
except:
TELDEC.append(0)
# Grab the HWP header value
thisHWP = thisHeader['HWP']
HWPdiff = np.abs(HWPstepList - thisHWP)
thisHWPstep = HWPstepList[HWPdiff.argmin()]
thisHWP = HWPstep_to_HWP[thisHWPstep]
HWP.append(thisHWP)
    # Append the IPPA equivalent
IPPA.append(HWPstep_to_IPPA[thisHWPstep])
# Search for the A-pos B-pos value
if 'COMMENT' in thisHeader:
# This is SUPER lazy, but it gets the job done
for thisComment in thisHeader['COMMENT']:
if 'HWP' in thisComment and 'posn' in thisComment:
thisAB = thisComment[-6]
else:
thisAB = 'A'
else:
thisAB = 'A'
# Append the AB value to the list
if thisAB == 'A' or thisAB == 'B':
AB.append(thisAB)
else:
# Oops... something went wrong. You should only have As or Bs
import pdb; pdb.set_trace()
# Grab the EXPTIME value from the header
EXPTIME.append(thisHeader['EXPTIME'])
# Assign a NIGHT value for this image
NIGHT.append(''.join((os.path.basename(filename).split('.'))[0]))
############################################################################
# Compute the julian date for this observation
# Compute proleptic Gregorian date (Number of days since 0001-01-01 AD)
thisDatetime = datetime.datetime.strptime(
thisHeader['DATE'],
'%Y-%m-%dT%H:%M:%S'
)
prolepticGregorianDate = thisDatetime.toordinal()
# Grab the time of this observation
tmpTime = thisDatetime.time()
# Compute the fraction of a day represented by the above time
fractionOfDay = (
(tmpTime.hour + (tmpTime.minute + (tmpTime.second/60.0))/60.0)/24.0
)
# Compute the julian date (including the fraction of a day)
julianDate = prolepticGregorianDate + fractionOfDay + 1721424.5
thisMJD = julianDate - 2400000.5
    MJD.append(thisMJD)
############################################################################
# Count the files completed and print update progress message
percentage1 = np.floor(100*iFile/numberOfFiles)
if percentage1 != percentage:
print('completed {0:3g}%'.format(percentage1), end="\r")
percentage = percentage1
print('completed {0:3g}%'.format(100), end="\r")
endTime = time.time()
print('\nFile processing completed in {0:g} seconds'.format(endTime - startTime))
# Query the user about the targets of each group...
# Write the file index to disk
reducedFileIndex = Table(
[fileList, NIGHT, MJD, OBSTYPE, OBJECT,
FILTER, TELRA, TELDEC, EXPTIME, HWP, IPPA, AB],
names = ['FILENAME', 'NIGHT', 'MJD', 'OBSTYPE', 'OBJECT',
'FILTER', 'TELRA', 'TELDEC', 'EXPTIME', 'HWP', 'IPPA', 'AB'])
# Remap the filenames to be the reduced filenames
fileBasenames = [os.path.basename(f) for f in reducedFileIndex['FILENAME']]
# reducedFilenames = [os.path.join(pyBDP_reducedDir, f) for f in fileBasenames]
# reducedFileIndex['FILENAME'] = reducedFilenames
# Find the breaks in observation procedure. These are candidates for group
# boundaries.
# 1) OBJECT changes
objectChange = (reducedFileIndex['OBJECT'][1:] != reducedFileIndex['OBJECT'][0:-1])
# 2) OBSTYPE changes
obstypeChange = (reducedFileIndex['OBSTYPE'][1:] != reducedFileIndex['OBSTYPE'][0:-1])
# 3) FILTER changes
filterChange = (reducedFileIndex['FILTER'][1:] != reducedFileIndex['FILTER'][0:-1])
# 4) EXPTIME changes
expTimeChange = (reducedFileIndex['EXPTIME'][1:] != reducedFileIndex['EXPTIME'][0:-1])
# 5) Pointing changes
# Treat pointing differences of 1.5 degrees or more as a group boundary
allPointings = SkyCoord(
reducedFileIndex['TELRA'],
reducedFileIndex['TELDEC'],
unit=(u.hour, u.degree)
)
medianDecs = 0.5*(allPointings[1:].dec.to(u.rad) + allPointings[0:-1].dec.to(u.rad))
deltaDec = allPointings[1:].dec - allPointings[0:-1].dec
deltaRA = (allPointings[1:].ra - allPointings[0:-1].ra)*np.cos(medianDecs)
deltaPointing = np.sqrt(deltaRA**2 + deltaDec**2)
pointingChange = deltaPointing > (1.5*u.deg)
# Identify all changes
allChanges = objectChange
allChanges = np.logical_or(allChanges, obstypeChange)
allChanges = np.logical_or(allChanges, filterChange)
allChanges = np.logical_or(allChanges, expTimeChange)
allChanges = np.logical_or(allChanges, pointingChange)
# Assign a GROUP_ID for each group
groupBoundaries = np.hstack([0, np.where(allChanges)[0] + 1, allChanges.size])
groupIDs = []
for i in range(groupBoundaries.size - 1):
# Find the start and end indices of the group
groupStartInd = groupBoundaries[i]
groupEndInd = groupBoundaries[i+1]
    # Build the group ID number
groupID = i + 1
# Count the number of images in this group
numberOfImages = groupEndInd - groupStartInd
# Build the list of ID numbers for THIS group and append it to the full list
thisGroupID = numberOfImages*[groupID]
groupIDs.extend(thisGroupID)
# Fill in the final entry
groupIDs.append(groupID)
# Store the groupID number in the reducedFileIndex
groupIDcolumn = Column(name='GROUP_ID', data=groupIDs)
reducedFileIndex.add_column(groupIDcolumn, index=2)
# Now remove any GROUPS with less than 8 images
groupIndex = reducedFileIndex.group_by('GROUP_ID')
goodGroupInds = []
groupInds = groupIndex.groups.indices
for startInd, endInd in zip(groupInds[:-1], groupInds[+1:]):
# Count the number of images in this group and test if it's any good.
if (endInd - startInd) >= 8:
goodGroupInds.extend(range(startInd, endInd))
# Cull the reducedFileIndex to only include viable groups
goodGroupInds = np.array(goodGroupInds)
reducedFileIndex = reducedFileIndex[goodGroupInds]
# Match a dither type for each group ("ABBA" or "HEX")
groupIndex = reducedFileIndex.group_by('GROUP_ID')
ditherType = []
for group in groupIndex.groups:
# Count the number of images in this group
numberOfImages = len(group)
# Test if this is an ABBA or HEX dither
if ('A' in group['AB']) and ('B' in group['AB']):
ditherType.extend(numberOfImages*['ABBA'])
if ('A' in group['AB']) and not ('B' in group['AB']):
ditherType.extend(numberOfImages*['HEX'])
# Store the ditherNames number in the reducedFileIndex
ditherTypeColumn = Column(name='DITHER_TYPE', data=ditherType)
groupIndex.add_column(ditherTypeColumn, index=10)
# Identify meta-groups pointing at a single target with a single dither style.
targets = []
for group in groupIndex.groups:
# Count the number of images in this group
numberOfImages = len(group)
# Get the group name
groupName = np.unique(group['OBJECT'])[0]
# Capitalize the group name to remove ambiguity
groupName = groupName.upper()
# Rename the group if it needs to be renamed
if groupName in problematicGroupNames:
groupName = problematicGroupNames[groupName]
    # Test if a target name occurs in this group name
for target in targetList:
if target in groupName:
targets.extend(numberOfImages*[target])
break
else:
import pdb; pdb.set_trace()
        raise ValueError('Group {} found no match in the target list'.format(groupName))
# Add the target identifications to the groupIndex table
targetColumn = Column(name='TARGET', data=targets)
groupIndex.add_column(targetColumn, index=5)
# Re-order by filename. Start by getting the sorting array
sortInds = groupIndex['FILENAME'].data.argsort()
reducedFileIndex = groupIndex[sortInds]
# Add a "BACKGROUND" column
reducedFileIndex['BACKGROUND'] = -1e6*np.ones(len(reducedFileIndex))
# Finally, add a column of "use" flags at the first index
useColumn = Column(name='USE', data=np.ones((len(reducedFileIndex),), dtype=int))
reducedFileIndex.add_column(useColumn, index=0)
# Save the index to disk.
reducedFileIndex.write(indexFile, format='ascii.csv', overwrite=True)
print('Done!')
| mit |
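A minimal, self-contained sketch of the group-boundary logic the script above applies to its reduced file index, assuming only NumPy and astropy; the toy table and its OBJECT/FILTER/EXPTIME values are invented for illustration, and the pointing-change test is omitted here.
import numpy as np
from astropy.table import Table

# Toy index standing in for the reduced file index built above
index = Table({
    'OBJECT':  ['M78', 'M78', 'M78', 'NGC7023', 'NGC7023'],
    'FILTER':  ['H',   'H',   'K',   'K',       'K'],
    'EXPTIME': [10.0,  10.0,  10.0,  10.0,      10.0],
})

# A new group starts wherever OBJECT, FILTER, or EXPTIME changes between rows
changes = np.zeros(len(index) - 1, dtype=bool)
for col in ('OBJECT', 'FILTER', 'EXPTIME'):
    vals = np.asarray(index[col])
    changes |= (vals[1:] != vals[:-1])

# Cumulative count of boundaries gives a 1-based group ID per row
index['GROUP_ID'] = np.concatenate([[1], 1 + np.cumsum(changes)])
print(index)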
wbap/Hackathon2015 | Hiroshiba/NeuralNetwork/test_LastML.py | 2 | 11668 |
# coding: utf-8
# In[ ]:
import os
import sys
import re # for regex
import math
import json
import pickle
from PIL import Image
import numpy as np
from sklearn.datasets import fetch_mldata
# import matplotlib.pyplot as plt
# get_ipython().magic('matplotlib inline')
from chainer import cuda, Function, FunctionSet, gradient_check, Variable, optimizers
import chainer.functions as F
from dA import DenoisingAutoencoder
from SdA import StackedDenoisingAutoencoder
import utils
# In[ ]:
## Params
use_cuda = True
batchsize = 100
if use_cuda:
n_epoch_SdA = 30
n_epoch_fA = 50
n_epoch_last = 30
else:
n_epoch_SdA = 5
n_epoch_fA = 10
n_epoch_last = 30
if use_cuda:
path_imagedir = {'self':os.environ['HOME'] + '/Hevy/wba_hackathon/self_mit_fp_50x1000/', 'other':os.environ['HOME'] + '/Hevy/wba_hackathon/other_mit_fp_50x100/'}
n_dataset_self = 1000
n_dataset_other = 100
else:
path_imagedir = {'self':os.environ['HOME'] + '/Hevy/wba_hackathon/self_mit_fp_50x100/', 'other':os.environ['HOME'] + '/Hevy/wba_hackathon/other_mit_fp_50x100/'}
n_dataset_self = 100
n_dataset_other = 100
n_dataset = n_dataset_self + n_dataset_other
size_image = [64, 64]
scale = 4
size_image[0]=size_image[0]/scale
size_image[1]=size_image[1]/scale
n_hold = 10
n_cross = 1
n_fA_node = 128
n_loss_node = 48
n_moveframe = 50
n_oneframe = 5
n_onemovie = int(n_moveframe / n_oneframe)
if use_cuda:
n_hiddens = (18**2, 12**2, 6**2)
else:
n_hiddens = (8**2, 2**2)
num_images = n_dataset * n_moveframe
num_movie = num_images / n_oneframe
num_test_dataset = n_dataset // n_hold
num_train_dataset = n_dataset - num_test_dataset
num_test_movie = num_movie // n_hold
num_train_movie = num_movie - num_test_movie
if use_cuda:
cuda.check_cuda_available()
# In[ ]:
## load images
size = size_image[0]
num_pximage = size**2
num_pxmovie = n_oneframe * num_pximage
# load images
movies = np.zeros((n_dataset, n_moveframe, num_pximage), dtype=np.float32)
i = 0
for label in {'self', 'other'}:
for name in os.listdir(path_imagedir[label]):
if re.match( '.*png$', name ):
img = Image.open( os.path.join(path_imagedir[label], name) )
img.thumbnail( (size_image[0], size_image[1]) )
img = np.asarray(img, dtype=np.float32).mean(axis=2).T
movies[i//n_moveframe, i%n_moveframe, :] = np.reshape( img / 255.0, (1, -1) )
i = i+1
## load json files
joint_angles = [{}] * num_images
i = 0
for label in {'self', 'other'}:
for name in os.listdir(path_imagedir[label]):
if re.match( '.*json$', name ):
j = json.load( open(os.path.join(path_imagedir[label], name)) )
joint_angles[i] = j['joint_angle']
i = i+1
# In[ ]:
## setup ML values
v_all = np.reshape(movies, (n_dataset, -1))
v_all = utils.splitInputs(v_all, n_moveframe/n_oneframe)
num_node_x = 8
x_all = np.zeros((num_images, num_node_x), dtype=np.float32)
for i in range(len(joint_angles)):
x_all[i][0:3] = [joint_angles[i]['left_shoulder']['y'], joint_angles[i]['left_shoulder']['p'], joint_angles[i]['left_shoulder']['r']]
x_all[i][3] = joint_angles[i]['left_elbow']['p']
x_all[i][4:7] = [joint_angles[i]['right_shoulder']['y'], joint_angles[i]['right_shoulder']['p'], joint_angles[i]['right_shoulder']['r']]
x_all[i][7] = joint_angles[i]['right_elbow']['p']
x_all = x_all/180
x_all = utils.bindInputs(x_all, n_moveframe)
x_all = utils.splitInputs(x_all, n_moveframe/n_oneframe)
# x_all[0:n_dataset, :, :] = np.random.rand(x_all.shape[-1])
# v_all[0:n_dataset_self, :, :] = np.random.rand(v_all.shape[-1])
# v_all[n_dataset_self:n_dataset, :, :] = np.random.rand(n_dataset_other, n_onemovie, v_all.shape[-1])
# label 0:other, 1:self
label_x = np.append( np.ones((n_dataset_self), dtype=np.int32), np.zeros((n_dataset_other), dtype=np.int32) )
# In[ ]:
# shuffle all data
rng = np.random.RandomState(1234)
indices = np.arange(n_dataset, dtype=np.int32)
rng.shuffle(indices)
v_all = v_all[indices]
x_all = x_all[indices]
label_x = label_x[indices]
n_set = n_dataset / n_hold
# split each data into 10 block
v_s = np.split(v_all, n_set*np.r_[1:n_hold])
x_s = np.split(x_all, n_set*np.r_[1:n_hold])
label_x_s = np.split(label_x, n_set*np.r_[1:n_hold])
num_layers= len(n_hiddens)
# In[ ]:
def forward(x_data, y_data):
x = Variable(x_data); t = Variable(y_data)
h = F.sigmoid(model.l1(x))
y = model.l2(h)
return F.mean_squared_error(y, t), y
def forwardLastML(x_data, y_data):
x = Variable(x_data); t = Variable(y_data)
h = F.sigmoid(model.l1(x))
y = model.l2(h)
return F.softmax_cross_entropy(y, t), y
list_cross = []
for i in range(n_cross):
# split test and train data
set_l = list(set(range(n_hold)).difference([i]))
v_train = np.empty(0, dtype=np.float32)
x_train = np.empty(0, dtype=np.float32)
label_train = np.empty(0, dtype=np.int32)
for i_set in range(n_hold-1):
v_train = utils.vstack_(v_train, v_s[set_l[i_set]])
x_train = utils.vstack_(x_train, x_s[set_l[i_set]])
label_train = utils.vstack_(label_train, label_x_s[set_l[i_set]])
v_train = np.reshape(v_train, (num_train_movie, -1))
x_train = np.reshape(x_train, (num_train_movie, -1))
label_train = np.reshape(label_train, (num_train_dataset, -1))
v_test = np.reshape(v_s[i], (num_test_movie, -1))
x_test = np.reshape(x_s[i], (num_test_movie, -1))
label_test = label_x_s[i]
# create SdA
sda = StackedDenoisingAutoencoder(num_pxmovie, n_hiddens, n_epoch=n_epoch_SdA, use_cuda=use_cuda)
sda.train(v_train)
# split test and train data
y_train_each = sda.predict(v_train, bAllLayer=True)
y_test_each = sda.predict(v_test, bAllLayer=True)
list_layer = []
for j in range(num_layers):
y_train = y_train_each[j]
y_test = y_test_each[j]
# separate x&y into other and self
x_test_split = [np.empty(0,dtype=np.float32), np.empty(0,dtype=np.float32)]
y_test_split = [np.empty(0,dtype=np.float32), np.empty(0,dtype=np.float32)]
for i_test in range(int(num_test_movie)):
label = label_test[i_test//n_onemovie]
x_test_split[label] = utils.vstack_(x_test_split[label], x_test[i_test])
y_test_split[label] = utils.vstack_(y_test_split[label], y_test[i_test])
# train with only self body
num_train_self = 0
x_train_split = np.empty(0,dtype=np.float32)
y_train_split = np.empty(0,dtype=np.float32)
for i_train in range(int(num_train_movie)):
if label_train[i_train//n_onemovie]==1:
x_train_split = utils.vstack_(x_train_split, x_train[i_train])
y_train_split = utils.vstack_(y_train_split, y_train[i_train])
num_train_self = num_train_self + 1
# f(x->y)
model = FunctionSet(
l1 = F.Linear(num_node_x*n_oneframe, n_fA_node),
l2 = F.Linear(n_fA_node, n_hiddens[j])
)
optimizer = optimizers.SGD()
optimizer.setup(model.collect_parameters())
dic = {'loss':{}, 'hist':{}, 'lastpredict':{}}
dic['loss'] = {'self':np.empty(0,dtype=np.float32), 'other':np.empty(0,dtype=np.float32)}
for epoch in range(n_epoch_fA):
indexes = np.random.permutation(int(num_train_self))
sum_loss = 0
for k in range(0, num_train_self, batchsize):
x_batch = x_train_split[indexes[k : k + batchsize]]
y_batch = y_train_split[indexes[k : k + batchsize]]
optimizer.zero_grads()
loss, output = forward(x_batch, y_batch)
loss.backward()
optimizer.update()
sum_loss = sum_loss+loss.data*batchsize
print('fA: epoch:'+str(epoch)+' loss:' + str(sum_loss/num_train_movie))
# test
loss, output = forward(x_test_split[1], y_test_split[1])
dic['loss']['self'] = utils.vstack_(dic['loss']['self'], loss.data)
loss, output = forward(x_test_split[0], y_test_split[0])
dic['loss']['other'] = utils.vstack_(dic['loss']['other'], loss.data)
print('test loss:' + str(loss.data))
dic['hist'] = {'self':np.empty(0, dtype=np.float32), 'other':np.empty(0, dtype=np.float32)}
for i_test in range((x_test_split[1].shape[0])):
loss, output = forward(x_test_split[1][i_test][None], y_test_split[1][i_test][None]) # [8,][None] -> [1,8]
dic['hist']['self'] = utils.vstack_(dic['hist']['self'], loss.data)
for i_test in range(x_test_split[0].shape[0]):
loss, output = forward(x_test_split[0][i_test][None], y_test_split[0][i_test][None])
dic['hist']['other'] = utils.vstack_(dic['hist']['other'], loss.data)
# loss => self or other
loss_train = np.zeros((num_train_dataset, n_onemovie), dtype=np.float32)
for i_train in range(num_train_dataset):
for i_movie in range(n_onemovie):
loss, output = forward(x_train[i_train*n_onemovie+i_movie][None], y_train[i_train*n_onemovie+i_movie][None])
loss_train[i_train, i_movie] = loss.data
loss_test = np.zeros((num_test_dataset, n_onemovie), dtype=np.float32)
for i_test in range(num_test_dataset):
for i_movie in range(n_onemovie):
loss, output = forward(x_test[i_test*n_onemovie+i_movie][None], y_test[i_test*n_onemovie+i_movie][None])
loss_test[i_test, i_movie] = loss.data
m = np.r_[loss_train, loss_test].max()
loss_train = loss_train / m
        loss_test = loss_test / m
model = FunctionSet(
l1 = F.Linear(n_onemovie, n_loss_node),
l2 = F.Linear(n_loss_node, 2)
)
optimizer = optimizers.SGD()
optimizer.setup(model.collect_parameters())
for epoch in range(n_epoch_last):
indexes = np.random.permutation(int(num_train_dataset))
sum_loss = 0
for k in range(0, int(num_train_dataset), batchsize):
x_batch = loss_train[indexes[k : k + batchsize]]
y_batch = label_train[indexes[k : k + batchsize]].ravel()
optimizer.zero_grads()
loss, output = forwardLastML(x_batch, y_batch)
loss.backward()
optimizer.update()
sum_loss = sum_loss+loss.data*batchsize
print('LastML: epoch:'+str(epoch)+' loss:' + str(sum_loss/num_train_dataset))
dic['lastpredict']['label'] = label_test
dic['lastpredict']['pedict'] = np.empty(0, dtype=np.int)
dic['lastpredict']['output'] = np.empty(0, dtype=np.float)
for i_test in range(num_test_dataset):
loss, output = forwardLastML(loss_test[i_test][None], label_test[i_test].ravel())
dic['lastpredict']['output'] = utils.vstack_(dic['lastpredict']['output'], output.data)
if output.data[0,0] > output.data[0,1]:
dic['lastpredict']['pedict'] = utils.vstack_(dic['lastpredict']['pedict'], 0)
else:
dic['lastpredict']['pedict'] = utils.vstack_(dic['lastpredict']['pedict'], 1)
list_layer.append(dic)
list_cross.append(list_layer)
# In[ ]:
# save data
f = open('save.dump', 'wb')
pickle.dump(list_cross, f)
print('finish!')
# In[ ]:
| apache-2.0 |
toobaz/pandas | pandas/tests/util/test_util.py | 2 | 2648 | import os
import sys
import pytest
import pandas.compat as compat
from pandas.compat import raise_with_traceback
import pandas.util.testing as tm
def test_rands():
r = tm.rands(10)
assert len(r) == 10
def test_rands_array_1d():
arr = tm.rands_array(5, size=10)
assert arr.shape == (10,)
assert len(arr[0]) == 5
def test_rands_array_2d():
arr = tm.rands_array(7, size=(10, 10))
assert arr.shape == (10, 10)
assert len(arr[1, 1]) == 7
def test_numpy_err_state_is_default():
expected = {"over": "warn", "divide": "warn", "invalid": "warn", "under": "ignore"}
import numpy as np
# The error state should be unchanged after that import.
assert np.geterr() == expected
def test_raise_with_traceback():
with pytest.raises(LookupError, match="error_text"):
try:
raise ValueError("THIS IS AN ERROR")
except ValueError:
e = LookupError("error_text")
raise_with_traceback(e)
with pytest.raises(LookupError, match="error_text"):
try:
raise ValueError("This is another error")
except ValueError:
e = LookupError("error_text")
_, _, traceback = sys.exc_info()
raise_with_traceback(e, traceback)
def test_convert_rows_list_to_csv_str():
rows_list = ["aaa", "bbb", "ccc"]
ret = tm.convert_rows_list_to_csv_str(rows_list)
if compat.is_platform_windows():
expected = "aaa\r\nbbb\r\nccc\r\n"
else:
expected = "aaa\nbbb\nccc\n"
assert ret == expected
def test_create_temp_directory():
with tm.ensure_clean_dir() as path:
assert os.path.exists(path)
assert os.path.isdir(path)
assert not os.path.exists(path)
def test_assert_raises_regex_deprecated():
# see gh-23592
with tm.assert_produces_warning(FutureWarning):
msg = "Not equal!"
with tm.assert_raises_regex(AssertionError, msg):
assert 1 == 2, msg
@pytest.mark.parametrize("strict_data_files", [True, False])
def test_datapath_missing(datapath):
with pytest.raises(ValueError, match="Could not find file"):
datapath("not_a_file")
def test_datapath(datapath):
args = ("data", "iris.csv")
result = datapath(*args)
expected = os.path.join(os.path.dirname(os.path.dirname(__file__)), *args)
assert result == expected
def test_rng_context():
import numpy as np
expected0 = 1.764052345967664
expected1 = 1.6243453636632417
with tm.RNGContext(0):
with tm.RNGContext(1):
assert np.random.randn() == expected1
assert np.random.randn() == expected0
| bsd-3-clause |
AnasGhrab/scikit-learn | doc/conf.py | 210 | 8446 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
winklerand/pandas | pandas/tests/test_algos.py | 1 | 55742 | # -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.util.testing as tm
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
# GH 5986
v1 = pd.to_timedelta('1 day 1 min')
v2 = pd.to_timedelta('1 day')
x = Series([v1, v2, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if pd._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, -1], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = pd.unique(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.unique()
tm.assert_categorical_equal(result, expected)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.unique()
tm.assert_categorical_equal(result, expected_o)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected_o)
# Series of categorical dtype
s = Series(Categorical(list('baabc')), name='foo')
result = s.unique()
tm.assert_categorical_equal(result, expected)
result = pd.unique(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.unique()
tm.assert_index_equal(result, expected)
result = pd.unique(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Series(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).unique()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).unique()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(
Series(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = pd.unique(Series([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = pd.unique(Series([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = pd.unique(Series([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Series(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = pd.unique(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.isin(1, 1))
pytest.raises(TypeError, lambda: algos.isin(1, [1]))
pytest.raises(TypeError, lambda: algos.isin([1], 1))
def test_basic(self):
result = algos.isin([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), Series([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), Series(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = pd.date_range('20130101', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = pd.timedelta_range('1 day', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = pd.date_range('20000101', periods=2000000, freq='s').values
result = algos.isin(s, s[0:2])
expected = np.zeros(len(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Series(Categorical(1).from_codes(vals, cats))
St = Series(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.isin(Sd, St)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.isin(vals, empty)
tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_value_counts(self):
np.random.seed(1234)
from pandas.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert isinstance(factor, n)
result = algos.value_counts(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
expected_index = IntervalIndex.from_breaks(breaks).astype('category')
expected = Series([1, 1, 1, 1], index=expected_index)
tm.assert_series_equal(result.sort_index(), expected.sort_index())
def test_value_counts_bins(self):
s = [1, 2, 3, 4]
result = algos.value_counts(s, bins=1)
expected = Series([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_series_equal(result, expected)
result = algos.value_counts(s, bins=2, sort=False)
expected = Series([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_series_equal(result, expected)
def test_value_counts_dtypes(self):
result = algos.value_counts([1, 1.])
assert len(result) == 1
result = algos.value_counts([1, 1.], bins=1)
assert len(result) == 1
result = algos.value_counts(Series([1, 1., '1'])) # object
assert len(result) == 2
pytest.raises(TypeError, lambda s: algos.value_counts(s, bins=1),
['1', 1])
def test_value_counts_nat(self):
td = Series([np.timedelta64(10000), pd.NaT], dtype='timedelta64[ns]')
dt = pd.to_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.value_counts(s)
vc_with_na = algos.value_counts(s, dropna=False)
assert len(vc) == 1
assert len(vc_with_na) == 2
exp_dt = Series({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_series_equal(algos.value_counts(dt), exp_dt)
# TODO same for (timedelta)
def test_value_counts_datetime_outofbounds(self):
# GH 13663
s = Series([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1),
datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.value_counts()
exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
exp = Series([3, 2, 1], index=exp_index)
tm.assert_series_equal(res, exp)
# GH 12424
res = pd.to_datetime(Series(['2362-01-01', np.nan]),
errors='ignore')
exp = Series(['2362-01-01', np.nan], dtype=object)
tm.assert_series_equal(res, exp)
def test_categorical(self):
s = Series(Categorical(list('aaabbc')))
result = s.value_counts()
expected = Series([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.value_counts()
expected.index = expected.index.as_ordered()
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Series(Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([
4, 3, 2, 1
], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_series_equal(result, expected, check_index_type=True)
# out of order
s = Series(Categorical(
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([4, 3, 2, 1], index=CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Series(Categorical(
list('bbbaac'), categories=list('abcd'), ordered=True))
result = s.value_counts()
expected = Series([3, 2, 1, 0], index=Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_dropna(self):
# https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=False),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=False),
Series([2, 1, 1], index=[True, False, np.nan]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=False),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5., None]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
result = Series([10.3, 5., 5., None]).value_counts(dropna=False)
expected = Series([2, 1, 1], index=[5., 10.3, np.nan])
tm.assert_series_equal(result, expected)
def test_value_counts_normalized(self):
# GH12558
s = Series([1, 2, np.nan, np.nan, np.nan])
dtypes = (np.float64, np.object, 'M8[ns]')
for t in dtypes:
s_typed = s.astype(t)
result = s_typed.value_counts(normalize=True, dropna=False)
expected = Series([0.6, 0.2, 0.2],
index=Series([np.nan, 2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
result = s_typed.value_counts(normalize=True, dropna=True)
expected = Series([0.5, 0.5],
index=Series([2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
def test_value_counts_uint64(self):
arr = np.array([2**63], dtype=np.uint64)
expected = Series([1], index=[2**63])
result = algos.value_counts(arr)
tm.assert_series_equal(result, expected)
arr = np.array([-1, 2**63], dtype=object)
expected = Series([1, 1], index=[-1, 2**63])
result = algos.value_counts(arr)
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
tm.assert_series_equal(result, expected)
class TestDuplicated(object):
def test_duplicated_with_nas(self):
keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
result = algos.duplicated(keys)
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='first')
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='last')
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array([True, False, True, True, False, True])
tm.assert_numpy_array_equal(result, expected)
keys = np.empty(8, dtype=object)
for i, t in enumerate(zip([0, 0, np.nan, np.nan] * 2,
[0, np.nan, 0, np.nan] * 2)):
keys[i] = t
result = algos.duplicated(keys)
falses = [False] * 4
trues = [True] * 4
expected = np.array(falses + trues)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='last')
expected = np.array(trues + falses)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array(trues + trues)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('case', [
np.array([1, 2, 1, 5, 3,
2, 4, 1, 5, 6]),
np.array([1.1, 2.2, 1.1, np.nan, 3.3,
2.2, 4.4, 1.1, np.nan, 6.6]),
pytest.mark.xfail(reason="Complex bug. GH 16399")(
np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j,
2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j])
),
np.array(['a', 'b', 'a', 'e', 'c',
'b', 'd', 'a', 'e', 'f'], dtype=object),
np.array([1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7],
dtype=np.uint64),
])
def test_numeric_object_likes(self, case):
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_last = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_last
res_first = algos.duplicated(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = algos.duplicated(case, keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = algos.duplicated(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [Index(case), Index(case, dtype='category')]:
res_first = idx.duplicated(keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# series
for s in [Series(case), Series(case, dtype='category')]:
res_first = s.duplicated(keep='first')
tm.assert_series_equal(res_first, Series(exp_first))
res_last = s.duplicated(keep='last')
tm.assert_series_equal(res_last, Series(exp_last))
res_false = s.duplicated(keep=False)
tm.assert_series_equal(res_false, Series(exp_false))
def test_datetime_likes(self):
dt = ['2011-01-01', '2011-01-02', '2011-01-01', 'NaT', '2011-01-03',
'2011-01-02', '2011-01-04', '2011-01-01', 'NaT', '2011-01-06']
td = ['1 days', '2 days', '1 days', 'NaT', '3 days',
'2 days', '4 days', '1 days', 'NaT', '6 days']
cases = [np.array([Timestamp(d) for d in dt]),
np.array([Timestamp(d, tz='US/Eastern') for d in dt]),
np.array([pd.Period(d, freq='D') for d in dt]),
np.array([np.datetime64(d) for d in dt]),
np.array([pd.Timedelta(d) for d in td])]
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_last = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_last
for case in cases:
res_first = algos.duplicated(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = algos.duplicated(case, keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = algos.duplicated(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [Index(case), Index(case, dtype='category'),
Index(case, dtype=object)]:
res_first = idx.duplicated(keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# series
for s in [Series(case), Series(case, dtype='category'),
Series(case, dtype=object)]:
res_first = s.duplicated(keep='first')
tm.assert_series_equal(res_first, Series(exp_first))
res_last = s.duplicated(keep='last')
tm.assert_series_equal(res_last, Series(exp_last))
res_false = s.duplicated(keep=False)
tm.assert_series_equal(res_false, Series(exp_false))
def test_unique_index(self):
cases = [Index([1, 2, 3]), pd.RangeIndex(0, 3)]
for case in cases:
assert case.is_unique
tm.assert_numpy_array_equal(case.duplicated(),
np.array([False, False, False]))
@pytest.mark.parametrize('arr, unique', [
([(0, 0), (0, 1), (1, 0), (1, 1), (0, 0), (0, 1), (1, 0), (1, 1)],
[(0, 0), (0, 1), (1, 0), (1, 1)]),
([('b', 'c'), ('a', 'b'), ('a', 'b'), ('b', 'c')],
[('b', 'c'), ('a', 'b')]),
([('a', 1), ('b', 2), ('a', 3), ('a', 1)],
[('a', 1), ('b', 2), ('a', 3)]),
])
def test_unique_tuples(self, arr, unique):
# https://github.com/pandas-dev/pandas/issues/16519
expected = np.empty(len(unique), dtype=object)
expected[:] = unique
result = pd.unique(arr)
tm.assert_numpy_array_equal(result, expected)
class GroupVarTestMixin(object):
def test_group_var_generic_1d(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 1))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(15, 1).astype(self.dtype)
labels = np.tile(np.arange(5), (3, )).astype('int64')
expected_out = (np.squeeze(values)
.reshape((5, 3), order='F')
.std(axis=1, ddof=1) ** 2)[:, np.newaxis]
expected_counts = counts + 3
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_1d_flat_labels(self):
prng = RandomState(1234)
out = (np.nan * np.ones((1, 1))).astype(self.dtype)
counts = np.zeros(1, dtype='int64')
values = 10 * prng.rand(5, 1).astype(self.dtype)
labels = np.zeros(5, dtype='int64')
expected_out = np.array([[values.std(ddof=1) ** 2]])
expected_counts = counts + 5
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_all_finite(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 2))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(10, 2).astype(self.dtype)
labels = np.tile(np.arange(5), (2, )).astype('int64')
expected_out = np.std(values.reshape(2, 5, 2), ddof=1, axis=0) ** 2
expected_counts = counts + 2
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_some_nan(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 2))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(10, 2).astype(self.dtype)
values[:, 1] = np.nan
labels = np.tile(np.arange(5), (2, )).astype('int64')
expected_out = np.vstack([values[:, 0]
.reshape(5, 2, order='F')
.std(ddof=1, axis=1) ** 2,
np.nan * np.ones(5)]).T.astype(self.dtype)
expected_counts = counts + 2
self.algo(out, counts, values, labels)
tm.assert_almost_equal(out, expected_out, check_less_precise=6)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_constant(self):
# Regression test from GH 10448.
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype='int64')
values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype)
labels = np.zeros(3, dtype='int64')
self.algo(out, counts, values, labels)
assert counts[0] == 3
assert out[0, 0] >= 0
tm.assert_almost_equal(out[0, 0], 0.0)
class TestGroupVarFloat64(GroupVarTestMixin):
__test__ = True
algo = libgroupby.group_var_float64
dtype = np.float64
rtol = 1e-5
def test_group_var_large_inputs(self):
prng = RandomState(1234)
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype='int64')
values = (prng.rand(10 ** 6) + 10 ** 12).astype(self.dtype)
values.shape = (10 ** 6, 1)
labels = np.zeros(10 ** 6, dtype='int64')
self.algo(out, counts, values, labels)
assert counts[0] == 10 ** 6
tm.assert_almost_equal(out[0, 0], 1.0 / 12, check_less_precise=True)
class TestGroupVarFloat32(GroupVarTestMixin):
__test__ = True
algo = libgroupby.group_var_float32
dtype = np.float32
rtol = 1e-2
class TestHashTable(object):
def test_lookup_nan(self):
xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
m = ht.Float64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs),
dtype=np.int64))
def test_lookup_overflow(self):
xs = np.array([1, 2, 2**63], dtype=np.uint64)
m = ht.UInt64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs),
dtype=np.int64))
def test_get_unique(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(s.unique(), exp)
def test_vector_resize(self):
# Test for memory errors after internal vector
# reallocations (pull request #7157)
def _test_vector_resize(htable, uniques, dtype, nvals, safely_resizes):
vals = np.array(np.random.randn(1000), dtype=dtype)
# get_labels may append to uniques
htable.get_labels(vals[:nvals], uniques, 0, -1)
# to_array() set an external_view_exists flag on uniques.
tmp = uniques.to_array()
oldshape = tmp.shape
# subsequent get_labels() calls can no longer append to it
# (for all but StringHashTables + ObjectVector)
if safely_resizes:
htable.get_labels(vals, uniques, 0, -1)
else:
with pytest.raises(ValueError) as excinfo:
htable.get_labels(vals, uniques, 0, -1)
assert str(excinfo.value).startswith('external reference')
uniques.to_array() # should not raise here
assert tmp.shape == oldshape
test_cases = [
(ht.PyObjectHashTable, ht.ObjectVector, 'object', False),
(ht.StringHashTable, ht.ObjectVector, 'object', True),
(ht.Float64HashTable, ht.Float64Vector, 'float64', False),
(ht.Int64HashTable, ht.Int64Vector, 'int64', False),
(ht.UInt64HashTable, ht.UInt64Vector, 'uint64', False)]
for (tbl, vect, dtype, safely_resizes) in test_cases:
# resizing to empty is a special case
_test_vector_resize(tbl(), vect(), dtype, 0, safely_resizes)
_test_vector_resize(tbl(), vect(), dtype, 10, safely_resizes)
def test_quantile():
s = Series(np.random.randn(100))
result = algos.quantile(s, [0, .25, .5, .75, 1.])
expected = algos.quantile(s.values, [0, .25, .5, .75, 1.])
tm.assert_almost_equal(result, expected)
def test_unique_label_indices():
a = np.random.randint(1, 1 << 10, 1 << 15).astype('i8')
left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1]
tm.assert_numpy_array_equal(left, right,
check_dtype=False)
a[np.random.choice(len(a), 10)] = -1
left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1][1:]
tm.assert_numpy_array_equal(left, right,
check_dtype=False)
class TestRank(object):
def test_scipy_compat(self):
tm._skip_if_no_scipy()
from scipy.stats import rankdata
def _check(arr):
mask = ~np.isfinite(arr)
arr = arr.copy()
result = libalgos.rank_1d_float64(arr)
arr[mask] = np.inf
exp = rankdata(arr)
exp[mask] = nan
assert_almost_equal(result, exp)
_check(np.array([nan, nan, 5., 5., 5., nan, 1, 2, 3, nan]))
_check(np.array([4., nan, 5., 5., 5., nan, 1, 2, 4., nan]))
def test_basic(self):
exp = np.array([1, 2], dtype=np.float64)
for dtype in np.typecodes['AllInteger']:
s = Series([1, 100], dtype=dtype)
tm.assert_numpy_array_equal(algos.rank(s), exp)
def test_uint64_overflow(self):
exp = np.array([1, 2], dtype=np.float64)
for dtype in [np.float64, np.uint64]:
s = Series([1, 2**63], dtype=dtype)
tm.assert_numpy_array_equal(algos.rank(s), exp)
def test_too_many_ndims(self):
arr = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]])
msg = "Array with ndim > 2 are not supported"
with tm.assert_raises_regex(TypeError, msg):
algos.rank(arr)
def test_pad_backfill_object_segfault():
old = np.array([], dtype='O')
new = np.array([datetime(2010, 12, 31)], dtype='O')
result = libalgos.pad_object(old, new)
expected = np.array([-1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.pad_object(new, old)
expected = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.backfill_object(old, new)
expected = np.array([-1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = libalgos.backfill_object(new, old)
expected = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_arrmap():
values = np.array(['foo', 'foo', 'bar', 'bar', 'baz', 'qux'], dtype='O')
result = libalgos.arrmap_object(values, lambda x: x in ['foo', 'bar'])
assert (result.dtype == np.bool_)
class TestTseriesUtil(object):
def test_combineFunc(self):
pass
def test_reindex(self):
pass
def test_isna(self):
pass
def test_groupby(self):
pass
def test_groupby_withnull(self):
pass
def test_backfill(self):
old = Index([1, 5, 10])
new = Index(lrange(12))
filler = libalgos.backfill_int64(old.values, new.values)
expect_filler = np.array([0, 0, 1, 1, 1, 1,
2, 2, 2, 2, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
# corner case
old = Index([1, 4])
new = Index(lrange(5, 10))
filler = libalgos.backfill_int64(old.values, new.values)
expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
def test_pad(self):
old = Index([1, 5, 10])
new = Index(lrange(12))
filler = libalgos.pad_int64(old.values, new.values)
expect_filler = np.array([-1, 0, 0, 0, 0, 1,
1, 1, 1, 1, 2, 2], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
# corner case
old = Index([5, 10])
new = Index(lrange(5))
filler = libalgos.pad_int64(old.values, new.values)
expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
def test_is_lexsorted():
failure = [
np.array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3,
3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='int64'),
np.array([30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
15, 14,
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28,
27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13,
12, 11,
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25,
24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10,
9, 8,
7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25, 24, 23, 22,
21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7,
6, 5,
4, 3, 2, 1, 0], dtype='int64')]
assert (not libalgos.is_lexsorted(failure))
def test_groupsort_indexer():
a = np.random.randint(0, 1000, 100).astype(np.int64)
b = np.random.randint(0, 1000, 100).astype(np.int64)
result = libalgos.groupsort_indexer(a, 1000)[0]
# need to use a stable sort
# np.argsort returns int, groupsort_indexer
# always returns int64
expected = np.argsort(a, kind='mergesort')
expected = expected.astype(np.int64)
tm.assert_numpy_array_equal(result, expected)
# compare with lexsort
# np.lexsort returns int, groupsort_indexer
# always returns int64
key = a * 1000 + b
result = libalgos.groupsort_indexer(key, 1000000)[0]
expected = np.lexsort((b, a))
expected = expected.astype(np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_infinity_sort():
# GH 13445
# numpy's argsort can be unhappy if something is less than
# itself. Instead, let's give our infinities a self-consistent
# ordering, but outside the float extended real line.
Inf = libalgos.Infinity()
NegInf = libalgos.NegInfinity()
ref_nums = [NegInf, float("-inf"), -1e100, 0, 1e100, float("inf"), Inf]
assert all(Inf >= x for x in ref_nums)
assert all(Inf > x or x is Inf for x in ref_nums)
assert Inf >= Inf and Inf == Inf
assert not Inf < Inf and not Inf > Inf
assert all(NegInf <= x for x in ref_nums)
assert all(NegInf < x or x is NegInf for x in ref_nums)
assert NegInf <= NegInf and NegInf == NegInf
assert not NegInf < NegInf and not NegInf > NegInf
for perm in permutations(ref_nums):
assert sorted(perm) == ref_nums
# smoke tests
np.array([libalgos.Infinity()] * 32).argsort()
np.array([libalgos.NegInfinity()] * 32).argsort()
def test_ensure_platform_int():
arr = np.arange(100, dtype=np.intp)
result = libalgos.ensure_platform_int(arr)
assert (result is arr)
def test_int64_add_overflow():
# see gh-14068
msg = "Overflow in int64 addition"
m = np.iinfo(np.int64).max
n = np.iinfo(np.int64).min
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), m)
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([n, n]), n)
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([n, n]), np.array([n, n]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, n]), np.array([n, n]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([False, True]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
b_mask=np.array([False, True]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([False, True]),
b_mask=np.array([False, True]))
with tm.assert_raises_regex(OverflowError, msg):
with tm.assert_produces_warning(RuntimeWarning):
algos.checked_add_with_arr(np.array([m, m]),
np.array([np.nan, m]))
# Check that the nan boolean arrays override whether or not
# the addition overflows. We don't check the result but just
# the fact that an OverflowError is not raised.
with pytest.raises(AssertionError):
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([True, True]))
with pytest.raises(AssertionError):
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
b_mask=np.array([True, True]))
with pytest.raises(AssertionError):
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([True, False]),
b_mask=np.array([False, True]))
class TestMode(object):
def test_no_mode(self):
exp = Series([], dtype=np.float64)
tm.assert_series_equal(algos.mode([]), exp)
def test_mode_single(self):
# GH 15714
exp_single = [1]
data_single = [1]
exp_multi = [1]
data_multi = [1, 1]
for dt in np.typecodes['AllInteger'] + np.typecodes['Float']:
s = Series(data_single, dtype=dt)
exp = Series(exp_single, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
s = Series(data_multi, dtype=dt)
exp = Series(exp_multi, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
exp = Series([1], dtype=np.int)
tm.assert_series_equal(algos.mode([1]), exp)
exp = Series(['a', 'b', 'c'], dtype=np.object)
tm.assert_series_equal(algos.mode(['a', 'b', 'c']), exp)
def test_number_mode(self):
exp_single = [1]
data_single = [1] * 5 + [2] * 3
exp_multi = [1, 3]
data_multi = [1] * 5 + [2] * 3 + [3] * 5
for dt in np.typecodes['AllInteger'] + np.typecodes['Float']:
s = Series(data_single, dtype=dt)
exp = Series(exp_single, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
s = Series(data_multi, dtype=dt)
exp = Series(exp_multi, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
def test_strobj_mode(self):
exp = ['b']
data = ['a'] * 2 + ['b'] * 3
s = Series(data, dtype='c')
exp = Series(exp, dtype='c')
tm.assert_series_equal(algos.mode(s), exp)
exp = ['bar']
data = ['foo'] * 2 + ['bar'] * 3
for dt in [str, object]:
s = Series(data, dtype=dt)
exp = Series(exp, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
def test_datelike_mode(self):
exp = Series(['1900-05-03', '2011-01-03',
'2013-01-02'], dtype="M8[ns]")
s = Series(['2011-01-03', '2013-01-02',
'1900-05-03'], dtype='M8[ns]')
tm.assert_series_equal(algos.mode(s), exp)
exp = Series(['2011-01-03', '2013-01-02'], dtype='M8[ns]')
s = Series(['2011-01-03', '2013-01-02', '1900-05-03',
'2011-01-03', '2013-01-02'], dtype='M8[ns]')
tm.assert_series_equal(algos.mode(s), exp)
def test_timedelta_mode(self):
exp = Series(['-1 days', '0 days', '1 days'],
dtype='timedelta64[ns]')
s = Series(['1 days', '-1 days', '0 days'],
dtype='timedelta64[ns]')
tm.assert_series_equal(algos.mode(s), exp)
exp = Series(['2 min', '1 day'], dtype='timedelta64[ns]')
s = Series(['1 day', '1 day', '-1 day', '-1 day 2 min',
'2 min', '2 min'], dtype='timedelta64[ns]')
tm.assert_series_equal(algos.mode(s), exp)
def test_mixed_dtype(self):
exp = Series(['foo'])
s = Series([1, 'foo', 'foo'])
tm.assert_series_equal(algos.mode(s), exp)
def test_uint64_overflow(self):
exp = Series([2**63], dtype=np.uint64)
s = Series([1, 2**63, 2**63], dtype=np.uint64)
tm.assert_series_equal(algos.mode(s), exp)
exp = Series([1, 2**63], dtype=np.uint64)
s = Series([1, 2**63], dtype=np.uint64)
tm.assert_series_equal(algos.mode(s), exp)
def test_categorical(self):
c = Categorical([1, 2])
exp = c
tm.assert_categorical_equal(algos.mode(c), exp)
tm.assert_categorical_equal(c.mode(), exp)
c = Categorical([1, 'a', 'a'])
exp = Categorical(['a'], categories=[1, 'a'])
tm.assert_categorical_equal(algos.mode(c), exp)
tm.assert_categorical_equal(c.mode(), exp)
c = Categorical([1, 1, 2, 3, 3])
exp = Categorical([1, 3], categories=[1, 2, 3])
tm.assert_categorical_equal(algos.mode(c), exp)
tm.assert_categorical_equal(c.mode(), exp)
def test_index(self):
idx = Index([1, 2, 3])
exp = Series([1, 2, 3], dtype=np.int64)
tm.assert_series_equal(algos.mode(idx), exp)
idx = Index([1, 'a', 'a'])
exp = Series(['a'], dtype=object)
tm.assert_series_equal(algos.mode(idx), exp)
idx = Index([1, 1, 2, 3, 3])
exp = Series([1, 3], dtype=np.int64)
tm.assert_series_equal(algos.mode(idx), exp)
exp = Series(['2 min', '1 day'], dtype='timedelta64[ns]')
idx = Index(['1 day', '1 day', '-1 day', '-1 day 2 min',
'2 min', '2 min'], dtype='timedelta64[ns]')
tm.assert_series_equal(algos.mode(idx), exp)
| bsd-3-clause |
IshankGulati/scikit-learn | sklearn/ensemble/tests/test_iforest.py | 13 | 7616 | """
Testing for Isolation Forest algorithm (sklearn.ensemble.iforest).
"""
# Authors: Nicolas Goix <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.model_selection import ParameterGrid
from sklearn.ensemble import IsolationForest
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston, load_iris
from sklearn.utils import check_random_state
from sklearn.metrics import roc_auc_score
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_iforest():
"""Check Isolation Forest for various parameter settings."""
X_train = np.array([[0, 1], [1, 2]])
X_test = np.array([[2, 1], [1, 1]])
grid = ParameterGrid({"n_estimators": [3],
"max_samples": [0.5, 1.0, 3],
"bootstrap": [True, False]})
with ignore_warnings():
for params in grid:
IsolationForest(random_state=rng,
**params).fit(X_train).predict(X_test)
def test_iforest_sparse():
"""Check IForest for various parameter settings on sparse input."""
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"bootstrap": [True, False]})
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in grid:
# Trained on sparse format
sparse_classifier = IsolationForest(
n_estimators=10, random_state=1, **params).fit(X_train_sparse)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_classifier = IsolationForest(
n_estimators=10, random_state=1, **params).fit(X_train)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
assert_array_equal(sparse_results, dense_results)
def test_iforest_error():
"""Test that it gives proper exception on deficient input."""
X = iris.data
# Test max_samples
assert_raises(ValueError,
IsolationForest(max_samples=-1).fit, X)
assert_raises(ValueError,
IsolationForest(max_samples=0.0).fit, X)
assert_raises(ValueError,
IsolationForest(max_samples=2.0).fit, X)
    # The dataset has fewer than 256 samples; explicitly setting
    # max_samples > n_samples should result in a warning. If not set
    # explicitly, there should be no warning.
assert_warns_message(UserWarning,
"max_samples will be set to n_samples for estimation",
IsolationForest(max_samples=1000).fit, X)
assert_no_warnings(IsolationForest(max_samples='auto').fit, X)
assert_no_warnings(IsolationForest(max_samples=np.int64(2)).fit, X)
assert_raises(ValueError, IsolationForest(max_samples='foobar').fit, X)
assert_raises(ValueError, IsolationForest(max_samples=1.5).fit, X)
def test_recalculate_max_depth():
"""Check max_depth recalculation when max_samples is reset to n_samples"""
X = iris.data
clf = IsolationForest().fit(X)
for est in clf.estimators_:
assert_equal(est.max_depth, int(np.ceil(np.log2(X.shape[0]))))
def test_max_samples_attribute():
X = iris.data
clf = IsolationForest().fit(X)
assert_equal(clf.max_samples_, X.shape[0])
clf = IsolationForest(max_samples=500)
assert_warns_message(UserWarning,
"max_samples will be set to n_samples for estimation",
clf.fit, X)
assert_equal(clf.max_samples_, X.shape[0])
clf = IsolationForest(max_samples=0.4).fit(X)
assert_equal(clf.max_samples_, 0.4*X.shape[0])
def test_iforest_parallel_regression():
"""Check parallel regression."""
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = IsolationForest(n_jobs=3,
random_state=0).fit(X_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = IsolationForest(n_jobs=1,
random_state=0).fit(X_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_iforest_performance():
"""Test Isolation Forest performs well"""
# Generate train/test data
rng = check_random_state(2)
X = 0.3 * rng.randn(120, 2)
X_train = np.r_[X + 2, X - 2]
X_train = X[:100]
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
X_test = np.r_[X[100:], X_outliers]
y_test = np.array([0] * 20 + [1] * 20)
# fit the model
clf = IsolationForest(max_samples=100, random_state=rng).fit(X_train)
# predict scores (the lower, the more normal)
y_pred = - clf.decision_function(X_test)
    # check that the outliers are ranked above the normal points (high ROC AUC)
assert_greater(roc_auc_score(y_test, y_pred), 0.98)
def test_iforest_works():
# toy sample (the last two samples are outliers)
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [6, 3], [-4, 7]]
    # Test IsolationForest
clf = IsolationForest(random_state=rng, contamination=0.25)
clf.fit(X)
decision_func = - clf.decision_function(X)
pred = clf.predict(X)
# assert detect outliers:
assert_greater(np.min(decision_func[-2:]), np.max(decision_func[:-2]))
assert_array_equal(pred, 6 * [1] + 2 * [-1])
def test_max_samples_consistency():
# Make sure validated max_samples in iforest and BaseBagging are identical
X = iris.data
clf = IsolationForest().fit(X)
assert_equal(clf.max_samples_, clf._max_samples)
def test_iforest_subsampled_features():
# It tests non-regression for #5732 which failed at predict.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
clf = IsolationForest(max_features=0.8)
clf.fit(X_train, y_train)
clf.predict(X_test)
| bsd-3-clause |
BeiLuoShiMen/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/pylab.py | 70 | 10245 | """
This is a procedural interface to the matplotlib object-oriented
plotting library.
The following plotting commands are provided; the majority have
Matlab(TM) analogs and similar arguments.
_Plotting commands
acorr - plot the autocorrelation function
annotate - annotate something in the figure
arrow - add an arrow to the axes
axes - Create a new axes
axhline - draw a horizontal line across axes
axvline - draw a vertical line across axes
axhspan - draw a horizontal bar across axes
axvspan - draw a vertical bar across axes
axis - Set or return the current axis limits
bar - make a bar chart
barh - a horizontal bar chart
broken_barh - a set of horizontal bars with gaps
box - set the axes frame on/off state
boxplot - make a box and whisker plot
cla - clear current axes
clabel - label a contour plot
clf - clear a figure window
clim - adjust the color limits of the current image
close - close a figure window
colorbar - add a colorbar to the current figure
cohere - make a plot of coherence
contour - make a contour plot
contourf - make a filled contour plot
csd - make a plot of cross spectral density
delaxes - delete an axes from the current figure
draw - Force a redraw of the current figure
errorbar - make an errorbar graph
figlegend - make legend on the figure rather than the axes
figimage - make a figure image
figtext - add text in figure coords
figure - create or change active figure
fill - make filled polygons
findobj - recursively find all objects matching some criteria
gca - return the current axes
gcf - return the current figure
gci - get the current image, or None
getp - get a handle graphics property
grid - set whether gridding is on
hist - make a histogram
hold - set the axes hold state
ioff - turn interactive mode off
ion - turn interactive mode on
isinteractive - return True if interaction mode is on
imread - load image file into array
imshow - plot image data
ishold - return the hold state of the current axes
legend - make an axes legend
loglog - a log log plot
matshow - display a matrix in a new figure preserving aspect
pcolor - make a pseudocolor plot
pcolormesh - make a pseudocolor plot using a quadrilateral mesh
pie - make a pie chart
plot - make a line plot
plot_date - plot dates
plotfile - plot column data from an ASCII tab/space/comma delimited file
pie - pie charts
polar - make a polar plot on a PolarAxes
psd - make a plot of power spectral density
quiver - make a direction field (arrows) plot
rc - control the default params
rgrids - customize the radial grids and labels for polar
savefig - save the current figure
scatter - make a scatter plot
setp - set a handle graphics property
semilogx - log x axis
semilogy - log y axis
show - show the figures
specgram - a spectrogram plot
spy - plot sparsity pattern using markers or image
stem - make a stem plot
subplot - make a subplot (numrows, numcols, axesnum)
subplots_adjust - change the params controlling the subplot positions of current figure
subplot_tool - launch the subplot configuration tool
suptitle - add a figure title
table - add a table to the plot
text - add some text at location x,y to the current axes
thetagrids - customize the radial theta grids and labels for polar
title - add a title to the current axes
xcorr - plot the cross correlation of x and y
xlim - set/get the xlimits
ylim - set/get the ylimits
xticks - set/get the xticks
yticks - set/get the yticks
xlabel - add an xlabel to the current axes
ylabel - add a ylabel to the current axes
autumn - set the default colormap to autumn
bone - set the default colormap to bone
cool - set the default colormap to cool
copper - set the default colormap to copper
flag - set the default colormap to flag
gray - set the default colormap to gray
hot - set the default colormap to hot
hsv - set the default colormap to hsv
jet - set the default colormap to jet
pink - set the default colormap to pink
prism - set the default colormap to prism
spring - set the default colormap to spring
summer - set the default colormap to summer
winter - set the default colormap to winter
spectral - set the default colormap to spectral
_Event handling
connect - register an event handler
disconnect - remove a connected event handler
_Matrix commands
cumprod - the cumulative product along a dimension
cumsum - the cumulative sum along a dimension
detrend - remove the mean or best fit line from an array
diag - the k-th diagonal of matrix
diff - the n-th difference of an array
eig - the eigenvalues and eigenvectors of v
eye - a matrix where the k-th diagonal is ones, else zero
find - return the indices where a condition is nonzero
fliplr - flip the columns of a matrix left/right
flipud - flip the rows of a matrix up/down
linspace - a linear spaced vector of N values from min to max inclusive
logspace - a log spaced vector of N values from min to max inclusive
meshgrid - repeat x and y to make regular matrices
ones - an array of ones
rand - an array from the uniform distribution [0,1]
randn - an array from the normal distribution
rot90 - rotate matrix k*90 degrees counterclockwise
squeeze - squeeze an array removing any dimensions of length 1
tri - a triangular matrix
tril - a lower triangular matrix
triu - an upper triangular matrix
vander - the Vandermonde matrix of vector x
svd - singular value decomposition
zeros - a matrix of zeros
_Probability
levypdf - The levy probability density function from the char. func.
normpdf - The Gaussian probability density function
rand - random numbers from the uniform distribution
randn - random numbers from the normal distribution
_Statistics
corrcoef - correlation coefficient
cov - covariance matrix
amax - the maximum along dimension m
mean - the mean along dimension m
median - the median along dimension m
amin - the minimum along dimension m
norm - the norm of vector x
prod - the product along dimension m
ptp - the max-min along dimension m
std - the standard deviation along dimension m
asum - the sum along dimension m
_Time series analysis
bartlett - M-point Bartlett window
blackman - M-point Blackman window
cohere - the coherence using average periodogram
csd - the cross spectral density using average periodogram
fft - the fast Fourier transform of vector x
hamming - M-point Hamming window
hanning - M-point Hanning window
hist - compute the histogram of x
kaiser - M length Kaiser window
psd - the power spectral density using average periodogram
sinc - the sinc function of array x
_Dates
date2num - convert python datetimes to numeric representation
drange - create an array of numbers for date plots
num2date - convert numeric type (float days since 0001) to datetime
_Other
angle - the angle of a complex array
griddata - interpolate irregularly distributed data to a regular grid
load - load ASCII data into array
polyfit - fit x, y to an n-th order polynomial
polyval - evaluate an n-th order polynomial
roots - the roots of the polynomial coefficients in p
save - save an array to an ASCII file
trapz - trapezoidal integration
__end
"""
import sys, warnings
from cbook import flatten, is_string_like, exception_to_str, popd, \
silent_list, iterable, dedent
import numpy as np
from numpy import ma
from matplotlib import mpl # pulls in most modules
from matplotlib.dates import date2num, num2date,\
datestr2num, strpdate2num, drange,\
epoch2num, num2epoch, mx2num,\
DateFormatter, IndexDateFormatter, DateLocator,\
RRuleLocator, YearLocator, MonthLocator, WeekdayLocator,\
DayLocator, HourLocator, MinuteLocator, SecondLocator,\
rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, MONTHLY,\
WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY, relativedelta
import matplotlib.dates
# bring all the symbols in so folks can import them from
# pylab in one fell swoop
from matplotlib.mlab import window_hanning, window_none,\
conv, detrend, detrend_mean, detrend_none, detrend_linear,\
polyfit, polyval, entropy, normpdf, griddata,\
levypdf, find, trapz, prepca, rem, norm, orth, rank,\
sqrtm, prctile, center_matrix, rk4, exp_safe, amap,\
sum_flat, mean_flat, rms_flat, l1norm, l2norm, norm, frange,\
diagonal_matrix, base_repr, binary_repr, log2, ispower2,\
bivariate_normal, load, save
from matplotlib.mlab import stineman_interp, slopes, \
stineman_interp, inside_poly, poly_below, poly_between, \
is_closed_polygon, path_length, distances_along_curve, vector_lengths
from numpy import *
from numpy.fft import *
from numpy.random import *
from numpy.linalg import *
from matplotlib.mlab import window_hanning, window_none, conv, detrend, demean, \
detrend_mean, detrend_none, detrend_linear, entropy, normpdf, levypdf, \
find, longest_contiguous_ones, longest_ones, prepca, prctile, prctile_rank, \
center_matrix, rk4, bivariate_normal, get_xyz_where, get_sparse_matrix, dist, \
dist_point_to_segment, segments_intersect, fftsurr, liaupunov, movavg, \
save, load, exp_safe, \
amap, rms_flat, l1norm, l2norm, norm_flat, frange, diagonal_matrix, identity, \
base_repr, binary_repr, log2, ispower2, fromfunction_kw, rem, norm, orth, rank, sqrtm,\
mfuncC, approx_real, rec_append_field, rec_drop_fields, rec_join, csv2rec, rec2csv, isvector
from matplotlib.pyplot import *
# provide the recommended module abbrevs in the pylab namespace
import matplotlib.pyplot as plt
import numpy as np
| agpl-3.0 |
abele/bokeh | examples/compat/mpl/subplots.py | 34 | 1826 | """
Edward Tufte uses this example from Anscombe to show 4 datasets of x
and y that have the same mean, standard deviation, and regression
line, but which are qualitatively different.
matplotlib fun for a rainy day
"""
import matplotlib.pyplot as plt
import numpy as np
from bokeh import mpl
from bokeh.plotting import output_file, show
x = np.array([10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5])
y1 = np.array([8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68])
y2 = np.array([9.14, 8.14, 8.74, 8.77, 9.26, 8.10, 6.13, 3.10, 9.13, 7.26, 4.74])
y3 = np.array([7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73])
x4 = np.array([8, 8, 8, 8, 8, 8, 8, 19, 8, 8, 8])
y4 = np.array([6.58, 5.76, 7.71, 8.84, 8.47, 7.04, 5.25, 12.50, 5.56, 7.91, 6.89])
def fit(x):
return 3 + 0.5 * x
xfit = np.linspace(np.amin(x), np.amax(x), len(x))
plt.subplot(221)
plt.plot(x, y1, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), xticklabels=[], yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.ylabel('I', fontsize=20)
plt.subplot(222)
plt.plot(x, y2, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), xticklabels=[], yticks=(4, 8, 12), yticklabels=[], xticks=(0, 10, 20))
plt.ylabel('II', fontsize=20)
plt.subplot(223)
plt.plot(x, y3, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.ylabel('III', fontsize=20)
plt.setp(plt.gca(), yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.subplot(224)
xfit = np.array([np.amin(x4), np.amax(x4)])
plt.plot(x4, y4, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), yticklabels=[], yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.ylabel('IV', fontsize=20)
# We create the figure in matplotlib and then we "pass it" to Bokeh
output_file("subplots.html")
show(mpl.to_bokeh())
| bsd-3-clause |
mehdidc/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
| bsd-3-clause |
rangwala/dmgrader | gradingwebapp/gmugrader/fileuploader/views.py | 1 | 21915 | from django.http import HttpResponseRedirect,HttpResponse
from django.core.urlresolvers import reverse
from django.template import loader
from django.shortcuts import render_to_response, render, get_object_or_404
from django.core.context_processors import csrf
from django.conf import settings
from .forms import submissionAssignmentForm, submissionForm, AssignmentForm, ArticleForm, UserForm, ClassForm # UserProfileForm
from .models import Article, Assignment, Solution, Class
from django.template import RequestContext
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from sklearn import metrics, cross_validation
import numpy as np
import matplotlib.pyplot as plt
import datetime
from django.utils import timezone
from datetime import datetime, timedelta
from django.db.models import Max, Min
def computeSampledMetrics (predfile, solfile,samplesize,scoring_method):
myPredFile = open (settings.MEDIA_ROOT + str(predfile), 'r')
#myPredFile = open (settings.MEDIA_ROOT + '/solution_files/sol.txt', 'r')
myTrueFile = open (settings.MEDIA_ROOT + str(solfile), 'r')
predictions = []
ground = []
for predline in myPredFile:
predictions.append(predline)
for trueline in myTrueFile:
ground.append(trueline)
ground = np.array (ground)
predictions= np.array (predictions)
rs = cross_validation.ShuffleSplit (len(ground), n_iter=1, test_size=0.01 * samplesize, random_state=0)
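    # A single shuffle split is used purely for sampling: samplesize is treated
    # as a percentage, so test_size=0.01 * samplesize holds out that fraction
    # of the rows (with a fixed random_state) as the sample scored below.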
for train_index, test_index in rs:
sample_ground = ground [test_index]
sample_predictions = predictions [test_index]
#print np.mean (sample_ground == sample_predictions)
#print metrics.classification_report (sample_ground, sample_predictions)
#return metrics.f1_score (sample_ground,sample_predictions, pos_label=1)
if scoring_method == 'RE' or scoring_method == 'RC':
ypred = np.array (sample_predictions, dtype=np.float)
if scoring_method == 'RE':
ytrue = np.array (sample_ground, dtype=np.float)
else:
ytrue = np.array (sample_ground,dtype=np.int)
else:
ytrue = np.array(sample_ground,dtype=np.int)
ypred = np.array(sample_predictions,dtype=np.int)
if scoring_method == 'F1':
return metrics.f1_score(ytrue,ypred,pos_label=1)
if scoring_method == 'AC':
return metrics.accuracy_score(ytrue, ypred)
if scoring_method == 'V1':
return metrics.v_measure_score(ytrue, ypred)
if scoring_method == 'RE':
return metrics.mean_squared_error (ytrue, ypred) ** 0.5
if scoring_method == 'RC':
return metrics.roc_auc_score (ytrue, ypred)
# return metrics.accuracy_score(sample_ground, sample_predictions)
"""
corr = 0
for i in range (len(ground)):
if (ground[i] == predictions[i]):
corr = corr+1;
print corr
myPredFile.close()
myTrueFile.close()
return (1.0 * corr)/len(ground)
"""
def computeMetrics (predfile, solfile, scoring_method):
myPredFile = open (settings.MEDIA_ROOT + str(predfile), 'r')
#myPredFile = open (settings.MEDIA_ROOT + '/solution_files/sol.txt', 'r')
myTrueFile = open (settings.MEDIA_ROOT + str(solfile), 'r')
predictions = []
ground = []
for predline in myPredFile:
predictions.append(predline)
for trueline in myTrueFile:
ground.append(trueline)
if len(predictions) != len(ground):
return -100.0
else:
#print np.mean (ground == predictions)
#print metrics.classification_report (ground, predictions)
print ("Hi")
if scoring_method == 'RE' or scoring_method == 'RC':
ypred = np.array (predictions, dtype=np.float)
if scoring_method == 'RE':
ytrue = np.array (ground, dtype=np.float)
else:
ytrue = np.array (ground, dtype=np.int)
else:
ytrue = np.array(ground,dtype=np.int)
ypred = np.array(predictions,dtype=np.int)
if scoring_method == 'F1':
return metrics.f1_score(ytrue,ypred,pos_label=1)
if scoring_method == 'AC':
return metrics.accuracy_score(ytrue, ypred)
if scoring_method == 'V1':
return metrics.v_measure_score(ytrue, ypred)
if scoring_method == 'RE':
return metrics.mean_squared_error (ytrue, ypred) ** 0.5
if scoring_method == 'RC':
return metrics.roc_auc_score (ytrue, ypred)
"""
corr = 0
for i in range (len(ground)):
if (ground[i] == predictions[i]):
corr = corr+1;
print corr
myPredFile.close()
myTrueFile.close()
return (1.0 * corr)/len(ground)
"""
def get_accuracy_value (filename):
myPredFile = open (settings.MEDIA_ROOT + str(filename), 'r')
#myPredFile = open (settings.MEDIA_ROOT + '/solution_files/sol.txt', 'r')
myTrueFile = open (settings.MEDIA_ROOT + '/solution_files/sol.txt', 'r')
predictions = []
ground = []
for predline in myPredFile:
predictions.append(predline)
for trueline in myTrueFile:
ground.append(trueline)
corr = 0
for i in range (len(ground)):
if (ground[i] == predictions[i]):
corr = corr+1;
#print corr
myPredFile.close()
myTrueFile.close()
return (1.0 * corr)/len(ground)
def index(request):
form = ArticleForm()
args = {}
args.update(csrf(request))
args['form'] = form
return render_to_response('fileuploader/create_article.html',args)
def articles(request):
if not request.user.is_authenticated():
return HttpResponseRedirect('/fileuploader/login/')
#username = request.POST.get('username')
#password = request.POST.get('password')
#user = authenticate(username=username, password=password)
args = {}
args.update(csrf(request))
args['articles'] = Article.objects.all()
return render_to_response('fileuploader/articles.html',args)
#def login(request,user):
def login (request):
c={}
c.update(csrf(request))
return render_to_response('login.html',c)
def auth_view (request):
username = request.POST.get('username')
password = request.POST.get('password')
user = auth.authenticate(username=username, password=password)
#print user, username, password
if user is not None:
auth.login(request,user)
args={}
args.update(csrf(request))
args['user'] = user
return render_to_response('loggedin.html',args)
else:
return HttpResponseRedirect('/fileuploader/invalid')
@login_required
def loggedin(request):
args = {}
args.update (csrf (request))
args['user'] = request.user
return render_to_response('loggedin.html', args)
def invalid_login(request):
return render_to_response('invalid_login.html')
def logout_page(request):
"""
    Log users out and render the logout confirmation page.
"""
auth.logout(request)
return render_to_response('logout.html')
def computescores (request):
args = {}
args.update(csrf(request))
#args['articles'] = Article.objects.filter(title='aaa').update(accuracy=get_accuracy_value(Article.fileshot.filename))
obj1 = Article.objects.filter(accuracy=0.0)
for items in obj1:
items.accuracy = get_accuracy_value (items.fileshot)
items.save()
args['articles'] = obj1
return render_to_response('fileuploader/computescores.html', args)
# this is only allowable by the ADMIN/INSTRUCTOR
@staff_member_required
def createAssignment (request):
if request.POST:
form = AssignmentForm (request.POST, request.FILES)
if form.is_valid():
a = form.save()
return HttpResponseRedirect('viewAssignments.html')
else:
form = AssignmentForm()
args = {}
args.update(csrf(request))
args['form'] = form
return render_to_response('fileuploader/createAssignment.html',args)
@login_required
def viewErrorMessage(request,message):
args = {}
args['message'] = message
args['user'] = request.user
return render_to_response ('fileuploader/viewErrorMessage.html', args)
@login_required
def submitChosenAssignment (request,assignment_id):
if request.POST:
form = submissionAssignmentForm(request.POST, request.FILES)
if form.is_valid():
a = form.save(commit=False)
a.user = request.user
#print a.user
assignment = get_object_or_404 (Assignment, pk = assignment_id)
if timezone.now() > assignment.deadline_date:
htmlmessage = "Past Deadline Date"
args = {}
args.update (csrf (request))
args['message'] = htmlmessage
args['user'] = request.user
return render_to_response ('fileuploader/viewErrorMessage.html',args)
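            # Throttle check: count this user's successful ('OK') submissions
            # for this assignment over the trailing 24 hours; if the per-day
            # limit (num_subs_per_day) is already reached, show an error page.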
min_dt = timezone.now () - timedelta (hours=24)
max_dt = timezone.now()
previous_today = Solution.objects.filter(assignment=assignment_id,user=a.user,status='OK',submission_time__range = (min_dt,max_dt)).count() #submission_time > (timezone.now()-timedelta(1)))
if previous_today >= assignment.num_subs_per_day:
htmlmessage = "You have already submitted the allowed submissions in a 24-hour cycle"
args = {}
args.update (csrf (request))
args['message'] = htmlmessage
args['user'] = request.user
return render_to_response ('fileuploader/viewErrorMessage.html',args)
truthFile = assignment.ground_truth
a.assignment = assignment
a.save()
# update the counter
# also update the score
obj1 = Assignment.objects.get(pk = a.assignment_id) #.update(uploaded_cnt = uploaded_cnt + 1)
truthFile = obj1.ground_truth
scorer = obj1.scoring_method
obj1.uploaded_cnt = obj1.uploaded_cnt + 1
counter = obj1.uploaded_cnt
obj1.save()
# gets all the files back...
# we need a table of student - attempts - etc
obj2 = Solution.objects.filter (assignment = a.assignment)
#print len(obj2)
for items in obj2:
if items.solution_file == a.solution_file:
items.attempt = counter
items.score = computeMetrics (items.solution_file, truthFile,scorer)
flag_save = 1
if items.score == -100:
htmlmessage = "Your Prediction File has incorrect number of entries"
args = {}
args.update (csrf (request))
args['message'] = htmlmessage
args['user'] = request.user
return render_to_response ('fileuploader/viewErrorMessage.html',args)
#return HttpResponse(htmlmessage)
else:
items.submission_time = timezone.now()
items.status = 'OK'
#Compute the Public_SCORE
items.public_score = computeSampledMetrics (items.solution_file, truthFile, obj1.sampling_private,scorer)
items.save()
args={}
args.update (csrf (request))
#create a splash page
args['user'] = request.user
return render_to_response('fileuploader/thanksSubmissions.html',args)
#return render_to_response('fileuploader/viewSubmissions.html',args)
else:
assignment = get_object_or_404 (Assignment, pk = assignment_id)
form = submissionAssignmentForm()
args = {}
args.update(csrf (request))
args['form'] = form
args['assignment'] = assignment
args['user'] = request.user
#return HttpResponseRedirect('viewSubmissions.html')
return render_to_response('fileuploader/submitChosenAssignment.html',args)
#@login_required
#def submitAssignment (request):
# if request.POST:
# form = submissionForm(request.POST, request.FILES)
# if form.is_valid():
# a = form.save(commit=False)
# a.user = request.user
# print a.user
# a.save()
# update the counter
# also update the score
# obj1 = Assignment.objects.filter(name = a.assignment) #.update(uploaded_cnt = uploaded_cnt + 1)
# for items in obj1:
# items.uploaded_cnt = items.uploaded_cnt + 1
# counter = items.uploaded_cnt
# truthFile = items.ground_truth
# items.save()
# gets all the files back...
# we need a table of student - attempts - etc
# obj2 = Solution.objects.filter (assignment = a.assignment)
# for items in obj2:
# if items.solution_file == a.solution_file:
# items.attempt = counter
# items.score = computeMetrics (items.solution_file, truthFile)
# items.save()
# return HttpResponseRedirect('viewSubmissions.html')
# else:
# form = submissionForm()
# args = {}
# args.update(csrf (request))
# args['form'] = form
# return render_to_response('fileuploader/submitAssignment.html',args)
#'''
def thanksSubmissions (request):
return render_to_response ('fileuploader/thanksSubmissions.html')
@staff_member_required
def viewSubmissions (request):
args = {}
args.update (csrf (request))
args['solutions'] = Solution.objects.all ()
return render_to_response ('fileuploader/viewSubmissions.html', args)
@login_required
def selectClass (request):
if request.POST:
form = ClassForm (request.POST)
if form.is_valid():
a = form.save(commit=False)
a.user = request.user
a.save()
return HttpResponseRedirect('viewAssignments.html')
else:
form = ClassForm()
args = {}
args.update(csrf(request))
args['form'] = form
return render_to_response('fileuploader/selectClass.html',args)
@login_required
def viewAssignments (request):
args = {}
args.update(csrf(request))
current_user = request.user
sc=Class.objects.filter(user=current_user)
if sc.count() == 1:
section = Class.objects.get(user=current_user)
else:
if sc.count () == 0:
return HttpResponseRedirect('/fileuploader/selectClass.html')
#return render_to_response('fileuploader/selectClass.html',args)
else:
htmlmessage = "Class Chosen Already - Contact Instructor or Create a New User Account"
args = {}
args.update (csrf (request))
args['message'] = htmlmessage
args['user'] = request.user
return render_to_response ('fileuploader/viewErrorMessage.html',args)
if request.user.is_superuser:
args['assignments'] = Assignment.objects.all()
else:
args['assignments'] = Assignment.objects.all().filter(hidden_status = 0,class_name=section.classnum)
#UTC TIME args['currenttime'] = datetime.datetime.now()
args['currenttime'] = timezone.now()
args['user'] = request.user
return render_to_response('fileuploader/viewAssignments.html',args)
@staff_member_required
def viewPrivateRankings (request, assignment_id):
args = {}
args.update (csrf (request))
assignment = get_object_or_404 (Assignment, pk = assignment_id)
args ['assignment'] = assignment
subset_entries = Solution.objects.filter (assignment=assignment_id).filter(status = "OK").order_by('user','-submission_time')
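    # Ordering by user, then newest submission first, means the first row seen
    # per user in the loop below is that user's most recent 'OK' submission,
    # which becomes their single leaderboard entry.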
u = "None"
leaderboard = []
i = 0
for entry in subset_entries:
if entry.user != u:
u = entry.user
leaderboard.append(entry)
i = i + 1
if assignment.scoring_method == 'RE':
leaderboard.sort(key = lambda x: x.score)
else:
leaderboard.sort(key = lambda x: x.score, reverse=True)
args['submissions'] = leaderboard
return render (request, 'fileuploader/viewPrivateRankings.html', args)
#student view
@login_required
def viewPublicRankings (request, assignment_id):
args = {}
args.update (csrf (request))
assignment = get_object_or_404 (Assignment, pk = assignment_id)
args ['assignment'] = assignment
subset_entries = Solution.objects.filter (assignment=assignment_id).filter(status = "OK").order_by('user','-submission_time')
u = "None"
leaderboard = []
i = 0
for entry in subset_entries:
if entry.user != u:
u = entry.user
leaderboard.append(entry)
i = i + 1
if assignment.scoring_method == 'RE':
leaderboard.sort(key = lambda x: x.public_score)
else:
leaderboard.sort(key = lambda x: x.public_score, reverse=True)
args['submissions'] = leaderboard
return render (request, 'fileuploader/viewPublicRankings.html', args)
#student view of Assignments
#for download
#seeing his own submissions
@staff_member_required
def viewSubmissionLogs (request, assignment_id):
args = {}
args.update (csrf (request))
assignment = get_object_or_404 (Assignment, pk = assignment_id)
args ['assignment'] = assignment
args['user'] = request.user
args['submissions'] = Solution.objects.filter (assignment = assignment_id,status='OK').order_by('submission_time')
scores_so_far = Solution.objects.values_list('score').filter (assignment = assignment_id,status='OK').order_by('submission_time')
pub_scores_so_far = Solution.objects.values_list('public_score').filter (assignment = assignment_id,status='OK').order_by('submission_time')
#print np.array (scores_so_far)
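    # Plot the private scores (red circles) and public scores (blue stars) in
    # submission order, then save the figure under MEDIA_ROOT so the
    # viewSubmissionLogs template can reference it via args['figplot'].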
plt.plot (np.array(scores_so_far), 'ro')
plt.plot (np.array(pub_scores_so_far), 'b*')
pngfilename = str('test' + assignment_id + '.png')
plt.savefig(settings.MEDIA_ROOT + pngfilename)
args['figplot'] = pngfilename
return render (request, 'fileuploader/viewSubmissionLogs.html', args)
@login_required
def viewAssignmentsDetail (request,assignment_id):
args = {}
args.update (csrf (request))
assignment = get_object_or_404 (Assignment, pk = assignment_id)
args['assignment'] = assignment
current_user = request.user
args['submissions'] = Solution.objects.filter (assignment = assignment_id,user=current_user).order_by('-submission_time')
'''
# create a plot
scores_so_far = Solution.objects.values_list('score').filter (assignment = assignment_id)
#print np.array (scores_so_far)
plt.plot (np.array(scores_so_far), 'ro')
pngfilename = str('test' + assignment_id + '.png')
plt.savefig(settings.MEDIA_ROOT + pngfilename)
args['figplot'] = pngfilename
'''
fileurls = settings.MEDIA_URL
args['fileurls'] = fileurls
#plt.show ()
return render (request, 'fileuploader/viewAssignmentsDetail.html', args)
@staff_member_required
def deleteAssignment (request, assignment_id):
if request.user.is_superuser:
obj2 = Solution.objects.filter (assignment = assignment_id).count()
if obj2 > 0:
u1 = Solution.objects.filter(assignment=assignment_id).delete()
u2 = Assignment.objects.get(pk=assignment_id).delete()
return render_to_response ('fileuploader/viewAssignments.html')
else:
        html = "<html><body>You are not authorized to perform this action</body></html>"
return HttpResponse(html)
@staff_member_required
def editAssignment (request,assignment_id):
assignment = get_object_or_404 (Assignment, pk = assignment_id)
if request.POST:
form = AssignmentForm(request.POST, request.FILES, instance=assignment)
if form.is_valid():
a = form.save()
#viewAssignmentsDetail (request, assignment_id)
return HttpResponseRedirect('../../viewAssignments.html')
else:
form = AssignmentForm(instance=assignment)
args = {}
args.update(csrf(request))
args['form'] = form
return render(request, 'fileuploader/editAssignment.html',args)
def create(request):
if request.POST:
form = ArticleForm(request.POST, request.FILES)
if form.is_valid():
a = form.save()
return HttpResponseRedirect('/fileuploader/articles.html')
else:
form = ArticleForm()
args = {}
args.update(csrf(request))
args['form'] = form
return render_to_response('fileuploader/create_article.html',args)
def register(request):
if request.POST:
uf = UserForm(request.POST, prefix='user')
if uf.is_valid():
cuser = uf.save()
cuser.set_password(cuser.password)
cuser.save()
return HttpResponseRedirect('/fileuploader/selectClass.html')
else:
uf = UserForm(prefix='user')
#return render_to_response('register.html', dict(userform=uf,userprofileform=upf), context_instance=RequestContext(request))
return render_to_response('register.html', dict(userform=uf), context_instance=RequestContext(request))
#user = User.objects.create_user(username=request.POST['login'], password=request.POST['password'])
# Create your views here.
| gpl-2.0 |
pp-mo/iris | lib/iris/tests/test_plot.py | 2 | 31819 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
from functools import wraps
import types
import warnings
import cf_units
import numpy as np
import iris
import iris.coords as coords
import iris.tests.stock
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import matplotlib.pyplot as plt
import iris.plot as iplt
import iris.quickplot as qplt
import iris.symbols
@tests.skip_data
def simple_cube():
cube = iris.tests.stock.realistic_4d()
cube = cube[:, 0, 0, :]
cube.coord("time").guess_bounds()
return cube
@tests.skip_plot
class TestSimple(tests.GraphicsTest):
def test_points(self):
cube = simple_cube()
qplt.contourf(cube)
self.check_graphic()
def test_bounds(self):
cube = simple_cube()
qplt.pcolor(cube)
self.check_graphic()
@tests.skip_plot
class TestMissingCoord(tests.GraphicsTest):
def _check(self, cube):
qplt.contourf(cube)
self.check_graphic()
qplt.pcolor(cube)
self.check_graphic()
def test_no_u(self):
cube = simple_cube()
cube.remove_coord("grid_longitude")
self._check(cube)
def test_no_v(self):
cube = simple_cube()
cube.remove_coord("time")
self._check(cube)
def test_none(self):
cube = simple_cube()
cube.remove_coord("grid_longitude")
cube.remove_coord("time")
self._check(cube)
@tests.skip_data
@tests.skip_plot
class TestMissingCS(tests.GraphicsTest):
@tests.skip_data
def test_missing_cs(self):
cube = tests.stock.simple_pp()
cube.coord("latitude").coord_system = None
cube.coord("longitude").coord_system = None
qplt.contourf(cube)
qplt.plt.gca().coastlines()
self.check_graphic()
@tests.skip_plot
@tests.skip_data
class TestHybridHeight(tests.GraphicsTest):
def setUp(self):
super().setUp()
self.cube = iris.tests.stock.realistic_4d()[0, :15, 0, :]
def _check(self, plt_method, test_altitude=True):
plt_method(self.cube)
self.check_graphic()
plt_method(self.cube, coords=["level_height", "grid_longitude"])
self.check_graphic()
plt_method(self.cube, coords=["grid_longitude", "level_height"])
self.check_graphic()
if test_altitude:
plt_method(self.cube, coords=["grid_longitude", "altitude"])
self.check_graphic()
plt_method(self.cube, coords=["altitude", "grid_longitude"])
self.check_graphic()
def test_points(self):
self._check(qplt.contourf)
def test_bounds(self):
self._check(qplt.pcolor, test_altitude=False)
def test_orography(self):
qplt.contourf(self.cube)
iplt.orography_at_points(self.cube)
iplt.points(self.cube)
self.check_graphic()
coords = ["altitude", "grid_longitude"]
qplt.contourf(self.cube, coords=coords)
iplt.orography_at_points(self.cube, coords=coords)
iplt.points(self.cube, coords=coords)
self.check_graphic()
# TODO: Test bounds once they are supported.
with self.assertRaises(NotImplementedError):
qplt.pcolor(self.cube)
iplt.orography_at_bounds(self.cube)
iplt.outline(self.cube)
self.check_graphic()
@tests.skip_plot
@tests.skip_data
class Test1dPlotMultiArgs(tests.GraphicsTest):
# tests for iris.plot using multi-argument calling convention
def setUp(self):
super().setUp()
self.cube1d = _load_4d_testcube()[0, :, 0, 0]
self.draw_method = iplt.plot
def test_cube(self):
# just plot a cube against its dim coord
self.draw_method(self.cube1d) # altitude vs temp
self.check_graphic()
def test_coord(self):
# plot the altitude coordinate
self.draw_method(self.cube1d.coord("altitude"))
self.check_graphic()
def test_coord_cube(self):
# plot temperature against sigma
self.draw_method(self.cube1d.coord("sigma"), self.cube1d)
self.check_graphic()
def test_cube_coord(self):
# plot a vertical profile of temperature
self.draw_method(self.cube1d, self.cube1d.coord("altitude"))
self.check_graphic()
def test_coord_coord(self):
# plot two coordinates that are not mappable
self.draw_method(
self.cube1d.coord("sigma"), self.cube1d.coord("altitude")
)
self.check_graphic()
def test_coord_coord_map(self):
# plot lat-lon aux coordinates of a trajectory, which draws a map
lon = iris.coords.AuxCoord(
[0, 5, 10, 15, 20, 25, 30, 35, 40, 45],
standard_name="longitude",
units="degrees_north",
)
lat = iris.coords.AuxCoord(
[45, 55, 50, 60, 55, 65, 60, 70, 65, 75],
standard_name="latitude",
units="degrees_north",
)
self.draw_method(lon, lat)
plt.gca().coastlines()
self.check_graphic()
def test_cube_cube(self):
        # plot two phenomena against each other, in this case just dummy data
cube1 = self.cube1d.copy()
cube2 = self.cube1d.copy()
cube1.rename("some phenomenon")
cube2.rename("some other phenomenon")
cube1.units = cf_units.Unit("no_unit")
cube2.units = cf_units.Unit("no_unit")
cube1.data[:] = np.linspace(0, 1, 7)
cube2.data[:] = np.exp(cube1.data)
self.draw_method(cube1, cube2)
self.check_graphic()
def test_incompatible_objects(self):
# incompatible objects (not the same length) should raise an error
with self.assertRaises(ValueError):
self.draw_method(self.cube1d.coord("time"), (self.cube1d))
    def test_multidimensional(self):
# multidimensional cubes are not allowed
cube = _load_4d_testcube()[0, :, :, 0]
with self.assertRaises(ValueError):
self.draw_method(cube)
def test_not_cube_or_coord(self):
# inputs must be cubes or coordinates, otherwise an error should be
# raised
xdim = np.arange(self.cube1d.shape[0])
with self.assertRaises(TypeError):
self.draw_method(xdim, self.cube1d)
def test_plot_old_coords_kwarg(self):
        # Coords used to be a valid kwarg to plot; it has since been
        # deprecated, and a helpful exception is still maintained, so check
        # that it is raised here.
with self.assertRaises(TypeError):
self.draw_method(self.cube1d, coords=None)
@tests.skip_plot
class Test1dQuickplotPlotMultiArgs(Test1dPlotMultiArgs):
    # tests for iris.quickplot using multi-argument calling convention
def setUp(self):
tests.GraphicsTest.setUp(self)
self.cube1d = _load_4d_testcube()[0, :, 0, 0]
self.draw_method = qplt.plot
@tests.skip_data
@tests.skip_plot
class Test1dScatter(tests.GraphicsTest):
def setUp(self):
super().setUp()
self.cube = iris.load_cube(
tests.get_data_path(("NAME", "NAMEIII_trajectory.txt")),
"Temperature",
)
self.draw_method = iplt.scatter
def test_coord_coord(self):
x = self.cube.coord("longitude")
y = self.cube.coord("altitude")
c = self.cube.data
self.draw_method(x, y, c=c, edgecolor="none")
self.check_graphic()
def test_coord_coord_map(self):
x = self.cube.coord("longitude")
y = self.cube.coord("latitude")
c = self.cube.data
self.draw_method(x, y, c=c, edgecolor="none")
plt.gca().coastlines()
self.check_graphic()
def test_coord_cube(self):
x = self.cube.coord("latitude")
y = self.cube
c = self.cube.coord("Travel Time").points
self.draw_method(x, y, c=c, edgecolor="none")
self.check_graphic()
def test_cube_coord(self):
x = self.cube
y = self.cube.coord("altitude")
c = self.cube.coord("Travel Time").points
self.draw_method(x, y, c=c, edgecolor="none")
self.check_graphic()
def test_cube_cube(self):
x = iris.load_cube(
tests.get_data_path(("NAME", "NAMEIII_trajectory.txt")),
"Rel Humidity",
)
y = self.cube
c = self.cube.coord("Travel Time").points
self.draw_method(x, y, c=c, edgecolor="none")
self.check_graphic()
def test_incompatible_objects(self):
# cubes/coordinates of different sizes cannot be plotted
x = self.cube
y = self.cube.coord("altitude")[:-1]
with self.assertRaises(ValueError):
self.draw_method(x, y)
def test_multidimensional(self):
# multidimensional cubes/coordinates are not allowed
x = _load_4d_testcube()[0, :, :, 0]
y = x.coord("model_level_number")
with self.assertRaises(ValueError):
self.draw_method(x, y)
def test_not_cube_or_coord(self):
# inputs must be cubes or coordinates
x = np.arange(self.cube.shape[0])
y = self.cube
with self.assertRaises(TypeError):
self.draw_method(x, y)
@tests.skip_data
@tests.skip_plot
class Test1dQuickplotScatter(Test1dScatter):
def setUp(self):
tests.GraphicsTest.setUp(self)
self.cube = iris.load_cube(
tests.get_data_path(("NAME", "NAMEIII_trajectory.txt")),
"Temperature",
)
self.draw_method = qplt.scatter
@tests.skip_data
@tests.skip_plot
class TestAttributePositive(tests.GraphicsTest):
def test_1d_positive_up(self):
path = tests.get_data_path(("NetCDF", "ORCA2", "votemper.nc"))
cube = iris.load_cube(path)
qplt.plot(cube.coord("depth"), cube[0, :, 60, 80])
self.check_graphic()
def test_1d_positive_down(self):
path = tests.get_data_path(("NetCDF", "ORCA2", "votemper.nc"))
cube = iris.load_cube(path)
qplt.plot(cube[0, :, 60, 80], cube.coord("depth"))
self.check_graphic()
def test_2d_positive_up(self):
path = tests.get_data_path(
("NetCDF", "testing", "small_theta_colpex.nc")
)
cube = iris.load_cube(path, "air_potential_temperature")[0, :, 42, :]
qplt.pcolormesh(cube)
self.check_graphic()
def test_2d_positive_down(self):
path = tests.get_data_path(("NetCDF", "ORCA2", "votemper.nc"))
cube = iris.load_cube(path)[0, :, 42, :]
qplt.pcolormesh(cube)
self.check_graphic()
# Caches _load_4d_testcube so subsequent calls are faster
def cache(fn, cache={}):
def inner(*args, **kwargs):
key = fn.__name__
if key not in cache:
cache[key] = fn(*args, **kwargs)
return cache[key]
return inner
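# The key is fn.__name__ and the call arguments are ignored, so the wrapped
# function's first result is reused for every later call; the shared
# default-argument dict acts as a module-level cache for all wrapped loaders.
# A minimal sketch of the same pattern (hypothetical loader name):
#
#     @cache
#     def _load_expensive_testdata():
#         return build_testdata()   # runs once; later calls reuse the result
#
# This is safe here because each decorated loader has a unique name and takes
# no arguments that would need to participate in the cache key.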
@cache
@tests.skip_data
def _load_4d_testcube():
# Load example 4d data (TZYX).
test_cube = iris.tests.stock.realistic_4d()
# Replace forecast_period coord with a multi-valued version.
time_coord = test_cube.coord("time")
n_times = len(time_coord.points)
forecast_dims = test_cube.coord_dims(time_coord)
test_cube.remove_coord("forecast_period")
# Make up values (including bounds), to roughly match older testdata.
point_values = np.linspace((1 + 1.0 / 6), 2.0, n_times)
point_uppers = point_values + (point_values[1] - point_values[0])
bound_values = np.column_stack([point_values, point_uppers])
# NOTE: this must be a DimCoord
# - an equivalent AuxCoord produces different plots.
new_forecast_coord = iris.coords.DimCoord(
points=point_values,
bounds=bound_values,
standard_name="forecast_period",
units=cf_units.Unit("hours"),
)
test_cube.add_aux_coord(new_forecast_coord, forecast_dims)
# Heavily reduce dimensions for faster testing.
# NOTE: this makes ZYX non-contiguous. Doesn't seem to matter for now.
test_cube = test_cube[:, ::10, ::10, ::10]
return test_cube
@cache
def _load_wind_no_bounds():
# Load the COLPEX data => TZYX
path = tests.get_data_path(("PP", "COLPEX", "small_eastward_wind.pp"))
wind = iris.load_cube(path, "x_wind")
# Remove bounds from all coords that have them.
wind.coord("grid_latitude").bounds = None
wind.coord("grid_longitude").bounds = None
wind.coord("level_height").bounds = None
wind.coord("sigma").bounds = None
return wind[:, :, :50, :50]
def _time_series(src_cube):
# Until we have plotting support for multiple axes on the same dimension,
# remove the time coordinate and its axis.
cube = src_cube.copy()
cube.remove_coord("time")
return cube
def _date_series(src_cube):
# Until we have plotting support for multiple axes on the same dimension,
# remove the forecast_period coordinate and its axis.
cube = src_cube.copy()
cube.remove_coord("forecast_period")
return cube
@tests.skip_plot
class SliceMixin:
"""Mixin class providing tests for each 2-dimensional permutation of axes.
Requires self.draw_method to be the relevant plotting function,
and self.results to be a dictionary containing the desired test results."""
def test_yx(self):
cube = self.wind[0, 0, :, :]
self.draw_method(cube)
self.check_graphic()
def test_zx(self):
cube = self.wind[0, :, 0, :]
self.draw_method(cube)
self.check_graphic()
def test_tx(self):
cube = _time_series(self.wind[:, 0, 0, :])
self.draw_method(cube)
self.check_graphic()
def test_zy(self):
cube = self.wind[0, :, :, 0]
self.draw_method(cube)
self.check_graphic()
def test_ty(self):
cube = _time_series(self.wind[:, 0, :, 0])
self.draw_method(cube)
self.check_graphic()
def test_tz(self):
cube = _time_series(self.wind[:, :, 0, 0])
self.draw_method(cube)
self.check_graphic()
@tests.skip_data
class TestContour(tests.GraphicsTest, SliceMixin):
"""Test the iris.plot.contour routine."""
def setUp(self):
super().setUp()
self.wind = _load_4d_testcube()
self.draw_method = iplt.contour
@tests.skip_data
class TestContourf(tests.GraphicsTest, SliceMixin):
"""Test the iris.plot.contourf routine."""
def setUp(self):
super().setUp()
self.wind = _load_4d_testcube()
self.draw_method = iplt.contourf
@tests.skip_data
class TestPcolor(tests.GraphicsTest, SliceMixin):
"""Test the iris.plot.pcolor routine."""
def setUp(self):
super().setUp()
self.wind = _load_4d_testcube()
self.draw_method = iplt.pcolor
@tests.skip_data
class TestPcolormesh(tests.GraphicsTest, SliceMixin):
"""Test the iris.plot.pcolormesh routine."""
def setUp(self):
super().setUp()
self.wind = _load_4d_testcube()
self.draw_method = iplt.pcolormesh
def check_warnings(method):
"""
Decorator that adds a catch_warnings and filter to assert
the method being decorated issues a UserWarning.
"""
@wraps(method)
def decorated_method(self, *args, **kwargs):
# Force reset of iris.coords warnings registry to avoid suppression of
# repeated warnings. warnings.resetwarnings() does not do this.
if hasattr(coords, "__warningregistry__"):
coords.__warningregistry__.clear()
# Check that method raises warning.
with warnings.catch_warnings():
warnings.simplefilter("error")
with self.assertRaises(UserWarning):
return method(self, *args, **kwargs)
return decorated_method
def ignore_warnings(method):
"""
Decorator that adds a catch_warnings and filter to suppress
    any warnings issued by the method being decorated.
"""
@wraps(method)
def decorated_method(self, *args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return method(self, *args, **kwargs)
return decorated_method
class CheckForWarningsMetaclass(type):
"""
Metaclass that adds a further test for each base class test
that checks that each test raises a UserWarning. Each base
    class test is then overridden to ignore warnings in order to
check the underlying functionality.
"""
def __new__(cls, name, bases, local):
def add_decorated_methods(attr_dict, target_dict, decorator):
for key, value in attr_dict.items():
if isinstance(value, types.FunctionType) and key.startswith(
"test"
):
new_key = "_".join((key, decorator.__name__))
if new_key not in target_dict:
wrapped = decorator(value)
wrapped.__name__ = new_key
target_dict[new_key] = wrapped
else:
                        raise RuntimeError(
                            "An attribute called {!r} "
                            "already exists.".format(new_key)
                        )
def override_with_decorated_methods(attr_dict, target_dict, decorator):
for key, value in attr_dict.items():
if isinstance(value, types.FunctionType) and key.startswith(
"test"
):
target_dict[key] = decorator(value)
# Add decorated versions of base methods
# to check for warnings.
for base in bases:
add_decorated_methods(base.__dict__, local, check_warnings)
# Override base methods to ignore warnings.
for base in bases:
override_with_decorated_methods(
base.__dict__, local, ignore_warnings
)
return type.__new__(cls, name, bases, local)
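# As a rough sketch of what this metaclass produces: for a base mixin method
# such as test_yx, the generated class ends up with
#
#     test_yx                  -> base method wrapped with ignore_warnings
#     test_yx_check_warnings   -> base method wrapped with check_warnings
#
# so each slice test runs twice, once asserting that a UserWarning is raised
# and once with warnings suppressed to exercise the underlying plotting code.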
@tests.skip_data
@tests.iristest_timing_decorator
class TestPcolorNoBounds(
tests.GraphicsTest_nometa, SliceMixin, metaclass=CheckForWarningsMetaclass
):
"""
Test the iris.plot.pcolor routine on a cube with coordinates
that have no bounds.
"""
def setUp(self):
super().setUp()
self.wind = _load_wind_no_bounds()
self.draw_method = iplt.pcolor
@tests.skip_data
@tests.iristest_timing_decorator
class TestPcolormeshNoBounds(
tests.GraphicsTest_nometa, SliceMixin, metaclass=CheckForWarningsMetaclass
):
"""
Test the iris.plot.pcolormesh routine on a cube with coordinates
that have no bounds.
"""
def setUp(self):
super().setUp()
self.wind = _load_wind_no_bounds()
self.draw_method = iplt.pcolormesh
@tests.skip_plot
class Slice1dMixin:
"""Mixin class providing tests for each 1-dimensional permutation of axes.
Requires self.draw_method to be the relevant plotting function,
and self.results to be a dictionary containing the desired test results."""
def test_x(self):
cube = self.wind[0, 0, 0, :]
self.draw_method(cube)
self.check_graphic()
def test_y(self):
cube = self.wind[0, 0, :, 0]
self.draw_method(cube)
self.check_graphic()
def test_z(self):
cube = self.wind[0, :, 0, 0]
self.draw_method(cube)
self.check_graphic()
def test_t(self):
cube = _time_series(self.wind[:, 0, 0, 0])
self.draw_method(cube)
self.check_graphic()
def test_t_dates(self):
cube = _date_series(self.wind[:, 0, 0, 0])
self.draw_method(cube)
plt.gcf().autofmt_xdate()
plt.xlabel("Phenomenon time")
self.check_graphic()
@tests.skip_data
class TestPlot(tests.GraphicsTest, Slice1dMixin):
"""Test the iris.plot.plot routine."""
def setUp(self):
super().setUp()
self.wind = _load_4d_testcube()
self.draw_method = iplt.plot
@tests.skip_data
class TestQuickplotPlot(tests.GraphicsTest, Slice1dMixin):
"""Test the iris.quickplot.plot routine."""
def setUp(self):
super().setUp()
self.wind = _load_4d_testcube()
self.draw_method = qplt.plot
_load_cube_once_cache = {}
def load_cube_once(filename, constraint):
"""Same syntax as load_cube, but will only load a file once,
then cache the answer in a dictionary.
"""
global _load_cube_once_cache
key = (filename, str(constraint))
cube = _load_cube_once_cache.get(key, None)
if cube is None:
cube = iris.load_cube(filename, constraint)
_load_cube_once_cache[key] = cube
return cube
class LambdaStr:
"""Provides a callable function which has a sensible __repr__."""
def __init__(self, repr, lambda_fn):
self.repr = repr
self.lambda_fn = lambda_fn
def __call__(self, *args, **kwargs):
return self.lambda_fn(*args, **kwargs)
def __repr__(self):
return self.repr
@tests.skip_data
@tests.skip_plot
class TestPlotCoordinatesGiven(tests.GraphicsTest):
def setUp(self):
super().setUp()
filename = tests.get_data_path(
("PP", "COLPEX", "theta_and_orog_subset.pp")
)
self.cube = load_cube_once(filename, "air_potential_temperature")
self.draw_module = iris.plot
self.contourf = LambdaStr(
"iris.plot.contourf",
lambda cube, *args, **kwargs: iris.plot.contourf(
cube, *args, **kwargs
),
)
self.contour = LambdaStr(
"iris.plot.contour",
lambda cube, *args, **kwargs: iris.plot.contour(
cube, *args, **kwargs
),
)
self.points = LambdaStr(
"iris.plot.points",
lambda cube, *args, **kwargs: iris.plot.points(
cube, c=cube.data, *args, **kwargs
),
)
self.plot = LambdaStr(
"iris.plot.plot",
lambda cube, *args, **kwargs: iris.plot.plot(
cube, *args, **kwargs
),
)
self.results = {
"yx": (
[self.contourf, ["grid_latitude", "grid_longitude"]],
[self.contourf, ["grid_longitude", "grid_latitude"]],
[self.contour, ["grid_latitude", "grid_longitude"]],
[self.contour, ["grid_longitude", "grid_latitude"]],
[self.points, ["grid_latitude", "grid_longitude"]],
[self.points, ["grid_longitude", "grid_latitude"]],
),
"zx": (
[self.contourf, ["model_level_number", "grid_longitude"]],
[self.contourf, ["grid_longitude", "model_level_number"]],
[self.contour, ["model_level_number", "grid_longitude"]],
[self.contour, ["grid_longitude", "model_level_number"]],
[self.points, ["model_level_number", "grid_longitude"]],
[self.points, ["grid_longitude", "model_level_number"]],
),
"tx": (
[self.contourf, ["time", "grid_longitude"]],
[self.contourf, ["grid_longitude", "time"]],
[self.contour, ["time", "grid_longitude"]],
[self.contour, ["grid_longitude", "time"]],
[self.points, ["time", "grid_longitude"]],
[self.points, ["grid_longitude", "time"]],
),
"x": ([self.plot, ["grid_longitude"]],),
"y": ([self.plot, ["grid_latitude"]],),
}
def draw(self, draw_method, *args, **kwargs):
draw_fn = getattr(self.draw_module, draw_method)
draw_fn(*args, **kwargs)
self.check_graphic()
def run_tests(self, cube, results):
for draw_method, rcoords in results:
draw_method(cube, coords=rcoords)
try:
self.check_graphic()
except AssertionError as err:
self.fail(
"Draw method %r failed with coords: %r. "
"Assertion message: %s" % (draw_method, rcoords, err)
)
def run_tests_1d(self, cube, results):
# there is a different calling convention for 1d plots
for draw_method, rcoords in results:
draw_method(cube.coord(rcoords[0]), cube)
try:
self.check_graphic()
except AssertionError as err:
msg = (
"Draw method {!r} failed with coords: {!r}. "
"Assertion message: {!s}"
)
self.fail(msg.format(draw_method, rcoords, err))
def test_yx(self):
test_cube = self.cube[0, 0, :, :]
self.run_tests(test_cube, self.results["yx"])
def test_zx(self):
test_cube = self.cube[0, :15, 0, :]
self.run_tests(test_cube, self.results["zx"])
def test_tx(self):
test_cube = self.cube[:, 0, 0, :]
self.run_tests(test_cube, self.results["tx"])
def test_x(self):
test_cube = self.cube[0, 0, 0, :]
self.run_tests_1d(test_cube, self.results["x"])
def test_y(self):
test_cube = self.cube[0, 0, :, 0]
self.run_tests_1d(test_cube, self.results["y"])
def test_badcoords(self):
cube = self.cube[0, 0, :, :]
draw_fn = getattr(self.draw_module, "contourf")
self.assertRaises(
ValueError,
draw_fn,
cube,
coords=["grid_longitude", "grid_longitude"],
)
self.assertRaises(
ValueError,
draw_fn,
cube,
coords=["grid_longitude", "grid_longitude", "grid_latitude"],
)
self.assertRaises(
iris.exceptions.CoordinateNotFoundError,
draw_fn,
cube,
coords=["grid_longitude", "wibble"],
)
self.assertRaises(ValueError, draw_fn, cube, coords=[])
self.assertRaises(
ValueError,
draw_fn,
cube,
coords=[
cube.coord("grid_longitude"),
cube.coord("grid_longitude"),
],
)
self.assertRaises(
ValueError,
draw_fn,
cube,
coords=[
cube.coord("grid_longitude"),
cube.coord("grid_longitude"),
cube.coord("grid_longitude"),
],
)
def test_non_cube_coordinate(self):
cube = self.cube[0, :, :, 0]
pts = -100 + np.arange(cube.shape[1]) * 13
x = coords.DimCoord(
pts,
standard_name="model_level_number",
attributes={"positive": "up"},
)
self.draw("contourf", cube, coords=["grid_latitude", x])
@tests.skip_data
@tests.skip_plot
class TestPlotDimAndAuxCoordsKwarg(tests.GraphicsTest):
def setUp(self):
super().setUp()
filename = tests.get_data_path(
("NetCDF", "rotated", "xy", "rotPole_landAreaFraction.nc")
)
self.cube = iris.load_cube(filename)
def test_default(self):
iplt.contourf(self.cube)
plt.gca().coastlines()
self.check_graphic()
def test_coords(self):
# Pass in dimension coords.
rlat = self.cube.coord("grid_latitude")
rlon = self.cube.coord("grid_longitude")
iplt.contourf(self.cube, coords=[rlon, rlat])
plt.gca().coastlines()
self.check_graphic()
# Pass in auxiliary coords.
lat = self.cube.coord("latitude")
lon = self.cube.coord("longitude")
iplt.contourf(self.cube, coords=[lon, lat])
plt.gca().coastlines()
self.check_graphic()
def test_coord_names(self):
# Pass in names of dimension coords.
iplt.contourf(self.cube, coords=["grid_longitude", "grid_latitude"])
plt.gca().coastlines()
self.check_graphic()
# Pass in names of auxiliary coords.
iplt.contourf(self.cube, coords=["longitude", "latitude"])
plt.gca().coastlines()
self.check_graphic()
def test_yx_order(self):
# Do not attempt to draw coastlines as it is not a map.
iplt.contourf(self.cube, coords=["grid_latitude", "grid_longitude"])
self.check_graphic()
iplt.contourf(self.cube, coords=["latitude", "longitude"])
self.check_graphic()
@tests.skip_plot
class TestSymbols(tests.GraphicsTest):
def test_cloud_cover(self):
iplt.symbols(
list(range(10)),
[0] * 10,
[iris.symbols.CLOUD_COVER[i] for i in range(10)],
0.375,
)
iplt.plt.axis("off")
self.check_graphic()
@tests.skip_plot
class TestPlottingExceptions(tests.IrisTest):
def setUp(self):
self.bounded_cube = tests.stock.lat_lon_cube()
self.bounded_cube.coord("latitude").guess_bounds()
self.bounded_cube.coord("longitude").guess_bounds()
def test_boundmode_multidim(self):
# Test exception translation.
# We can't get contiguous bounded grids from multi-d coords.
cube = self.bounded_cube
cube.remove_coord("latitude")
cube.add_aux_coord(
coords.AuxCoord(
points=cube.data, standard_name="latitude", units="degrees"
),
[0, 1],
)
with self.assertRaises(ValueError):
iplt.pcolormesh(cube, coords=["longitude", "latitude"])
def test_boundmode_4bounds(self):
# Test exception translation.
# We can only get contiguous bounded grids with 2 bounds per point.
cube = self.bounded_cube
lat = coords.AuxCoord.from_coord(cube.coord("latitude"))
lat.bounds = np.array(
[lat.points, lat.points + 1, lat.points + 2, lat.points + 3]
).transpose()
cube.remove_coord("latitude")
cube.add_aux_coord(lat, 0)
with self.assertRaises(ValueError):
iplt.pcolormesh(cube, coords=["longitude", "latitude"])
def test_different_coord_systems(self):
cube = self.bounded_cube
lat = cube.coord("latitude")
lon = cube.coord("longitude")
lat.coord_system = iris.coord_systems.GeogCS(7000000)
lon.coord_system = iris.coord_systems.GeogCS(7000001)
with self.assertRaises(ValueError):
iplt.pcolormesh(cube, coords=["longitude", "latitude"])
@tests.skip_data
@tests.skip_plot
class TestPlotOtherCoordSystems(tests.GraphicsTest):
def test_plot_tmerc(self):
filename = tests.get_data_path(
("NetCDF", "transverse_mercator", "tmean_1910_1910.nc")
)
self.cube = iris.load_cube(filename)
iplt.pcolormesh(self.cube[0])
plt.gca().coastlines()
self.check_graphic()
@tests.skip_plot
class TestPlotCitation(tests.GraphicsTest):
def setUp(self):
super().setUp()
self.figure = plt.figure()
self.axes = self.figure.gca()
self.text = (
"Lorem ipsum dolor sit amet, consectetur adipiscing "
"elit, sed do eiusmod tempor incididunt ut labore et "
"dolore magna aliqua."
)
def test(self):
iplt.citation(self.text)
self.check_graphic()
def test_figure(self):
iplt.citation(self.text, figure=self.figure)
self.check_graphic()
def test_axes(self):
iplt.citation(self.text, axes=self.axes)
self.check_graphic()
if __name__ == "__main__":
tests.main()
| lgpl-3.0 |
darcy0511/Dato-Core | src/unity/python/graphlab/test/test_sframe.py | 13 | 108197 | '''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
# from nose import with_setup
import graphlab as gl
from graphlab.data_structures.sframe import SFrame
from graphlab.data_structures.sarray import SArray
from graphlab.data_structures.image import Image
from graphlab.connect import main as glconnect
from graphlab.connect import server
from graphlab.util import _assert_sframe_equal
import pandas as pd
from graphlab_util.timezone import GMT
from pandas.util.testing import assert_frame_equal
import unittest
import datetime as dt
import tempfile
import os
import csv
import gzip
import util
import string
import time
import numpy as np
import array
import math
import random
import shutil
import functools
HAS_PYSPARK = True
try:
from pyspark import SparkContext, SQLContext
except:
HAS_PYSPARK = False
#######################################################
# Metrics tracking tests are in test_usage_metrics.py #
#######################################################
# Taken from http://stackoverflow.com/questions/1151658/python-hashable-dicts
# by Alex Martelli
class hashabledict(dict):
def __key(self):
return tuple((k,self[k]) for k in sorted(self))
def __hash__(self):
return hash(self.__key())
def __eq__(self, other):
return self.__key() == other.__key()
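# Being hashable lets row dictionaries be collected into sets or used as dict
# keys for order-independent comparison, e.g. (illustrative only):
#
#     s = set([hashabledict({'a': 1}), hashabledict({'a': 1})])
#     assert len(s) == 1   # equal contents produce equal hashes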
class SFrameTest(unittest.TestCase):
def setUp(self):
self.int_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.float_data = [1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]
self.string_data = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
self.a_to_z = [str(unichr(97 + i)) for i in range(0, 26)]
self.dataframe = pd.DataFrame({'int_data': self.int_data, 'float_data': self.float_data, 'string_data': self.string_data})
self.url = "http://s3-us-west-2.amazonaws.com/testdatasets/a_to_z.txt.gz"
self.int_data2 = range(50,60)
self.float_data2 = [1.0 * i for i in range(50,60)]
self.string_data2 = [str(i) for i in range(50,60)]
self.dataframe2 = pd.DataFrame({'int_data': self.int_data2, 'float_data': self.float_data2, 'string_data': self.string_data2})
# Taken from http://en.wikipedia.org/wiki/Join_(SQL) for fun.
self.employees_sf = SFrame()
self.employees_sf.add_column(SArray(['Rafferty','Jones','Heisenberg','Robinson','Smith','John']), 'last_name')
self.employees_sf.add_column(SArray([31,33,33,34,34,None]), 'dep_id')
self.departments_sf = SFrame()
self.departments_sf.add_column(SArray([31,33,34,35]), 'dep_id')
self.departments_sf.add_column(SArray(['Sales','Engineering','Clerical','Marketing']), 'dep_name')
def __assert_sarray_equal(self, sa1, sa2):
l1 = list(sa1)
l2 = list(sa2)
self.assertEquals(len(l1), len(l2))
for i in range(len(l1)):
v1 = l1[i]
v2 = l2[i]
if v1 == None:
self.assertEqual(v2, None)
else:
if type(v1) == dict:
self.assertEquals(len(v1), len(v2))
for key in v1:
self.assertTrue(v1.has_key(key))
self.assertEqual(v1[key], v2[key])
elif (hasattr(v1, "__iter__")):
self.assertEquals(len(v1), len(v2))
for j in range(len(v1)):
t1 = v1[j]; t2 = v2[j]
if (type(t1) == float):
if (math.isnan(t1)):
self.assertTrue(math.isnan(t2))
else:
self.assertEquals(t1, t2)
else:
self.assertEquals(t1, t2)
else:
self.assertEquals(v1, v2)
def test_split_datetime(self):
from_zone = GMT(0)
to_zone = GMT(4.5)
utc = dt.datetime.strptime('2011-01-21 02:37:21', '%Y-%m-%d %H:%M:%S')
utc = utc.replace(tzinfo=from_zone)
central = utc.astimezone(to_zone)
sa = SArray([utc,central])
expected = SFrame()
expected ['X.year'] = [2011,2011]
expected ['X.month'] = [1,1]
expected ['X.day'] = [21,21]
expected ['X.hour'] = [2,7]
expected ['X.minute'] = [37,7]
expected ['X.second'] = [21,21]
expected ['X.tzone'] = [0.0,4.5]
result = sa.split_datetime(tzone=True)
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# column names
expected = SFrame()
expected ['ttt.year'] = [2011,2011]
expected ['ttt.minute'] = [37,7]
expected ['ttt.second'] = [21,21]
result = sa.split_datetime(column_name_prefix='ttt',limit=['year','minute','second']);
self.assertEqual(result.column_names(), ['ttt.year', 'ttt.minute', 'ttt.second'])
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
sf = SFrame({'datetime': sa})
result = sf.split_datetime('datetime', column_name_prefix='ttt',limit=['year','minute','second']);
self.assertEqual(result.column_names(), ['ttt.year', 'ttt.minute', 'ttt.second'])
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
def __test_equal(self, sf, df):
self.assertEquals(sf.num_rows(), df.shape[0])
self.assertEquals(sf.num_cols(), df.shape[1])
assert_frame_equal(sf.to_dataframe(), df)
def __create_test_df(self, size):
int_data = []
float_data = []
string_data = []
for i in range(0,size):
int_data.append(i)
float_data.append(float(i))
string_data.append(str(i))
return pd.DataFrame({'int_data': int_data,
'float_data': float_data,
'string_data': string_data})
# Test if the rows are all the same...row order does not matter.
# (I do expect column order to be the same)
def __assert_join_results_equal(self, sf, expected_sf):
_assert_sframe_equal(sf, expected_sf, check_row_order=False)
def test_creation_from_dataframe(self):
# created from empty dataframe
sf_empty = SFrame(data=pd.DataFrame())
self.__test_equal(sf_empty, pd.DataFrame())
sf = SFrame(data=self.dataframe, format='dataframe')
self.__test_equal(sf, self.dataframe)
sf = SFrame(data=self.dataframe, format='auto')
self.__test_equal(sf, self.dataframe)
original_p = pd.DataFrame({'a':[1.0, float('nan')]})
effective_p = pd.DataFrame({'a':[1.0, None]})
sf = SFrame(data=original_p)
self.__test_equal(sf, effective_p)
original_p = pd.DataFrame({'a':['a',None,'b',float('nan')]})
effective_p = pd.DataFrame({'a':['a',None,'b',None]})
sf = SFrame(data=original_p)
self.__test_equal(sf, effective_p)
def test_auto_parse_csv(self):
with tempfile.NamedTemporaryFile(delete=False) as csvfile:
df = pd.DataFrame({'float_data': self.float_data,
'int_data': self.int_data,
'string_data': self.a_to_z[:len(self.int_data)]})
df.to_csv(csvfile, index=False)
csvfile.close()
sf = SFrame.read_csv(csvfile.name, header=True)
self.assertEqual(sf.dtype(), [float, int, str])
self.__test_equal(sf, df)
def test_parse_csv(self):
with tempfile.NamedTemporaryFile(delete=False) as csvfile:
self.dataframe.to_csv(csvfile, index=False)
csvfile.close()
# list type hints
sf = SFrame.read_csv(csvfile.name,
column_type_hints=[int, int, str])
self.assertEqual(sf.dtype(), [int, int, str])
sf['int_data'] = sf['int_data'].astype(int)
sf['float_data'] = sf['float_data'].astype(float)
sf['string_data'] = sf['string_data'].astype(str)
self.__test_equal(sf, self.dataframe)
# list type hints, incorrect number of columns
self.assertRaises(RuntimeError,
lambda: SFrame.read_csv(csvfile.name,
column_type_hints=[int, float]))
# dictionary type hints
sf = SFrame.read_csv(csvfile.name,
column_type_hints={'int_data': int,
'float_data': float,
'string_data': str})
self.__test_equal(sf, self.dataframe)
# single value type hints
sf = SFrame.read_csv(csvfile.name, column_type_hints=str)
self.assertEqual(sf.dtype(), [str, str, str])
all_string_column_df = self.dataframe.apply(lambda x: [str(ele) for ele in x])
self.__test_equal(sf, all_string_column_df)
# single value type hints row limit
sf = SFrame.read_csv(csvfile.name, column_type_hints=str, nrows=5)
self.assertEqual(sf.dtype(), [str, str, str])
all_string_column_df = self.dataframe.apply(lambda x: [str(ele) for ele in x])
self.assertEqual(len(sf), 5)
self.__test_equal(sf, all_string_column_df[0:len(sf)])
sf = SFrame.read_csv(csvfile.name)
sf2 = SFrame(csvfile.name, format='csv')
self.__test_equal(sf2, sf.to_dataframe())
f = open(csvfile.name, "w")
f.write('a,b,c\n')
f.write('NA,PIKA,CHU\n')
f.write('1.0,2,3\n')
f.close()
        # Columns without an explicit type hint default to string type
sf = SFrame.read_csv(csvfile.name,
na_values=['NA','PIKA','CHU'],
column_type_hints={'a':float,'b':int})
t = list(sf['a'])
self.assertEquals(t[0], None)
self.assertEquals(t[1], 1.0)
t = list(sf['b'])
self.assertEquals(t[0], None)
self.assertEquals(t[1], 2)
t = list(sf['c'])
self.assertEquals(t[0], None)
self.assertEquals(t[1], "3")
def test_save_load_file_cleanup(self):
# when some file is in use, file should not be deleted
with util.TempDirectory() as f:
sf = SFrame()
sf['a'] = SArray(range(1,1000000))
sf.save(f)
# many for each sarray, 1 sframe_idx, 1 object.bin, 1 ini
file_count = len(os.listdir(f))
self.assertTrue(file_count > 3);
# sf1 now references the on disk file
sf1 = SFrame(f);
# create another SFrame and save to the same location
sf2 = SFrame()
sf2['b'] = SArray([str(i) for i in range(1,100000)])
sf2['c'] = SArray(range(1, 100000))
sf2.save(f)
file_count = len(os.listdir(f))
self.assertTrue(file_count > 3);
# now sf1 should still be accessible
self.__test_equal(sf1, sf.to_dataframe())
# and sf2 is correct too
sf3 = SFrame(f)
self.__test_equal(sf3, sf2.to_dataframe())
# when sf1 goes out of scope, the tmp files should be gone
sf1 = 1
time.sleep(1) # give time for the files being deleted
file_count = len(os.listdir(f))
self.assertTrue(file_count > 3);
def test_save_load(self):
# Check top level load function, with no suffix
with util.TempDirectory() as f:
sf = SFrame(data=self.dataframe, format='dataframe')
sf.save(f)
sf2 = gl.load_sframe(f)
self.__test_equal(sf2, self.dataframe)
# Check individual formats with the SFrame constructor
formats = ['.csv']
for suffix in formats:
with tempfile.NamedTemporaryFile(suffix=suffix) as f:
sf = SFrame(data=self.dataframe, format='dataframe')
sf.save(f.name)
sf2 = SFrame(f.name)
sf2['int_data'] = sf2['int_data'].astype(int)
sf2['float_data'] = sf2['float_data'].astype(float)
self.__test_equal(sf2, self.dataframe)
g=SArray([['a','b',3],[{'a':'b'}],[1,2,3]])
g2=SFrame()
g2['x']=g
g2.save(f.name)
g3=gl.SFrame.read_csv(f.name,column_type_hints=list)
self.__test_equal(g2, g3.to_dataframe())
        # Make sure this file doesn't exist before testing
self.assertRaises(IOError, lambda: SFrame(data='__no_such_file__.frame_idx', format='sframe'))
# Bad permission
test_dir = 'test_dir'
if os.path.exists(test_dir):
os.removedirs(test_dir)
os.makedirs(test_dir, mode=0000)
with self.assertRaises(IOError):
sf.save(os.path.join(test_dir, 'bad.frame_idx'))
# Permissions will affect this test first, so no need
# to write something here
with self.assertRaises(IOError):
sf2 = SFrame(os.path.join(test_dir, 'bad.frame_idx'))
# cleanup
os.removedirs(test_dir)
del sf2
def test_save_to_csv(self):
with tempfile.NamedTemporaryFile(suffix='csv', delete=True) as f:
sf = SFrame(data=self.dataframe, format='dataframe')
sf.save(f.name, format='csv')
sf2 = SFrame.read_csv(f.name + '.csv', column_type_hints={'int_data': int, 'float_data': float, 'string_data': str})
self.__test_equal(sf2, self.dataframe)
def _remove_sframe_files(self, prefix):
filelist = [ f for f in os.listdir(".") if f.startswith(prefix) ]
for f in filelist:
os.remove(f)
def test_creation_from_csv_on_server(self):
# create from 'remote' csv url
with tempfile.NamedTemporaryFile(suffix='.csv') as csvfile:
basesf = SFrame(self.dataframe)
basesf.save(csvfile.name, format="csv")
# Read csv giving type hints
#sf = SFrame(data='remote://' + csvfile.name, format='csv',
# column_type_hints={'int_data': int, 'float_data': float, 'string_data': str})
sf = SFrame.read_csv('remote://' + csvfile.name,
column_type_hints={'int_data': int, 'float_data': float, 'string_data': str})
self.__test_equal(sf, self.dataframe)
        # Read csv without giving type hints, all columns will be string type
sf = SFrame(data='remote://' + csvfile.name, format='csv')
self.assertEquals(sf['float_data'].dtype(), int)
sf['float_data'] = sf['float_data'].astype(float)
self.__test_equal(sf, self.dataframe)
def test_creation_from_txt(self):
with tempfile.NamedTemporaryFile(suffix='.txt') as f:
df = self.dataframe[['string_data']]
df.to_csv(f.name, index=False)
sf = SFrame(f.name)
self.assertEquals(sf['string_data'].dtype(), int)
sf['string_data'] = sf['string_data'].astype(str)
self.__test_equal(sf, df)
with tempfile.NamedTemporaryFile(suffix='.txt.gz') as fgzip:
f_in = open(f.name, 'rb')
f_out = gzip.open(fgzip.name, 'wb')
f_out.writelines(f_in)
f_out.close()
f_in.close()
sf = SFrame(fgzip.name)
self.assertEquals(sf['string_data'].dtype(), int)
sf['string_data'] = sf['string_data'].astype(str)
self.__test_equal(sf, df)
def test_creation_from_csv_on_local(self):
if (isinstance(glconnect.get_server(), server.LocalServer)):
if os.path.exists('./foo.csv'):
os.remove('./foo.csv')
with open('./foo.csv', 'w') as f:
url = f.name
basesf = SFrame(self.dataframe)
basesf.save(url, format="csv")
f.close()
sf = SFrame('./foo.csv')
self.assertEquals(sf['float_data'].dtype(), int)
sf['float_data'] = sf['float_data'].astype(float)
self.__test_equal(sf, self.dataframe)
sf = SFrame(url)
self.assertEquals(sf['float_data'].dtype(), int)
sf['float_data'] = sf['float_data'].astype(float)
self.__test_equal(sf, self.dataframe)
os.remove(url)
# test Windows line endings
if os.path.exists('./windows_lines.csv'):
os.remove('./windows_lines.csv')
windows_file_url = None
with open('./windows_lines.csv', 'w') as f:
windows_file_url = f.name
def_writer = csv.writer(f, dialect='excel')
column_list = ['numbers']
def_writer.writerow(column_list)
for i in self.int_data:
def_writer.writerow([i])
sf = SFrame.read_csv('./windows_lines.csv', column_type_hints={'numbers':int})
self.assertEquals(sf.column_names(), column_list)
self.assertEquals(sf.column_types(), [int])
self.assertEquals(list(sf['numbers'].head()), self.int_data)
sf = SFrame.read_csv('./windows_lines.csv', column_type_hints={'numbers':list}, error_bad_lines=False)
self.assertEquals(sf.column_names(), column_list)
self.assertEquals(sf.num_rows(), 0)
os.remove(windows_file_url)
def test_creation_from_csv_on_http(self):
pass
# sf = SFrame(data=self.url, use_header=False)
# self.__test_equal(sf, pd.DataFrame({'1': self.a_to_z}))
def test_creation_from_csv_on_s3(self):
# Requires s3 account for jenkins
# sf = SFrame(data='s3://graphlab-testdata/foo.csv')
# print sf.head(sf.num_rows())
pass
def test_creation_from_csv_dir_local(self):
csv_dir = "./csv_dir"
if os.path.exists(csv_dir):
shutil.rmtree(csv_dir)
os.mkdir(csv_dir)
for i in range(0, 100):
with open(os.path.join(csv_dir, 'foo.%d.csv' % i), 'w') as f:
url = f.name
self.dataframe.to_csv(url, index=False)
f.close()
singleton_sf = SFrame.read_csv(os.path.join(csv_dir, "foo.0.csv"))
self.assertEquals(singleton_sf.num_rows(), 10)
many_sf = SFrame.read_csv(csv_dir)
self.assertEquals(many_sf.num_rows(), 1000)
glob_sf = SFrame.read_csv(os.path.join(csv_dir, "foo.*2.csv"))
self.assertEquals(glob_sf.num_rows(), 100)
with self.assertRaises(RuntimeError):
SFrame.read_csv("missingdirectory")
with self.assertRaises(ValueError):
SFrame.read_csv("")
shutil.rmtree(csv_dir)
def test_creation_from_iterable(self):
# Normal dict of lists
the_dict = {'ints':self.int_data,'floats':self.float_data,'strings':self.string_data}
sf = SFrame(the_dict)
df = pd.DataFrame(the_dict)
self.__test_equal(sf, df)
# Test that a missing value does not change the data type
the_dict['ints'][0] = None
sf = SFrame(the_dict)
self.assertEquals(sf['ints'].dtype(), int)
# numpy.nan is actually a float, so it should cast the column to float
the_dict['ints'][0] = np.nan
sf = SFrame(the_dict)
self.assertEquals(sf['ints'].dtype(), float)
# Just a single list
sf = SFrame(self.int_data)
df = pd.DataFrame(self.int_data)
df.columns = ['X1']
self.__test_equal(sf, df)
# Normal list of lists
list_of_lists = [[1.0,2.0,3.0],[4.0,5.0,6.0],[7.0,8.0,9.0]]
sf = SFrame(list_of_lists)
cntr = 0
for i in sf:
self.assertEquals(list_of_lists[cntr], list(i['X1']))
cntr += 1
self.assertEquals(sf.num_columns(), 1)
the_dict = {'ints':self.int_data,'floats':self.float_data,'strings':self.string_data}
sf = SFrame(the_dict)
sf2 = SFrame({'ints':sf['ints'],'floats':sf['floats'],'strings':sf['strings']})
df = pd.DataFrame(the_dict)
self.__test_equal(sf2, df)
sf2 = SFrame([sf['ints'],sf['floats'],sf['strings']])
self.assertEquals(['X1','X2','X3'],sf2.column_names())
sf2.rename({'X1':'ints','X2':'floats','X3':'strings'})
sf2=sf2[['floats','ints','strings']]
self.__test_equal(sf2, df)
def test_head_tail(self):
sf = SFrame(data=self.dataframe)
assert_frame_equal(sf.head(4).to_dataframe(), self.dataframe.head(4))
# Cannot test for equality the same way because of dataframe indices
taildf = sf.tail(4)
for i in range(0, 4):
self.assertEqual(taildf['int_data'][i], self.dataframe['int_data'][i+6])
self.assertEqual(taildf['float_data'][i], self.dataframe['float_data'][i+6])
self.assertEqual(taildf['string_data'][i], self.dataframe['string_data'][i+6])
def test_head_tail_edge_case(self):
sf = SFrame()
self.assertEquals(sf.head().num_columns(), 0)
self.assertEquals(sf.tail().num_columns(), 0)
self.assertEquals(sf.head().num_rows(), 0)
self.assertEquals(sf.tail().num_rows(), 0)
sf = SFrame()
sf['a'] = []
self.assertEquals(sf.head().num_columns(), 1)
self.assertEquals(sf.tail().num_columns(), 1)
self.assertEquals(sf.head().num_rows(), 0)
self.assertEquals(sf.tail().num_rows(), 0)
def test_transform(self):
sf = SFrame(data=self.dataframe)
for i in range(sf.num_cols()):
colname = sf.column_names()[i]
sa = sf.apply(lambda x: x[colname], sf.column_types()[i])
self.__assert_sarray_equal(sa, sf[sf.column_names()[i]])
sa = sf.apply(lambda x: x['int_data'] + x['float_data'], float)
self.__assert_sarray_equal(sf['int_data'] + sf['float_data'], sa)
def test_transform_with_type_inference(self):
sf = SFrame(data=self.dataframe)
for i in range(sf.num_cols()):
colname = sf.column_names()[i]
sa = sf.apply(lambda x: x[colname])
self.__assert_sarray_equal(sa, sf[sf.column_names()[i]])
sa = sf.apply(lambda x: x['int_data'] + x['float_data'])
self.__assert_sarray_equal(sf['int_data'] + sf['float_data'], sa)
        # SFrame apply returning a list of numeric values should produce an array (vector) column, not a list column
sa = sf.apply(lambda x: [x['int_data'], x['float_data']])
self.assertEqual(sa.dtype(), array.array);
def test_transform_with_exception(self):
sf = SFrame(data=self.dataframe)
self.assertRaises(KeyError, lambda: sf.apply(lambda x: x['some random key'])) # cannot find the key
self.assertRaises(TypeError, lambda: sf.apply(lambda x: sum(x.values()))) # lambda cannot sum int and str
self.assertRaises(ZeroDivisionError, lambda: sf.apply(lambda x: x['int_data'] / 0)) # divide by 0 error
self.assertRaises(IndexError, lambda: sf.apply(lambda x: x.values()[10])) # index out of bound error
def test_empty_transform(self):
sf = SFrame()
b = sf.apply(lambda x:x)
self.assertEquals(len(b.head()), 0)
def test_flatmap(self):
# Correctness of typical usage
n = 10
sf = SFrame({'id': range(n)})
new_sf = sf.flat_map(["id_range"], lambda x: [[str(i)] for i in range(x['id'])])
self.assertEqual(new_sf.column_names(), ["id_range"])
self.assertEqual(new_sf.column_types(), [str])
expected_col = [str(x) for i in range(n) for x in range(i)]
self.assertListEqual(list(new_sf['id_range']), expected_col)
# Empty SFrame, without explicit column types
sf = gl.SFrame()
with self.assertRaises(TypeError):
new_sf = sf.flat_map(['id_range'],
lambda x: [[i] for i in range(x['id'])])
# Empty rows successfully removed
sf = gl.SFrame({'id': range(15)})
new_sf = sf.flat_map(['id'],
lambda x: [[x['id']]] if x['id'] > 8 else [])
self.assertEqual(new_sf.num_rows(), 6)
        # If the first ten rows are all empty, type inference fails and an error is raised
with self.assertRaises(TypeError):
new_sf = sf.flat_map(['id'],
lambda x: [[x['id']]] if x['id'] > 9 else [])
def test_select_column(self):
sf = SFrame(data=self.dataframe)
sub_sf = sf.select_columns(['int_data', 'string_data'])
exp_df = pd.DataFrame({'int_data': self.int_data, 'string_data': self.string_data})
self.__test_equal(sub_sf, exp_df)
with self.assertRaises(ValueError):
sf.select_columns(['int_data', 'string_data', 'int_data'])
# test indexing
sub_col = sf['float_data']
self.assertEqual(sub_col.head(10), self.float_data)
with self.assertRaises(TypeError):
sub_sf = sf.select_columns(['duh',1])
with self.assertRaises(TypeError):
sub_sf = sf.select_columns(0)
with self.assertRaises(RuntimeError):
sub_sf = sf.select_columns(['not_a_column'])
sf = SFrame()
with self.assertRaises(RuntimeError):
sf.select_column('x')
with self.assertRaises(RuntimeError):
sf.select_columns(['x'])
sf.add_column(gl.SArray(), 'x')
# does not throw
sf.select_column('x')
sf.select_columns(['x'])
with self.assertRaises(RuntimeError):
sf.select_column('y')
with self.assertRaises(RuntimeError):
sf.select_columns(['y'])
def test_topk(self):
sf = SFrame(data=self.dataframe)
# Test that order is preserved
df2 = sf.topk('int_data').to_dataframe()
df2_expected = self.dataframe.sort('int_data', ascending=False)
df2_expected.index = range(df2.shape[0])
assert_frame_equal(df2, df2_expected)
df2 = sf.topk('float_data', 3).to_dataframe()
df2_expected = self.dataframe.sort('float_data', ascending=False).head(3)
df2_expected.index = range(3)
assert_frame_equal(df2, df2_expected)
df2 = sf.topk('string_data', 3).to_dataframe()
for i in range(0, 3):
self.assertEqual(df2['int_data'][2-i], i + 7)
with self.assertRaises(TypeError):
sf.topk(2,3)
sf = SFrame()
sf.add_column(SArray([1,2,3,4,5]), 'a')
sf.add_column(SArray([1,2,3,4,5]), 'b')
sf.topk('a', 1) # should not fail
def test_filter(self):
sf = SFrame(data=self.dataframe)
filter_sa = SArray([1,1,1,0,0,0,0,1,1,1])
sf2 = sf[filter_sa]
exp_df = sf.head(3).append(sf.tail(3))
self.__test_equal(sf2, exp_df.to_dataframe())
# filter by 1s
sf2 = sf[SArray(self.int_data)]
exp_df = sf.head(10).to_dataframe()
self.__test_equal(sf2, exp_df)
# filter by 0s
sf2 = sf[SArray([0,0,0,0,0,0,0,0,0,0])]
exp_df = sf.head(0).to_dataframe()
self.__test_equal(sf2, exp_df)
# wrong size
with self.assertRaises(IndexError):
sf2 = sf[SArray([0,1,205])]
# slightly bigger size
sf = gl.SFrame()
n = 1000000
sf['a'] = range(n)
result = sf[sf['a'] == -1]
self.assertEquals(len(result), 0)
result = sf[sf['a'] > n - 123]
self.assertEquals(len(result), 122)
l = list(result['a'])
for i in range(len(result)):
self.assertEquals(i + n - 122, l[i])
result = sf[sf['a'] < 2000]
self.assertEquals(len(result), 2000)
l = list(result['a'])
for i in range(len(result)):
self.assertEquals(i, l[i])
def test_sample_split(self):
sf = SFrame(data=self.__create_test_df(100))
entry_list = set()
for i in sf:
entry_list.add(str(i))
sample_sf = sf.sample(.12, 9)
sample_sf2 = sf.sample(.12, 9)
self.assertEqual(len(sample_sf), len(sample_sf2))
assert_frame_equal(sample_sf.head().to_dataframe(), sample_sf2.head().to_dataframe())
for i in sample_sf:
self.assertTrue(str(i) in entry_list)
with self.assertRaises(ValueError):
sf.sample(3)
sample_sf = SFrame().sample(.12, 9)
self.assertEqual(len(sample_sf), 0)
a_split = sf.random_split(.12, 9)
first_split_entries = set()
for i in a_split[0]:
first_split_entries.add(str(i))
for i in a_split[1]:
self.assertTrue(str(i) in entry_list)
self.assertTrue(str(i) not in first_split_entries)
with self.assertRaises(ValueError):
sf.random_split(3)
self.assertEqual(len(SFrame().random_split(.4)[0]), 0)
self.assertEqual(len(SFrame().random_split(.4)[1]), 0)
# tests add_column, rename
def test_edit_column_ops(self):
sf = SFrame()
# typical add column stuff
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.float_data))
sf.add_column(SArray(self.string_data))
# Make sure auto names work
names = sf.column_names()
cntr = 1
for i in names:
self.assertEquals("X"+str(cntr), i)
cntr = cntr + 1
# Remove a column
del sf['X2']
# names
names = sf.column_names()
self.assertEquals(len(names), 2)
self.assertEquals('X1', names[0])
self.assertEquals('X3', names[1])
# check content
self.assertEquals(sf['X1'].head(10), self.int_data)
self.assertEquals(sf['X3'].head(10), self.string_data)
# check that a new automatically named column will not conflict
sf.add_column(SArray(self.string_data))
names = sf.column_names()
self.assertEquals(len(names), 3)
uniq_set = set()
for i in names:
uniq_set.add(i)
if len(uniq_set) == 1:
self.assertEquals(list(sf[i].head(10)), self.int_data)
else:
self.assertEquals(list(sf[i].head(10)), self.string_data)
self.assertEquals(len(uniq_set), 3)
# replacing columns preserves order
names = sf.column_names()
for n in names:
sf[n] = sf[n].apply(lambda x: x)
self.assertEquals(sf.column_names(), names)
# do it again!
del sf['X1']
sf.add_column(SArray(self.string_data))
names = sf.column_names()
self.assertEquals(len(names), 3)
uniq_set = set()
for i in names:
uniq_set.add(i)
self.assertEquals(list(sf[i].head(10)), self.string_data)
self.assertEquals(len(uniq_set), len(names))
# standard rename
rename_dict = {'X3':'data','X3.1':'more_data','X3.2':'even_more'}
sf.rename(rename_dict)
self.assertEquals(sf.column_names(), ['data','more_data','even_more'])
# rename a column to a name that's already taken
with self.assertRaises(RuntimeError):
sf.rename({'data':'more_data'})
# try to rename a column that doesn't exist
with self.assertRaises(ValueError):
sf.rename({'foo':'bar'})
# pass something other than a dict
with self.assertRaises(TypeError):
sf.rename('foo')
# Setting a column to const preserves order
names = sf.column_names()
for n in names:
sf[n] = 1
self.assertEquals(sf.column_names(), names)
def test_remove_column(self):
sf = SFrame()
# typical add column stuff
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.float_data))
sf.add_column(SArray(self.string_data))
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])
sf2 = sf.remove_column('X3')
assert sf is sf2
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X4', 'X5'])
sf2 = sf.remove_columns(['X2', 'X5'])
assert sf is sf2
self.assertEquals(sf.column_names(), ['X1', 'X4'])
# with a generator expression
sf2 = sf.remove_columns((n for n in ['X1', 'X5'] if n in sf.column_names()))
assert sf is sf2
self.assertEquals(sf.column_names(), ['X4'])
def test_remove_bad_column(self):
sf = SFrame()
# typical add column stuff
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.float_data))
sf.add_column(SArray(self.string_data))
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])
self.assertRaises(KeyError, lambda: sf.remove_column('bad'))
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])
self.assertRaises(KeyError, lambda: sf.remove_columns(['X1', 'X2', 'X3', 'bad', 'X4']))
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])
def __generate_synthetic_sframe__(self, num_users):
"""
        Generate synthetic collaborative data: for each of the num_users
        users, user i has watched movies 1, ..., i, with
        rating(i, j) = i + j
        length(i, j) = i - j
"""
sf = SFrame()
sparse_matrix = {}
for i in range(1, num_users + 1):
sparse_matrix[i] = [(j, i + j, i - j) for j in range(1, i + 1)]
user_ids = []
movie_ids = []
ratings = []
length_of_watching = []
for u in sparse_matrix:
user_ids += [u] * len(sparse_matrix[u])
movie_ids += [x[0] for x in sparse_matrix[u]]
ratings += [x[1] for x in sparse_matrix[u]]
length_of_watching += [x[2] for x in sparse_matrix[u]]
# typical add column stuff
sf['user_id'] = (SArray(user_ids, int))
sf['movie_id'] = (SArray(movie_ids, str))
sf['rating'] = (SArray(ratings, float))
sf['length'] = (SArray(length_of_watching, int))
return sf
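    # Worked example: __generate_synthetic_sframe__(2) produces three rows
    # (user i watches movies 1..i; rating = i + j, length = i - j), in some
    # row order:
    #
    #     user_id  movie_id  rating  length
    #        1       '1'       2.0      0
    #        2       '1'       3.0      1
    #        2       '2'       4.0      0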
def test_aggregate_ops(self):
"""
Test builtin groupby aggregators
"""
for m in [1, 10, 20, 50, 100]:
values = range(m)
vector_values = [[random.randint(1,100) for num in range(10)] for y in range(m)]
sf = gl.SFrame()
sf['key'] = [1] * m
sf['value'] = values
sf['vector_values'] = vector_values
sf.__materialize__()
built_ins = [gl.aggregate.COUNT(), gl.aggregate.SUM('value'),
gl.aggregate.AVG('value'), gl.aggregate.MIN('value'), gl.aggregate.MAX('value'),
gl.aggregate.VAR('value'), gl.aggregate.STDV('value'), gl.aggregate.SUM('vector_values'), gl.aggregate.MEAN('vector_values')]
sf2 = sf.groupby('key', built_ins)
self.assertEqual(sf2['Count'], m)
self.assertEqual(sf2['Sum of value'], sum(values))
self.assertEqual(sf2['Avg of value'], np.mean(values))
self.assertEqual(sf2['Min of value'], min(values))
self.assertEqual(sf2['Max of value'], max(values))
self.assertEqual(sf2['Var of value'], np.var(values))
self.assertEqual(sf2['Stdv of value'], np.std(values))
# For vectors
self.assertEqual(sf2['Vector Sum of vector_values'], np.sum(vector_values, axis=0))
self.assertEqual(sf2['Vector Avg of vector_values'], np.mean(vector_values, axis=0))
def test_aggregate_ops_on_lazy_frame(self):
"""
Test builtin groupby aggregators
"""
for m in [1, 10, 20, 50, 100]:
values = range(m)
vector_values = [[random.randint(1,100) for num in range(10)] for y in range(m)]
sf = gl.SFrame()
sf['key'] = [1] * m
sf['value'] = values
sf['vector_values'] = vector_values
sf['value'] = sf['value'] + 0
built_ins = [gl.aggregate.COUNT(), gl.aggregate.SUM('value'),
gl.aggregate.AVG('value'), gl.aggregate.MIN('value'), gl.aggregate.MAX('value'),
gl.aggregate.VAR('value'), gl.aggregate.STDV('value'), gl.aggregate.SUM('vector_values'), gl.aggregate.MEAN('vector_values')]
sf2 = sf.groupby('key', built_ins)
self.assertEqual(sf2['Count'], m)
self.assertEqual(sf2['Sum of value'], sum(values))
self.assertEqual(sf2['Avg of value'], np.mean(values))
self.assertEqual(sf2['Min of value'], min(values))
self.assertEqual(sf2['Max of value'], max(values))
self.assertEqual(sf2['Var of value'], np.var(values))
self.assertEqual(sf2['Stdv of value'], np.std(values))
self.assertEqual(sf2['Vector Sum of vector_values'], np.sum(vector_values, axis=0))
self.assertEqual(sf2['Vector Avg of vector_values'], np.mean(vector_values, axis=0))
def test_aggregate_ops2(self):
"""
Test builtin groupby aggregators using explicit named columns
"""
for m in [1, 10, 20, 50, 100]:
values = range(m)
vector_values = [[random.randint(1,100) for num in range(10)] for y in range(m)]
sf = gl.SFrame()
sf['key'] = [1] * m
sf['value'] = values
sf['vector_values'] = vector_values
built_ins = {'count':gl.aggregate.COUNT, 'sum':gl.aggregate.SUM('value'),
'avg':gl.aggregate.AVG('value'),
'avg2':gl.aggregate.MEAN('value'), 'min':gl.aggregate.MIN('value'), 'max':gl.aggregate.MAX('value'),
'var':gl.aggregate.VAR('value'), 'var2':gl.aggregate.VARIANCE('value'),
'stdv':gl.aggregate.STD('value'), 'stdv2':gl.aggregate.STDV('value'),'vector_sum': gl.aggregate.SUM('vector_values'),'vector_mean': gl.aggregate.MEAN('vector_values')}
sf2 = sf.groupby('key', built_ins)
self.assertEqual(sf2['count'], m)
self.assertEqual(sf2['sum'], sum(values))
self.assertEqual(sf2['avg'], np.mean(values))
self.assertEqual(sf2['avg2'], np.mean(values))
self.assertEqual(sf2['min'], min(values))
self.assertEqual(sf2['max'], max(values))
self.assertEqual(sf2['var'], np.var(values))
self.assertEqual(sf2['var2'], np.var(values))
self.assertEqual(sf2['stdv'], np.std(values))
self.assertEqual(sf2['stdv2'], np.std(values))
self.assertEqual(sf2['vector_sum'], np.sum(vector_values, axis=0))
self.assertEqual(sf2['vector_mean'], np.mean(vector_values, axis=0))
def test_groupby(self):
"""
Test builtin groupby and aggregate on different column types
"""
num_users = 500
sf = self.__generate_synthetic_sframe__(num_users=num_users)
built_ins = [gl.aggregate.COUNT(), gl.aggregate.SUM('rating'),
gl.aggregate.AVG('rating'), gl.aggregate.MIN('rating'), gl.aggregate.MAX('rating'),
gl.aggregate.VAR('rating'), gl.aggregate.STDV('rating')]
built_in_names = ['Sum', 'Avg', 'Min', 'Max', 'Var', 'Stdv']
"""
Test groupby user_id and aggregate on rating
"""
sf_user_rating = sf.groupby('user_id', built_ins)
actual = sf_user_rating.column_names()
expected = ['%s of rating' % v for v in built_in_names] + ['user_id'] + ['Count']
self.assertSetEqual(set(actual), set(expected))
for row in sf_user_rating:
uid = row['user_id']
mids = range(1, uid + 1)
ratings = [uid + i for i in mids]
expected = [len(ratings), sum(ratings), np.mean(ratings), min(ratings), max(ratings), np.var(ratings), np.sqrt(np.var(ratings))]
actual = [row['Count']] + [row['%s of rating' % op] for op in built_in_names]
for i in range(len(actual)):
self.assertAlmostEqual(actual[i], expected[i])
"""
Test that count can be applied on empty aggregate column.
"""
sf_user_rating = sf.groupby("user_id", {'counter': gl.aggregate.COUNT()})
actual = {x['user_id']: x['counter'] for x in sf_user_rating}
expected = {i: i for i in range(1, num_users + 1)}
self.assertDictEqual(actual, expected)
"""
Test groupby movie_id and aggregate on length_of_watching
"""
built_ins = [gl.aggregate.COUNT(), gl.aggregate.SUM('length'),
gl.aggregate.AVG('length'), gl.aggregate.MIN('length'), gl.aggregate.MAX('length'),
gl.aggregate.VAR('length'), gl.aggregate.STDV('length')]
sf_movie_length = sf.groupby('movie_id', built_ins)
actual = sf_movie_length.column_names()
expected = ['%s of length' % v for v in built_in_names] + ['movie_id'] + ['Count']
self.assertSetEqual(set(actual), set(expected))
for row in sf_movie_length:
mid = row['movie_id']
uids = range(int(mid), num_users + 1)
values = [i - int(mid) for i in uids]
expected = [len(values), sum(values), np.mean(values), min(values), max(values), np.var(values), np.std(values)]
actual = [row['Count']] + [row['%s of length' % op] for op in built_in_names]
for i in range(len(actual)):
self.assertAlmostEqual(actual[i], expected[i])
def test_quantile_groupby(self):
sf = self.__generate_synthetic_sframe__(num_users=500)
# max and min rating for each user
g = sf.groupby('user_id', [gl.aggregate.MIN('rating'),
gl.aggregate.MAX('rating'),
gl.aggregate.QUANTILE('rating', 0, 1)])
self.assertEquals(len(g), 500)
for row in g:
minrating = row['Min of rating']
maxrating = row['Max of rating']
arr = list(row['Quantiles of rating'])
self.assertEquals(len(arr), 2)
self.assertEquals(arr[0], minrating)
self.assertEquals(arr[1], maxrating)
def test_argmax_argmin_groupby(self):
sf = self.__generate_synthetic_sframe__(num_users=500)
sf_ret = sf.groupby('user_id', {'movie with max rating':gl.aggregate.ARGMAX('rating','movie_id'),
'movie with min rating':gl.aggregate.ARGMIN('rating','movie_id')})
self.assertEquals(len(sf_ret), 500)
self.assertEqual(sf_ret["movie with max rating"].dtype(), str)
self.assertEqual(sf_ret["movie with min rating"].dtype(), str)
self.assertEqual(sf_ret["user_id"].dtype(), int)
# make sure we have computed correctly.
max_d = {}
min_d = {}
for i in sf:
key = i['user_id']
if key not in max_d:
max_d[key] = (i['movie_id'],i['rating'])
min_d[key] = (i['movie_id'],i['rating'])
else:
if max_d[key][1] < i['rating']:
max_d[key] = (i['movie_id'],i['rating'])
if min_d[key][1] > i['rating']:
min_d[key] = (i['movie_id'],i['rating'])
for i in sf_ret:
key = i['user_id']
self.assertEqual(i["movie with max rating"],max_d[key][0])
self.assertEqual(i["movie with min rating"],min_d[key][0])
def test_multicolumn_groupby(self):
sf = self.__generate_synthetic_sframe__(num_users=500)
sf_um = sf.groupby(["user_id", "movie_id"], gl.aggregate.COUNT)
# I can query it
t = sf_um.to_dataframe()
self.assertEqual(sf_um["user_id"].dtype(), int)
self.assertEqual(sf_um["movie_id"].dtype(), str)
# make sure we have counted correctly
d = {}
for i in sf:
key = str(i['user_id']) + "," + i["movie_id"]
if key not in d:
d[key] = 0
d[key] = d[key] + 1
for i in sf_um:
key = str(i['user_id']) + "," + i["movie_id"]
self.assertTrue(key in d)
self.assertEqual(i['Count'], d[key])
sf_um = sf.groupby(["movie_id", "user_id"], gl.aggregate.COUNT())
# I can query it
t = sf_um.to_dataframe()
self.assertEqual(sf_um["user_id"].dtype(), int)
self.assertEqual(sf_um["movie_id"].dtype(), str)
# make sure we have counted correctly
d = {}
for i in sf:
key = str(i['user_id']) + "," + i["movie_id"]
if key not in d:
d[key] = 0
d[key] = d[key] + 1
for i in sf_um:
key = str(i['user_id']) + "," + i["movie_id"]
self.assertTrue(key in d)
self.assertEqual(i['Count'], d[key])
def __assert_concat_result_equal(self, result, expected, list_columns):
self.assertEqual(result.num_columns(), expected.num_columns())
for column in result.column_names():
c1 = result[column]
c2 = expected[column]
self.assertEqual(c1.dtype(), c2.dtype())
self.assertEqual(c1.size(), c2.size())
if (column in list_columns):
for i in range(len(c1)):
if c1[i] is None:
self.assertTrue(c2[i] is None)
continue
if (c1.dtype() == dict):
for k in c1[i]:
self.assertEqual(c2[i][k], c1[i][k])
else:
s1 = list(c1[i])
if s1 is not None: s1.sort()
s2 = list(c2[i])
if s2 is not None: s2.sort()
self.assertEqual(s1, s2)
else:
self.assertEqual(list(c1),list(c2))
def test_groupby_dict_key(self):
t = gl.SFrame({'a':[{1:2},{3:4}]})
with self.assertRaises(TypeError):
t.groupby('a', {})
def test_concat(self):
sf = SFrame()
sf['a'] = [1,1,1,1, 2,2,2, 3, 4,4, 5]
sf['b'] = [1,2,1,2, 3,3,1, 4, None, 2, None]
sf['c'] = ['a','b','a','b', 'e','e', None, 'h', 'i','j', 'k']
sf['d'] = [1.0,2.0,1.0,2.0, 3.0,3.0,1.0, 4.0, None, 2.0, None]
result = sf.groupby('a', gl.aggregate.CONCAT('b'))
expected_result = SFrame({
'a': [1,2,3,4, 5],
'List of b': [[1,1,2,2],[1,3,3],[4],[2], []]
})
expected_result['List of b'] = expected_result['List of b'].astype(list)
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['List of b'])
result = sf.groupby('a', gl.aggregate.CONCAT('d'))
expected_result = SFrame({
'a': [1,2,3,4, 5],
'List of d': [[1,1,2,2],[1,3,3],[4],[2], []]
})
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['List of d'])
result = sf.groupby('a', {'c_c' :gl.aggregate.CONCAT('c')})
expected_result = SFrame({
'a': [1,2,3,4, 5],
'c_c': [['a','b','a','b'],['e','e'],['h'],['i','j'], ['k']]
})
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['c_c'])
result = sf.groupby('a', gl.aggregate.CONCAT('b','c'))
expected_result = SFrame({
'a': [1,2,3,4,5],
'Dict of b_c': [{1:'a',2:'b'},{3:'e', 1: None},{4:'h'},{2:'j'}, {}]
})
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['Dict of b_c'])
result = sf.groupby('a', {'c_b':gl.aggregate.CONCAT('c','b')})
expected_result = SFrame({
'a': [1,2,3,4,5],
'c_b': [{'a':1, 'b':2},{'e':3},{'h':4},{'i':None, 'j':2},{'k':None}]
})
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['c_b'])
result = sf.groupby('a', {'cs':gl.aggregate.CONCAT('c'), 'bs':gl.aggregate.CONCAT('b')})
expected_result = SFrame({
'a': [1,2,3,4,5],
'bs': [[1,1,2,2],[1,3,3],[4],[2], []],
'cs': [['a','b','a','b'],['e','e'],['h'],['i','j'], ['k']]
})
expected_result['bs'] = expected_result['bs'].astype(list)
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['bs','cs'])
# exceptions: fail if no column is given or the column does not exist
with self.assertRaises(TypeError):
sf.groupby('a', gl.aggregate.CONCAT())
with self.assertRaises(KeyError):
sf.groupby('a', gl.aggregate.CONCAT('nonexist'))
def test_select_one(self):
sf = SFrame({'a':[1,1,2,2,3,3,4,4,5,5],'b':[1,2,3,4,5,6,7,8,9,10]})
res = list(sf.groupby('a', {'b':gl.aggregate.SELECT_ONE('b')}))
self.assertEqual(len(res), 5)
for i in res:
self.assertTrue(i['b'] == 2 * i['a'] or i['b'] == 2 * i['a'] - 1)
def test_unique(self):
sf = SFrame({'a':[1,1,2,2,3,3,4,4,5,5],'b':[1,2,3,4,5,6,7,8,9,10]})
self.assertEqual(len(sf.unique()), 10)
vals = [1,1,2,2,3,3,4,4, None, None]
sf = SFrame({'a':vals,'b':vals})
res = sf.unique()
self.assertEqual(len(res), 5)
self.assertEqual(sorted(list(res['a'])), sorted([1,2,3,4,None]))
self.assertEqual(sorted(list(res['b'])), sorted([1,2,3,4,None]))
def test_append_all_match(self):
sf1 = SFrame(data=self.dataframe)
sf2 = SFrame(data=self.dataframe2)
new_sf = sf1.append(sf2)
assert_frame_equal(self.dataframe.append(self.dataframe2, ignore_index=True), new_sf.to_dataframe())
def test_append_lazy(self):
sf1 = SFrame(data=self.dataframe)
sf2 = SFrame(data=self.dataframe2)
new_sf = sf1.append(sf2)
self.assertTrue(new_sf.__is_materialized__())
filter_sf1 = SArray([1 for i in range(sf1.num_rows())] + [0 for i in range(sf2.num_rows())])
filter_sf2 = SArray([0 for i in range(sf1.num_rows())] + [1 for i in range(sf2.num_rows())])
new_sf1 = new_sf[filter_sf1]
new_sf2 = new_sf[filter_sf2]
assert_frame_equal(self.dataframe.append(self.dataframe2, ignore_index=True), new_sf.to_dataframe())
assert_frame_equal(sf1.to_dataframe(), new_sf1.to_dataframe())
assert_frame_equal(sf2.to_dataframe(), new_sf2.to_dataframe())
row = sf1.head(1)
sf = SFrame()
for i in range(10):
sf = sf.append(row)
df = sf.to_dataframe()
for i in range(10):
self.assertEqual(list(df.iloc[[i]]), list(sf.head(1).to_dataframe().iloc[[0]]))
def test_recursive_append(self):
sf = SFrame()
for i in range(200):
sf = sf.append(SFrame(data = self.dataframe))
#consume
sf.__materialize__()
def test_print_sframe(self):
sf = SFrame()
def _test_print():
sf.__repr__()
sf._repr_html_()
sf.print_rows()
n = 20
sf['int'] = [i for i in range(n)]
sf['float'] = [float(i) for i in range(n)]
sf['str'] = [str(i) for i in range(n)]
uc = '\xe5\xa4\xa7\xe5\xa4\xb4' # dato pronounced in chinese, big head
sf['unicode'] = [uc for i in range(n)]
sf['array'] = [array.array('d', [i]) for i in range(n)]
sf['list'] = [[i, float(i), [i]] for i in range(n)]
utc = dt.datetime.strptime('2011-01-21 02:37:21', '%Y-%m-%d %H:%M:%S')
sf['dt'] = [utc for i in range(n)]
sf['img'] = [Image() for i in range(n)]
sf['long_str'] = ["".join([str(i)] * 50) for i in range(n)]
sf['long_unicode'] = ["".join([uc] * 50) for i in range(n)]
sf['bad_unicode'] = ['\x9d' + uc for i in range(n)]
_test_print()
def test_print_lazy_sframe(self):
sf1 = SFrame(data=self.dataframe)
self.assertTrue(sf1.__is_materialized__())
sf2 = sf1[sf1['int_data'] > 3]
sf2.__repr__()
sf2.__str__()
self.assertFalse(sf2.__is_materialized__())
len(sf2)
self.assertTrue(sf2.__is_materialized__())
def test_append_order_diff(self):
# name match but column type order not match
sf1 = SFrame(data=self.dataframe)
sf2 = SFrame(data=self.dataframe2)
sf2.swap_columns('int_data', 'string_data')
new_sf = sf1.append(sf2)
assert_frame_equal(self.dataframe.append(self.dataframe2, ignore_index=True), new_sf.to_dataframe())
def test_append_empty_sframe(self):
sf = SFrame(data=self.dataframe)
other = SFrame()
# non empty append empty
assert_frame_equal(sf.append(other).to_dataframe(), self.dataframe)
# empty append non empty
assert_frame_equal(other.append(sf).to_dataframe(), self.dataframe)
#empty append empty
assert_frame_equal(other.append(other).to_dataframe(), pd.DataFrame())
def test_append_exception(self):
sf = SFrame(data=self.dataframe)
# column number not match
other = SFrame()
other.add_column(SArray(), "test")
self.assertRaises(RuntimeError, lambda: sf.append(other)) # column not the same
# column name not match
other = SFrame()
names = sf.column_names()
for name in sf.column_names():
other.add_column(SArray(), name)
names[0] = 'some name not match'
self.assertRaises(RuntimeError, lambda: sf.append(other))
# name match but column type order not match
sf1 = SFrame(data=self.dataframe)
sf2 = SFrame(data=self.dataframe2)
#change one column type
sf1["int_data"] = sf2.select_column("int_data").astype(float)
self.assertRaises(RuntimeError, lambda: sf1.append(sf2))
def test_simple_joins(self):
inner_expected = SFrame()
inner_expected.add_column(SArray(['Robinson','Jones','Smith','Heisenberg','Rafferty']), 'last_name')
inner_expected.add_column(SArray([34,33,34,33,31]), 'dep_id')
inner_expected.add_column(SArray(['Clerical','Engineering','Clerical','Engineering','Sales']), 'dep_name')
# Tests the "natural join" case
beg = time.time()
res = self.employees_sf.join(self.departments_sf)
end = time.time()
print "Really small join: " + str(end-beg) + " s"
self.__assert_join_results_equal(res, inner_expected)
left_join_row = SFrame()
left_join_row.add_column(SArray(['John']), 'last_name')
left_join_row.add_column(SArray([None], int), 'dep_id')
left_join_row.add_column(SArray([None], str), 'dep_name')
left_expected = inner_expected.append(left_join_row)
# Left outer join, passing string to 'on'
res = self.employees_sf.join(self.departments_sf, how='left', on='dep_id')
self.__assert_join_results_equal(res, left_expected)
right_join_row = SFrame()
right_join_row.add_column(SArray([None], str), 'last_name')
right_join_row.add_column(SArray([35]), 'dep_id')
right_join_row.add_column(SArray(['Marketing']), 'dep_name')
right_expected = inner_expected.append(right_join_row)
# Right outer join, passing list to 'on'
res = self.employees_sf.join(self.departments_sf, how='right', on=['dep_id'])
self.__assert_join_results_equal(res, right_expected)
outer_expected = left_expected.append(right_join_row)
# Full outer join, passing dict to 'on'
res = self.employees_sf.join(self.departments_sf, how='outer', on={'dep_id':'dep_id'})
self.__assert_join_results_equal(res, outer_expected)
# Test a join on non-matching key
res = self.employees_sf.join(self.departments_sf, on={'last_name':'dep_name'})
self.assertEquals(res.num_rows(), 0)
self.assertEquals(res.num_cols(), 3)
self.assertEquals(res.column_names(), ['last_name', 'dep_id', 'dep_id.1'])
# Test a join on a non-unique key
bad_departments = SFrame()
bad_departments['dep_id'] = SArray([33,33,31,31])
bad_departments['dep_name'] = self.departments_sf['dep_name']
no_pk_expected = SFrame()
no_pk_expected['last_name'] = SArray(['Rafferty','Rafferty','Heisenberg','Jones','Heisenberg','Jones'])
no_pk_expected['dep_id'] = SArray([31,31,33,33,33,33])
no_pk_expected['dep_name'] = SArray(['Clerical','Marketing','Sales','Sales','Engineering','Engineering'])
res = self.employees_sf.join(bad_departments, on='dep_id')
self.__assert_join_results_equal(res, no_pk_expected)
# Left join on non-unique key
bad_departments = bad_departments.append(right_join_row[['dep_id', 'dep_name']])
bad_departments = bad_departments.append(right_join_row[['dep_id', 'dep_name']])
no_pk_expected = no_pk_expected.append(right_join_row)
no_pk_expected = no_pk_expected.append(right_join_row)
no_pk_expected = no_pk_expected[['dep_id', 'dep_name', 'last_name']]
res = bad_departments.join(self.employees_sf, on='dep_id', how='left')
self.__assert_join_results_equal(res, no_pk_expected)
def test_big_composite_join(self):
# Create a semi large SFrame with composite primary key (letter, number)
letter_keys = []
number_keys = []
data = []
for i in string.ascii_lowercase:
for j in range(0,100):
letter_keys.append(i)
number_keys.append(j)
which = j % 3
if which == 0:
data.append(string.ascii_uppercase)
elif which == 1:
data.append(string.digits)
elif which == 2:
data.append(string.hexdigits)
pk_gibberish = SFrame()
pk_gibberish['letter'] = SArray(letter_keys, str)
pk_gibberish['number'] = SArray(number_keys, int)
pk_gibberish['data'] = SArray(data, str)
# Some rows that won't match
more_data = []
more_letter_keys = []
more_number_keys = []
for i in range(0,40000):
more_data.append('fish')
more_letter_keys.append('A')
more_number_keys.append(200)
for i in range(0,80):
for j in range(100,1000):
more_data.append('waffles')
more_letter_keys.append(letter_keys[j])
more_number_keys.append(number_keys[j])
# Non-matching row in this stretch
if j == 147:
more_letter_keys[-1] = 'A'
for i in range(0,5000):
more_data.append('pizza')
more_letter_keys.append('Z')
more_number_keys.append(400)
join_with_gibberish = SFrame()
join_with_gibberish['data'] = SArray(more_data, str)
join_with_gibberish['moredata'] = SArray(more_data, str)
join_with_gibberish['a_number'] = SArray(more_number_keys, int)
join_with_gibberish['a_letter'] = SArray(more_letter_keys, str)
expected_answer = SFrame()
exp_letter = []
exp_number = []
exp_data = []
for i in range(0,80):
exp_letter.extend(letter_keys[100:147])
exp_number.extend(number_keys[100:147])
exp_letter.extend(letter_keys[148:1000])
exp_number.extend(number_keys[148:1000])
exp_data.extend(data[100:147])
exp_data.extend(data[148:1000])
expected_answer['letter'] = SArray(exp_letter, str)
expected_answer['number'] = SArray(exp_number, int)
expected_answer['data'] = SArray(exp_data, str)
expected_answer['data.1'] = 'waffles'
expected_answer['moredata'] = 'waffles'
beg = time.time()
res = pk_gibberish.join(join_with_gibberish, on={'letter':'a_letter','number':'a_number'})
end = time.time()
print "Join took " + str(end-beg) + " seconds"
self.__assert_join_results_equal(res, expected_answer)
def test_convert_dataframe_empty(self):
sf = SFrame()
sf['a'] = gl.SArray([], int)
df = sf.to_dataframe()
self.assertEqual(df['a'].dtype, int)
sf1 = SFrame(df)
self.assertEquals(sf1['a'].dtype(), int)
self.assertEqual(sf1.num_rows(), 0)
def test_replace_one_column(self):
sf = SFrame()
sf['a'] = [1,2,3]
self.assertEquals(sf['a'], [1,2,3])
# this should succeed because we are replacing the only existing column
sf['a'] = [1,2]
self.assertEquals(sf['a'], [1,2])
# a failed replacement should leave the original sframe unchanged
with self.assertRaises(TypeError):
sf['a'] = [1,2,'a']
self.assertEquals(sf['a'], [1,2])
# replacing a column with a different length should fail if there is more than one column
sf = SFrame()
sf['a'] = [1,2,3]
sf['b'] = ['a', 'b', 'c']
with self.assertRaises(RuntimeError):
sf['a'] = [1,2]
def test_filter_by(self):
# Set up SFrame to filter by
sf = SFrame()
sf.add_column(SArray(self.int_data), "ints")
sf.add_column(SArray(self.float_data), "floats")
sf.add_column(SArray(self.string_data), "strings")
# Normal cases
res = sf.filter_by(SArray(self.int_data), "ints")
self.__assert_join_results_equal(res, sf)
res = sf.filter_by(SArray(self.int_data), "ints", exclude=True)
self.assertEquals(list(res), [])
res = sf.filter_by([5,6], "ints")
exp = SFrame()
exp.add_column(SArray(self.int_data[4:6]), "ints")
exp.add_column(SArray(self.float_data[4:6]), "floats")
exp.add_column(SArray(self.string_data[4:6]), "strings")
self.__assert_join_results_equal(res, exp)
exp_opposite = SFrame()
exp_opposite.add_column(SArray(self.int_data[:4]+self.int_data[6:]), "ints")
exp_opposite.add_column(SArray(self.float_data[:4]+self.float_data[6:]), "floats")
exp_opposite.add_column(SArray(self.string_data[:4]+self.string_data[6:]), "strings")
res = sf.filter_by([5,6], "ints", exclude=True)
self.__assert_join_results_equal(res, exp_opposite)
exp_one = SFrame()
exp_one.add_column(SArray(self.int_data[4:5]), "ints")
exp_one.add_column(SArray(self.float_data[4:5]), "floats")
exp_one.add_column(SArray(self.string_data[4:5]), "strings")
exp_all_but_one = SFrame()
exp_all_but_one.add_column(SArray(self.int_data[:4]+self.int_data[5:]), "ints")
exp_all_but_one.add_column(SArray(self.float_data[:4]+self.float_data[5:]), "floats")
exp_all_but_one.add_column(SArray(self.string_data[:4]+self.string_data[5:]), "strings")
res = sf.filter_by(5, "ints")
self.__assert_join_results_equal(res, exp_one)
res = sf.filter_by(5, "ints", exclude=True)
self.__assert_join_results_equal(res, exp_all_but_one)
res = sf.filter_by("5", "strings")
self.__assert_join_results_equal(res, exp_one)
res = sf.filter_by(5, "ints", exclude=True)
self.__assert_join_results_equal(res, exp_all_but_one)
# Only values that are absent from the column
res = sf.filter_by([77,77,88,88], "ints")
# Test against empty SFrame with correct columns/types
self.__assert_join_results_equal(res, exp_one[exp_one['ints'] == 9000])
res = sf.filter_by([77,77,88,88], "ints", exclude=True)
self.__assert_join_results_equal(res, sf)
# Duplicate values
res = sf.filter_by([6,6,5,5,6,5,5,6,5,5,5], "ints")
self.__assert_join_results_equal(res, exp)
res = sf.filter_by([6,6,5,5,6,5,5,6,5,5,5], "ints", exclude=True)
self.__assert_join_results_equal(res, exp_opposite)
# Duplicate and missing
res = sf.filter_by([11,12,46,6,6,55,5,5], "ints")
self.__assert_join_results_equal(res, exp)
res = sf.filter_by([11,12,46,6,6,55,5,5], "ints", exclude=True)
self.__assert_join_results_equal(res, exp_opposite)
# Type mismatch
with self.assertRaises(TypeError):
res = sf.filter_by(["hi"], "ints")
# Column doesn't exist
with self.assertRaises(KeyError):
res = sf.filter_by([1,2], "intssss")
# Something that can't be turned into an SArray
with self.assertRaises(Exception):
res = sf.filter_by({1:2,3:4}, "ints")
# column_name not given as string
with self.assertRaises(TypeError):
res = sf.filter_by(1,2)
# Duplicate column names after join. Should be last because of the
# renames.
sf.rename({'ints':'id','floats':'id1','strings':'id11'})
exp.rename({'ints':'id','floats':'id1','strings':'id11'})
exp_opposite.rename({'ints':'id','floats':'id1','strings':'id11'})
res = sf.filter_by([5,6], "id")
self.__assert_join_results_equal(res, exp)
res = sf.filter_by([5,6], "id", exclude=True)
self.__assert_join_results_equal(res, exp_opposite)
def __test_to_from_dataframe(self, data, type):
sf = SFrame()
sf['a'] = data
df = sf.to_dataframe()
sf1 = SFrame(df)
self.assertTrue(sf1.dtype()[0]== type)
df = pd.DataFrame({'val': data})
sf1 = SFrame(df)
self.assertTrue(sf1.dtype()[0]== type)
def test_to_from_dataframe(self):
self.__test_to_from_dataframe([1,2,3], int)
self.__test_to_from_dataframe(['a', 'b', 'c'], str)
self.__test_to_from_dataframe([1.0, 2.0, 3.0], float)
self.__test_to_from_dataframe([[1, 'b', {'a': 1}], [1,2,3]], list)
self.__test_to_from_dataframe([{'a':1, 1:None}, {'b':2}], dict)
self.__test_to_from_dataframe([[1,2],[1,2],[]], array.array)
def test_pack_columns_exception(self):
sf = SFrame()
sf['a'] = [1, 2, 3, None, None]
sf['b'] = [None, '2', '3', None, '5']
sf['c'] = [None, 2.0, 3.0, None, 5.0]
# cannot pack non array value into array
with self.assertRaises(TypeError):
sf.pack_columns(dtype=array.array)
# cannot give a non-numeric na value to array
with self.assertRaises(ValueError):
sf.pack_columns(dtype=array.array, fill_na='c')
# cannot pack nonexistent columns
with self.assertRaises(ValueError):
sf.pack_columns(['d','a'])
# cannot pack fewer than two columns
with self.assertRaises(ValueError):
sf.pack_columns(['a'])
# dtype has to be dict/array/list
with self.assertRaises(ValueError):
sf.pack_columns(dtype=str)
# pack duplicate columns
with self.assertRaises(ValueError):
sf.pack_columns(['a','a'])
# pack a subset of columns to array; should fail for columns that are not numeric
with self.assertRaises(TypeError):
sf.pack_columns(['a','b'], dtype=array.array)
with self.assertRaises(TypeError):
sf.pack_columns(column_prefix = 1)
with self.assertRaises(ValueError):
sf.pack_columns(column_prefix = '1')
with self.assertRaises(ValueError):
sf.pack_columns(column_prefix = 'c', columns=['a', 'b'])
def test_pack_columns2(self):
from graphlab import SFrame, SArray
sf = SFrame()
sf['id'] = [1, 2, 3, 4]
sf['category.a'] = [None, '2', '3', None]
sf['category.b'] = [None, 2.0, None, 4.0]
expected = SArray([
[None, None],
['2', 2.0],
['3', None],
[None, 4.0]])
result = sf.pack_columns(column_prefix='category')
self.assertEqual(result.column_names(), ['id', 'category'])
self.__assert_sarray_equal(result['id'], sf['id'])
self.__assert_sarray_equal(result['category'], expected)
result = sf.pack_columns(column_prefix='category', new_column_name="new name")
self.assertEqual(result.column_names(), ['id', 'new name'])
self.__assert_sarray_equal(result['id'], sf['id'])
self.__assert_sarray_equal(result['new name'], expected)
# default dtype is list
result = sf.pack_columns(column_prefix='category', dtype=list)
self.assertEqual(result.column_names(), ['id', 'category'])
self.__assert_sarray_equal(result['category'], expected)
# remove prefix == True by default
expected = SArray([
{},
{'a':'2', 'b':2.0},
{'a':'3'},
{'b':4.0}
])
result = sf.pack_columns(column_prefix='category', dtype=dict)
self.__assert_sarray_equal(result['category'], expected)
# remove prefix == False
expected = SArray([
{},
{'category.a':'2', 'category.b':2.0},
{'category.a':'3'},
{'category.b':4.0}
])
result = sf.pack_columns(column_prefix='category', dtype=dict, remove_prefix=False)
self.assertEqual(result.column_names(), ['id', 'category'])
self.__assert_sarray_equal(result['category'], expected)
# fill_na
expected = SArray([
{'a':1, 'b':1},
{'a':'2', 'b':2.0},
{'a':'3', 'b':1},
{'a':1, 'b':4.0}
])
result = sf.pack_columns(column_prefix='category', dtype=dict, fill_na = 1)
self.__assert_sarray_equal(result['category'], expected)
def test_pack_columns(self):
sf = SFrame()
sf['id'] = [1, 2, 3, 4, 5]
sf['b'] = [None, '2', '3', None, '5']
sf['c'] = [None, 2.0, 3.0, None, 5.0]
expected_all_default = SArray([
[1, None, None],
[2, '2', 2.0],
[3, '3', 3.0],
[4, None, None],
[5, '5', 5.0]
])
# pack all columns, all default values
self.__assert_sarray_equal(sf.pack_columns()['X1'], expected_all_default)
expected_ab_default = SArray([
[1, None],
[2, '2'],
[3, '3'],
[4, None],
[5, '5']
])
expected_all_fillna_1 = SArray([
[1, -1, -1],
[2, '2', 2.0],
[3, '3', 3.0],
[4, -1, -1],
[5, '5', 5.0]
])
# pack all columns do not drop na and also fill with some value
result = sf.pack_columns(fill_na=-1)
self.assertEqual(result.column_names(), ['X1'])
self.__assert_sarray_equal(result['X1'], expected_all_fillna_1)
# pack partial columns, all default value
result = sf.pack_columns(['id','b'])
self.assertEqual(result.column_names(), ['c','X2'])
self.__assert_sarray_equal(result['c'], sf['c'])
self.__assert_sarray_equal(result['X2'], expected_ab_default)
expected_sarray_ac_fillna_default = SArray([
[1, float('NaN')],
[2, 2.0],
[3, 3.0],
[4, float('NaN')],
[5, 5.0]
])
result = sf.pack_columns(['id','c'], dtype=array.array)
self.assertEqual(result.column_names(), ['b', 'X2'])
self.__assert_sarray_equal(result['b'], sf['b'])
self.__assert_sarray_equal(result['X2'], expected_sarray_ac_fillna_default)
expected_dict_default = SArray([
{'id': 1},
{'id': 2, 'b':'2', 'c': 2.0},
{'id': 3, 'b':'3', 'c': 3.0},
{'id':4 },
{'id':5, 'b':'5', 'c': 5.0}
])
result = sf.pack_columns(dtype=dict)
self.__assert_sarray_equal(result['X1'], expected_dict_default)
expected_dict_fillna = SArray([
{'id': 1, 'b':-1, 'c': -1},
{'id': 2, 'b':'2', 'c': 2.0},
{'id': 3, 'b':'3', 'c': 3.0},
{'id': 4, 'b':-1, 'c': -1},
{'id': 5, 'b':'5', 'c': 5.0}
])
result = sf.pack_columns(dtype=dict, fill_na=-1)
self.__assert_sarray_equal(result['X1'], expected_dict_fillna)
# pack large number of rows
sf = SFrame()
num_rows = 100000
sf['a'] = range(0, num_rows);
sf['b'] = range(0, num_rows);
result = sf.pack_columns(['a', 'b']);
self.assertEqual(len(result), num_rows);
def test_pack_columns_dtype(self):
a = SFrame({'name':[-140500967,-1405039672],'data':[3,4]})
b = a.pack_columns(['name','data'],dtype=array.array)
expected = SArray([[-140500967, 3],[-1405039672,4]])
self.__assert_sarray_equal(b['X1'], expected)
def test_unpack_list(self):
sa = SArray([
[1, None, None],
[2, '2', 2.0],
[3, '3', 3.0],
[4, None, None],
[5, '5', 5.0]
])
expected = SFrame()
expected ['a'] = [1, 2, 3, 4, 5]
expected ['b'] = [None, '2', '3', None, '5']
expected ['c'] = [None, 2.0, 3.0, None, 5.0]
result = sa.unpack();
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
result = sa.unpack(column_name_prefix='ttt');
self.assertEqual(result.column_names(), ['ttt.0', 'ttt.1', 'ttt.2'])
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# column types
result = sa.unpack(column_types=[int, str, float]);
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# more column types
result = sa.unpack(column_types=[int, str, float, int]);
result.rename(dict(zip(result.column_names(), ['a','b','c','d'])))
e = expected.select_columns(['a','b','c'])
e.add_column(SArray([None for i in range(5)], int),'d')
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
# fewer column types
result = sa.unpack(column_types=[int, str]);
result.rename(dict(zip(result.column_names(), ['a','b'])))
e = expected.select_columns(['a','b'])
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
# fill na_value
e = SFrame()
e['a'] = [1, 2, None, 4, 5]
e['b'] = [None, '2', '3', None, '5']
e['c'] = [None, 2.0, None, None, 5.0]
result = sa.unpack(na_value=3);
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
# wrong length
with self.assertRaises(TypeError):
sa.unpack(column_name_prefix=['a','b'])
# wrong type
with self.assertRaises(RuntimeError):
sa.unpack(column_types = [str, int, float])
# wrong limit types
with self.assertRaises(TypeError):
sa.unpack(limit=["1"])
# int array cannot be unpacked
with self.assertRaises(TypeError):
SArray([1,2,3,4]).unpack()
# column name must be a string
with self.assertRaises(TypeError):
sa.unpack(1)
# invalid column type
with self.assertRaises(TypeError):
sa.unpack(column_types = int)
# invalid column type
with self.assertRaises(TypeError):
sa.unpack(column_types = [np.array])
# cannot infer type if no values
with self.assertRaises(RuntimeError):
SArray([], list).unpack()
def test_unpack_array(self):
sa = SArray([
[1, 1, 0],
[2, -1, 1],
[3, 3, 2],
[-1, 2, 3],
[5, 5, 4]
])
expected = SFrame()
expected ['a'] = [1.0, 2.0, 3.0, -1.0, 5.0]
expected ['b'] = [1.0, -1.0, 3.0, 2.0, 5.0]
expected ['c'] = [0.0, 1.0, 2.0, 3.0, 4.0]
result = sa.unpack();
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# right number of column names
result = sa.unpack(column_name_prefix = 'unpacked');
result.rename(dict(zip(result.column_names(), ['t.0', 't.1', 't.2'])))
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# column types
result = sa.unpack(column_types=[int, str, float]);
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
expected['a'] = expected['a'].astype(int)
expected['b'] = expected['b'].astype(str)
expected['c'] = expected['c'].astype(float)
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# more column types
result = sa.unpack(column_types=[int, str, float, int]);
result.rename(dict(zip(result.column_names(), ['a','b','c','d'])))
e = expected.select_columns(['a','b','c'])
e.add_column(SArray([None for i in range(5)], int),'d')
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
# fewer column types
result = sa.unpack(column_types=[int, str]);
result.rename(dict(zip(result.column_names(), ['a','b'])))
e = expected.select_columns(['a','b'])
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
# fill na_value
e = SFrame()
e['a'] = gl.SArray([1, 2, 3, None, 5], float)
e['b'] = gl.SArray([1, None, 3, 2, 5], float)
e['c'] = gl.SArray([0, 1, 2, 3, 4], float)
result = sa.unpack(na_value=-1);
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
def test_unpack_dict(self):
sf = SFrame()
sf["user_id"] = [1,2,3,4,5,6,7]
sf["is_restaurant"] = [1, 1,0,0, 1, None, None]
sf["is_retail"] = [None,1,1,None,1, None, None]
sf["is_electronics"] = ["yes", "no","yes",None,"no", None, None]
packed_sf = gl.SFrame()
packed_sf['user_id'] = sf['user_id']
packed_sf["category"] = [
{"is_restaurant": 1, "is_electronics": "yes"},
{"is_restaurant": 1, "is_retail": 1, "is_electronics": "no"},
{"is_restaurant": 0, "is_retail": 1, "is_electronics": "yes"},
{"is_restaurant": 0 },
{"is_restaurant": 1, "is_retail": 1, "is_electronics": "no"},
{ },
None]
with self.assertRaises(TypeError):
packed_sf['user_id'].unpack()
with self.assertRaises(TypeError):
packed_sf['category'].unpack(1)
with self.assertRaises(TypeError):
packed_sf['category'].unpack(value_types = [int])
# unpack only one column
expected_sf = gl.SFrame()
expected_sf["is_retail"] = sf["is_retail"]
unpacked_sf = packed_sf['category'].unpack(limit=["is_retail"], column_types=[int], column_name_prefix=None)
assert_frame_equal(unpacked_sf.to_dataframe(), expected_sf.to_dataframe())
# unpack all
unpacked_sf = packed_sf['category'].unpack(column_name_prefix=None, column_types=[int, int, str], limit=["is_restaurant", "is_retail", "is_electronics"])
assert_frame_equal(unpacked_sf.to_dataframe(), sf[["is_restaurant", "is_retail", "is_electronics"]].to_dataframe())
# auto-infer types; the column order may differ, so sort columns before comparison
unpacked_sf = packed_sf["category"].unpack()
unpacked_sf.rename({
"X.is_restaurant": "is_restaurant",
"X.is_retail": "is_retail",
"X.is_electronics": "is_electronics"
})
assert_frame_equal(unpacked_sf.to_dataframe().sort(axis=1), sf[["is_restaurant", "is_retail", "is_electronics"]].to_dataframe().sort(axis=1))
unpacked_sf = packed_sf["category"].unpack(na_value = 0, column_name_prefix="new")
expected = SFrame()
expected["new.is_restaurant"] = [1, 1,None,None, 1, None, None]
expected["new.is_retail"] = [None,1,1,None,1, None, None]
expected["new.is_electronics"] = ["yes", "no","yes",None,"no", None, None]
assert_frame_equal(unpacked_sf.to_dataframe().sort(axis=1), expected.to_dataframe().sort(axis=1))
# unpack a dictionary with integer keys
from graphlab import SArray
sa = SArray([
{1: 'a'},
{2: 'b'}
])
result = sa.unpack()
expected = SFrame({'X.1':['a', None], 'X.2':[None, 'b']})
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
result = sa.unpack(limit=[2])
expected = SFrame({'X.2':[None, 'b']})
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
result = sa.unpack(limit=[2], column_name_prefix="expanded")
expected = SFrame({'expanded.2':[None, 'b']})
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
sa = gl.SArray([{i:i} for i in range(500)])
unpacked_sa = sa.unpack()
self.assertEqual(len(unpacked_sa), len(sa))
i = 0
for v in unpacked_sa:
for j in range(500):
val = v['X.' + str(j)]
if (j == i):
self.assertEqual(val, i);
else:
self.assertEqual(val, None);
i = i + 1
# if types don't agree, convert to string automatically
sa = gl.SArray([{'a':1},{'a': 'a_3'}])
sf = sa.unpack()
self.assertEqual(sf.column_types(), [str])
sa = gl.SArray([{'a':None}, {'a': 1}])
sf = sa.unpack()
self.assertEqual(sf.column_types(), [int])
sa = gl.SArray([{'a':1}, {'a': None}])
sf = sa.unpack()
self.assertEqual(sf.column_types(), [int])
# type inference is done on the server side even if limit is given
sa = gl.SArray([{'c'+str(i): i if i % 2 == 0 else 'v' + str(i)} for i in range(1000)])
unpacked = sa.unpack(limit=['c'+str(i) for i in range(10)], column_name_prefix="")
for i in range(10):
v = unpacked[i]
for j in range(10):
if (j != i):
self.assertEqual(v['c'+str(j)], None)
elif j % 2 == 0:
self.assertEqual(v['c'+str(j)], j)
else:
self.assertEqual(v['c'+str(j)], 'v' + str(j))
def test_unpack_sframe(self):
from graphlab import SFrame, SArray
import graphlab as gl
sf = gl.SFrame()
sf['user_id'] = range(7)
sf["category"] = [
{"is_restaurant": 1, "is_electronics": "yes"},
{"is_restaurant": 1, "is_retail": 1, "is_electronics": "no"},
{"is_restaurant": 0, "is_retail": 1, "is_electronics": "yes"},
{"is_restaurant": 0 },
{"is_restaurant": 1, "is_retail": 1, "is_electronics": "no"},
{ },
None]
sf['list'] = [
None,
range(1),
range(2),
range(3),
range(1),
range(2),
range(3),
]
with self.assertRaises(TypeError):
sf.unpack('user_id')
expected = SFrame()
expected['user_id'] = sf['user_id']
expected['list'] = sf['list']
expected["is_restaurant"] = [1, 1,0,0, 1, None, None]
expected["is_retail"] = [None,1,1,None,1, None, None]
expected["is_electronics"] = ["yes", "no","yes",None,"no", None, None]
result = sf.unpack('category')
result.rename({
'category.is_restaurant': 'is_restaurant',
'category.is_retail': 'is_retail',
'category.is_electronics': 'is_electronics'
})
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='category', column_name_prefix="")
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='category', column_name_prefix="abc")
result.rename({
'abc.is_restaurant': 'is_restaurant',
'abc.is_retail': 'is_retail',
'abc.is_electronics': 'is_electronics'
})
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='category', column_name_prefix="", column_types=[str], limit=['is_restaurant'])
new_expected = expected[['user_id', 'list', 'is_restaurant']]
new_expected['is_restaurant'] = new_expected['is_restaurant'].astype(str)
assert_frame_equal(new_expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='category', column_name_prefix="", na_value = None)
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='list')
expected = SFrame()
expected['user_id'] = sf['user_id']
expected['list.0'] = [None,0,0,0, 0,0,0]
expected['list.1'] = [None,None,1,1, None,1,1]
expected['list.2'] = [None,None,None,2, None, None,2]
expected['category'] = sf['category']
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='list', na_value= 2)
expected = SFrame()
expected['user_id'] = sf['user_id']
expected['list.0'] = [None,0,0,0, 0,0,0]
expected['list.1'] = [None,None,1,1, None,1,1]
expected['list.2'] = [None,None,None,None, None, None,None]
expected['category'] = sf['category']
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
# automatically resolve conflicting names
sf = SFrame()
sf['a'] = range(100)
sf['b'] = [range(5) for i in range(100)]
sf['b.0'] = range(100)
sf['b.0.1'] = range(100)
result = sf.unpack('b')
self.assertEqual(result.column_names(), ['a', 'b.0', 'b.0.1', 'b.0.1.1', 'b.1.1.1', 'b.2.1.1', 'b.3.1.1', 'b.4.1.1'])
sf = SFrame()
sf['a'] = range(100)
sf['b'] = [{'str1': i, 'str2':i + 1} for i in range(100)]
sf['b.str1'] = range(100)
result = sf.unpack('b')
self.assertEqual(len(result.column_names()), 4)
def test_stack_dict(self):
sf = SFrame()
sf["user_id"] = [1,2,3,4,5]
sf["user_name"] = ['user' + str(i) for i in list(sf['user_id'])]
sf["category"] = [
{"is_restaurant": 1, },
{"is_restaurant": 0, "is_retail": 1 },
{ "is_retail": 0 },
{},
None]
expected_sf = SFrame();
expected_sf["user_id"] = [1,2, 2, 3,4,5]
expected_sf["user_name"] = ['user' + str(i) for i in list(expected_sf['user_id'])]
expected_sf['category'] = ['is_restaurant', 'is_restaurant', 'is_retail', 'is_retail', None, None]
expected_sf['value'] = [1,0,1,0, None, None]
df_expected = expected_sf.to_dataframe().sort(['user_id', 'category']).reset_index(drop=True)
with self.assertRaises(TypeError):
sf.stack()
with self.assertRaises(ValueError):
sf.stack('sss')
with self.assertRaises(ValueError):
sf.stack('category', ['user_id', 'value'])
# normal case
stacked_sf = sf.stack('category', ['category', 'value'])
assert_frame_equal(stacked_sf.to_dataframe().sort(["user_id", "category"]).reset_index(drop=True), df_expected)
# set column types
stacked_sf = sf.stack('category')
self.assertTrue(stacked_sf.column_types()[2] == str)
self.assertTrue(stacked_sf.column_types()[3] == int)
# auto generate column names
stacked_sf = sf.stack('category')
new_column_names = stacked_sf.column_names()
self.assertTrue(len(new_column_names) == 4)
expected_sf.rename({'category':new_column_names[2], 'value':new_column_names[3]})
df_expected = expected_sf.to_dataframe().sort(['user_id', new_column_names[2]]).reset_index(drop=True)
assert_frame_equal(stacked_sf.to_dataframe().sort(["user_id", new_column_names[2]]).reset_index(drop=True), df_expected)
#dropna
expected_sf = SFrame();
expected_sf["user_id"] = [1,2, 2, 3, 4, 5]
expected_sf["user_name"] = ['user' + str(i) for i in list(expected_sf['user_id'])]
expected_sf['category'] = ['is_restaurant', 'is_restaurant', 'is_retail', 'is_retail', None, None]
expected_sf['value'] = [1,0,1,0, None, None]
df_expected = expected_sf.to_dataframe().sort(['user_id', 'category']).reset_index(drop=True)
stacked_sf = sf.stack('category', ['category','value'], drop_na = False)
assert_frame_equal(stacked_sf.to_dataframe().sort(["user_id", "category"]).reset_index(drop=True), df_expected)
def test_stack_list(self):
sf = SFrame()
sf["a"] = [1,2,3,4,5]
sf["b"] = [['a', 'b'], ['c'], ['d'],['e', None], None]
expected_result = SFrame()
expected_result['a'] = [1,1,2,3,4,4,5]
expected_result['X1'] = ['a','b','c','d','e',None, None]
with self.assertRaises(TypeError):
sf.stack()
with self.assertRaises(ValueError):
sf.stack('sss')
with self.assertRaises(TypeError):
sf.stack('a')
with self.assertRaises(TypeError):
sf.stack('b', ["something"])
result = sf.stack("b", drop_na = False)
stacked_column_name = result.column_names()[1]
expected_result.rename({'X1':stacked_column_name})
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
# default drop_na=False
result = sf.stack("b")
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
result = sf.stack("b", new_column_name = "b", drop_na = False)
expected_result.rename({stacked_column_name: 'b'})
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
result = sf.stack("b", new_column_name = "b", drop_na = False)
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
# drop_na=True
result = sf.stack("b", drop_na = True)
expected_result = SFrame()
expected_result['a'] = [1,1,2,3,4,4]
expected_result[result.column_names()[1]] = ['a','b','c','d','e',None]
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
sf = SFrame()
n = 1000000
sf['a'] = range(1,n)
sf['b'] = [[str(i), str(i+1)] for i in range(1,n)]
result = sf.stack('b')
self.assertEqual(len(result), (n - 1) * 2)
def test_stack_vector(self):
sf = SFrame()
sf["a"] = [1,2,3,4,5]
sf["b"] = [[1],[1,2],[1,2,3],[1,2,3,4],None]
expected_result = SFrame()
expected_result['a'] = [1,2,2,3,3,3,4,4,4,4,5]
expected_result['X1'] = [1,1,2,1,2,3,1,2,3,4,None]
with self.assertRaises(TypeError):
sf.stack()
with self.assertRaises(ValueError):
sf.stack('sss')
with self.assertRaises(TypeError):
sf.stack('a')
with self.assertRaises(TypeError):
sf.stack('b', ["something"])
result = sf.stack("b", drop_na = False)
stacked_column_name = result.column_names()[1]
expected_result.rename({'X1':stacked_column_name})
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
# default drop_na=False
result = sf.stack("b")
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
result = sf.stack("b", new_column_name = "b", drop_na = False)
expected_result.rename({stacked_column_name: 'b'})
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
result = sf.stack("b", new_column_name = "b", drop_na = False)
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
# drop_na=True
result = sf.stack("b", drop_na = True)
expected_result = SFrame()
expected_result['a'] = [1,2,2,3,3,3,4,4,4,4]
expected_result[result.column_names()[1]] = gl.SArray([1,1,2,1,2,3,1,2,3,4], float)
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
def test_unstack_dict(self):
sf = SFrame()
sf["user_id"] = [1,2,3,4]
sf["user_name"] = ['user' + str(i) for i in list(sf['user_id'])]
sf["categories"] = [
{"is_restaurant": 1, },
{"is_restaurant": 0, "is_retail": 1 },
{ "is_retail": 0 },
None]
stacked_sf = sf.stack('categories', ['category', 'value'], drop_na=False)
# normal unstack
unstacked_sf = stacked_sf.unstack(column=['category', 'value'], new_column_name = 'categories')
# these frames are *almost* equal except user4 will be {} instead of None
assert_frame_equal(sf.fillna('categories',{}).to_dataframe(), unstacked_sf.to_dataframe().sort("user_id").reset_index(drop=True))
# missing new column name
unstacked_sf = stacked_sf.unstack(['category', 'value'])
self.assertEqual(len(unstacked_sf.column_names()), 3)
unstacked_sf.rename({unstacked_sf.column_names()[2] : 'categories'})
assert_frame_equal(sf.fillna('categories',{}).to_dataframe(), unstacked_sf.to_dataframe().sort("user_id").reset_index(drop=True))
# missing column names
with self.assertRaises(KeyError):
stacked_sf.unstack(['category','value1'])
# wrong input
with self.assertRaises(TypeError):
stacked_sf.unstack(['category'])
# duplicate new column name
with self.assertRaises(RuntimeError):
unstacked_sf = stacked_sf.unstack(['category', 'value'], 'user_name')
def test_unstack_list(self):
sf = SFrame()
sf['a'] = [1,2,3,4]
sf['b'] = [range(10), range(20), range(30), range(50)]
stacked_sf = sf.stack('b', new_column_name = 'new_b')
unstacked_sf = stacked_sf.unstack('new_b', new_column_name = 'b')
self.__assert_concat_result_equal(sf.sort('a'), unstacked_sf.sort('a'), ['b'])
unstacked_sf = stacked_sf.unstack('new_b')
unstacked_sf.rename({unstacked_sf.column_names()[1]: 'b'})
self.__assert_concat_result_equal(sf.sort('a'), unstacked_sf.sort('a'), ['b'])
unstacked_sf = stacked_sf.unstack('new_b', new_column_name='b')
unstacked_sf.rename({unstacked_sf.column_names()[1]: 'b'})
self.__assert_concat_result_equal(sf.sort('a'), unstacked_sf.sort('a'), ['b'])
with self.assertRaises(RuntimeError):
stacked_sf.unstack('new_b', new_column_name='a')
with self.assertRaises(TypeError):
stacked_sf.unstack(['new_b'])
with self.assertRaises(KeyError):
stacked_sf.unstack('non exist')
def test_content_identifier(self):
sf = SFrame({"a":[1,2,3,4],"b":["1","2","3","4"]})
a1 = sf['a'].__get_content_identifier__()
a2 = sf['a'].__get_content_identifier__()
self.assertEquals(a1, a2)
def test_random_access(self):
t1 = list(range(0,100000))
t2 = [str(i) for i in t1]
t = [{'t1':t1[i], 't2':t2[i]} for i in range(len(t1))];
s = gl.SFrame({'t1':t1,'t2':t2})
# simple slices
self.__test_equal(s[1:10000], pd.DataFrame(t[1:10000]))
self.__test_equal(s[0:10000:3], pd.DataFrame(t[0:10000:3]))
self.__test_equal(s[1:10000:3], pd.DataFrame(t[1:10000:3]))
self.__test_equal(s[2:10000:3], pd.DataFrame(t[2:10000:3]))
self.__test_equal(s[3:10000:101], pd.DataFrame(t[3:10000:101]))
# negative slices
self.__test_equal(s[-5:], pd.DataFrame(t[-5:]))
self.__test_equal(s[-1:], pd.DataFrame(t[-1:]))
self.__test_equal(s[-100:-10], pd.DataFrame(t[-100:-10]))
self.__test_equal(s[-100:-10:2], pd.DataFrame(t[-100:-10:2]))
# single element reads
self.assertEqual(s[511], t[511])
self.assertEqual(s[1912],t[1912])
self.assertEqual(s[-1], t[-1])
self.assertEqual(s[-10],t[-10])
# edge case oddities
self.__test_equal(s[10:100:100], pd.DataFrame(t[10:100:100]))
self.__test_equal(s[-100:len(s):10], pd.DataFrame(t[-100:len(t):10]))
self.assertEqual(len(s[-1:-2]), 0)
self.assertEqual(len(s[-1:-1000:2]), 0)
with self.assertRaises(IndexError):
s[len(s)]
def test_sort(self):
sf = SFrame()
nrows = 100
sf['a'] = range(1, nrows)
sf['b'] = [float(i) for i in range(1,nrows)]
sf['c'] = [str(i) for i in range(1,nrows)]
sf['d'] = [[i, i+1] for i in range(1,nrows)]
reversed_sf = SFrame()
reversed_sf['a'] = range(nrows-1, 0, -1)
reversed_sf['b'] = [float(i) for i in range(nrows-1, 0, -1)]
reversed_sf['c'] = [str(i) for i in range(nrows-1, 0, -1)]
reversed_sf['d'] = [[i, i+1] for i in range(nrows-1, 0, -1)]
with self.assertRaises(TypeError):
sf.sort()
with self.assertRaises(TypeError):
sf.sort(1)
with self.assertRaises(TypeError):
sf.sort("d")
with self.assertRaises(ValueError):
sf.sort("nonexist")
with self.assertRaises(TypeError):
sf.sort({'a':True})
result = sf.sort('a')
assert_frame_equal(sf.to_dataframe(), result.to_dataframe());
result = sf.sort('a', ascending = False)
assert_frame_equal(reversed_sf.to_dataframe(), result.to_dataframe());
# sort two columns
result = sf.sort(['a', 'b'])
assert_frame_equal(sf.to_dataframe(), result.to_dataframe());
result = sf.sort(['a', 'c'], ascending = False)
assert_frame_equal(reversed_sf.to_dataframe(), result.to_dataframe());
result = sf.sort([('a', True), ('b', False)])
assert_frame_equal(sf.to_dataframe(), result.to_dataframe());
result = sf.sort([('a', False), ('b', True)])
assert_frame_equal(reversed_sf.to_dataframe(), result.to_dataframe());
def test_dropna(self):
# empty case
sf = SFrame()
self.assertEquals(len(sf.dropna()), 0)
# normal case
self.__test_equal(self.employees_sf.dropna(), self.employees_sf[0:5].to_dataframe())
test_split = self.employees_sf.dropna_split()
self.__test_equal(test_split[0], self.employees_sf[0:5].to_dataframe())
self.__test_equal(test_split[1], self.employees_sf[5:6].to_dataframe())
# create some other test sframe
test_sf = SFrame({'ints':SArray([None,None,3,4,None], int),
'floats':SArray([np.nan,2.,3.,4.,np.nan],float),
'strs':SArray(['1',np.nan,'','4',None], str),
'lists':SArray([[1],None,[],[1,1,1,1],None], list),
'dicts':SArray([{1:2},{2:3},{},{4:5},None], dict)})
# another normal, but more interesting case
self.__test_equal(test_sf.dropna(),
pd.DataFrame({'ints':[3,4],'floats':[3.,4.],'strs':['','4'],'lists':[[],[1,1,1,1]],'dicts':[{},{4:5}]}))
test_split = test_sf.dropna_split()
self.__test_equal(test_split[0], test_sf[2:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[0:2].append(test_sf[4:5]).to_dataframe())
# the 'all' case
self.__test_equal(test_sf.dropna(how='all'), test_sf[0:4].to_dataframe())
test_split = test_sf.dropna_split(how='all')
self.__test_equal(test_split[0], test_sf[0:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[4:5].to_dataframe())
# select some columns
self.__test_equal(test_sf.dropna(['ints','floats'], how='all'), test_sf[1:4].to_dataframe())
test_split = test_sf.dropna_split(['ints','floats'], how='all')
self.__test_equal(test_split[0], test_sf[1:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[0:1].append(test_sf[4:5]).to_dataframe())
self.__test_equal(test_sf.dropna('strs'), test_sf[0:4].to_dataframe())
test_split = test_sf.dropna_split('strs')
self.__test_equal(test_split[0], test_sf[0:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[4:5].to_dataframe())
self.__test_equal(test_sf.dropna(['strs','dicts']), test_sf[0:4].to_dataframe())
test_split = test_sf.dropna_split(['strs','dicts'])
self.__test_equal(test_split[0], test_sf[0:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[4:5].to_dataframe())
# bad stuff
with self.assertRaises(TypeError):
test_sf.dropna(1)
test_sf.dropna([1,2])
test_sf.dropna('strs', how=1)
test_sf.dropna_split(1)
test_sf.dropna_split([1,2])
test_sf.dropna_split('strs', how=1)
with self.assertRaises(ValueError):
test_sf.dropna('ints', how='blah')
test_sf.dropna_split('ints', how='blah')
with self.assertRaises(RuntimeError):
test_sf.dropna('dontexist')
test_sf.dropna_split('dontexist')
def test_add_row_number(self):
sf = SFrame(self.__create_test_df(400000))
sf = sf.add_row_number('id')
self.assertEquals(list(sf['id']), range(0,400000))
del sf['id']
sf = sf.add_row_number('id', -20000)
self.assertEquals(list(sf['id']), range(-20000,380000))
del sf['id']
sf = sf.add_row_number('id', 40000)
self.assertEquals(list(sf['id']), range(40000,440000))
with self.assertRaises(RuntimeError):
sf.add_row_number('id')
with self.assertRaises(TypeError):
sf = sf.add_row_number(46)
sf = sf.add_row_number('id2',start='hi')
def test_check_lazy_sframe_size(self):
# empty sframe, materialized, has_size
sf = SFrame()
self.assertTrue(sf.__is_materialized__())
self.assertTrue(sf.__has_size__())
# add one column, not materialized, has_size
sf['a'] = range(1000)
self.assertTrue(sf.__is_materialized__())
self.assertTrue(sf.__has_size__())
# materialize it, materialized, has_size
sf['a'] = range(1000)
sf.__materialize__()
self.assertTrue(sf.__is_materialized__())
self.assertTrue(sf.__has_size__())
# logical filter, not materialized, not has_size
sf = sf[sf['a'] > 5000]
self.assertFalse(sf.__is_materialized__())
self.assertFalse(sf.__has_size__())
def test_sframe_to_rdd(self):
if not HAS_PYSPARK:
print "Did not run Pyspark unit tests!"
return
sc = SparkContext('local')
# Easiest case: single column of integers
test_rdd = sc.parallelize(range(100))
sf = SFrame.from_rdd(test_rdd)
self.assertEqual(sf.num_cols(), 1)
self.assertEqual(sf.column_names(), ['X1'])
# We cast integers to floats to be safe on varying types
self.assertEquals([float(i) for i in range(0,100)], list(sf['X1']))
sc.stop()
def test_rdd_to_sframe(self):
if not HAS_PYSPARK:
print "Did not run Pyspark unit tests!"
return
sc = SparkContext('local')
# Easiest case: single column of integers
sf = SFrame({'column_name':range(100)})
test_rdd = sf.to_rdd(sc)
res = test_rdd.collect()
self.assertEquals(res, [{'column_name':long(i)} for i in range(100)])
sc.stop()
def test_column_manipulation_of_lazy_sframe(self):
import graphlab as gl
g=gl.SFrame({'a':[1,2,3,4,5],'id':[1,2,3,4,5]})
g = g[g['id'] > 2]
del g['id']
# if lazy column deletion is quirky, this will cause an exception
self.assertEquals(list(g[0:2]['a']), [3,4])
g=gl.SFrame({'a':[1,2,3,4,5],'id':[1,2,3,4,5]})
g = g[g['id'] > 2]
g.swap_columns('a','id')
# if lazy column swap is quirky, this will cause an exception
self.assertEquals(list(g[0:2]['a']), [3,4])
def test_empty_sarray(self):
with util.TempDirectory() as f:
sf = SArray()
sf.save(f)
sf2 = SArray(f)
self.assertEquals(len(sf2), 0)
def test_empty_sframe(self):
with util.TempDirectory() as f:
sf = SFrame()
sf.save(f)
sf2 = SFrame(f)
self.assertEquals(len(sf2), 0)
self.assertEquals(sf2.num_columns(), 0)
def test_none_column(self):
sf = SFrame({'a':[1,2,3,4,5]})
sf['b'] = None
self.assertEqual(sf['b'].dtype(), float)
df = pd.DataFrame({'a': [1,2,3,4,5], 'b': [None,None,None,None,None]})
self.__test_equal(sf, df)
sa = SArray.from_const(None, 100)
self.assertEquals(list(sa), [None] * 100)
self.assertEqual(sa.dtype(), float)
def test_apply_with_partial(self):
sf = SFrame({'a': [1, 2, 3, 4, 5]})
def concat_fn(character, row):
return '%s%d' % (character, row['a'])
my_partial_fn = functools.partial(concat_fn, 'x')
sa = sf.apply(my_partial_fn)
self.assertEqual(list(sa), ['x1', 'x2', 'x3', 'x4', 'x5'])
def test_apply_with_functor(self):
sf = SFrame({'a': [1, 2, 3, 4, 5]})
class Concatenator(object):
def __init__(self, character):
self.character = character
def __call__(self, row):
return '%s%d' % (self.character, row['a'])
concatenator = Concatenator('x')
sa = sf.apply(concatenator)
self.assertEqual(list(sa), ['x1', 'x2', 'x3', 'x4', 'x5'])
if __name__ == "__main__":
import sys
# Check if we are supposed to connect to another server
for i, v in enumerate(sys.argv):
if v.startswith("ipc://"):
gl._launch(v)
# The rest of the arguments need to get passed through to
# the unittest module
del sys.argv[i]
break
unittest.main()
| agpl-3.0 |
m4rx9/rna-pdb-tools | rna_tools/tools/pdbs_measure_atom_dists/pdbs_measure_atom_dists.py | 2 | 5992 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This is a quick and dirty method of comparison two RNA structures (stored in pdb files).
It measures the distance between the relevan atoms (C4') for nucleotides defined as "x" in the
sequence alignment.
author: F. Stefaniak, modified by A. Zyla, supervision of mmagnus
"""
from __future__ import print_function
from Bio.PDB import PDBParser
from scipy.spatial import distance
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
import logging
import argparse
import numpy as np
import matplotlib.pyplot as plt
# logger
logger = logging.getLogger()
handler = logging.StreamHandler()
logger.addHandler(handler)
def get_seq(alignfn, seqid):
"""Get seq from an alignment with gaps.
Args:
alignfn (str): a path to an alignment
seqid (str): seq id in an alignment
Usage::
>>> get_seq('test_data/ALN_OBJ1_OBJ2.fa', 'obj1')
SeqRecord(seq=SeqRecord(seq=Seq('GUUCAG-------------------UGAC-', SingleLetterAlphabet()), id='obj1', name='obj1', description='obj1', dbxrefs=[]), id='<unknown id>', name='<unknown name>', description='<unknown description>', dbxrefs=[])
Returns:
SeqRecord
"""
# alignment = AlignIO.read(alignfn, 'fasta')
alignment = SeqIO.index(alignfn, 'fasta')
# print SeqRecord(alignment[seqid])
sequence = SeqRecord(alignment[seqid])
return sequence
def open_pdb(pdbfn):
"""Open pdb with Biopython.
Args:
pdbfn (str): a path to a pdb structure
Returns:
PDB Biopython object: with a pdb structure
"""
parser = PDBParser()
return parser.get_structure('', pdbfn)
def find_core(seq_with_gaps1, seq_with_gaps2):
""".
Args:
seq_with_gaps1 (str): a sequence 1 from the alignment
seq_with_gaps1 (str): a sequence 2 from the alignment
Usage::
>>> find_core('GUUCAG-------------------UGAC-', 'CUUCGCAGCCAUUGCACUCCGGCUGCGAUG')
'xxxxxx-------------------xxxx-'
Returns:
core="xxxxxx-------------------xxxx-"
"""
core = "".join(["x" if (a != '-' and b != '-') else "-" for (a, b)
in zip(seq_with_gaps1, seq_with_gaps2)])
return core
def map_coords_atom(structure):
""".
Args:
structure (pdb): PDB Biopython object: with a pdb structure
Returns:
struct1dict: a list of coords for atoms
structure1realNumber: a list of residues
"""
resNumber = 0
struct1dict = {}
structure1realNumbers = {}
for res in structure.get_residues():
# print res
for atom in res:
name, coord = atom.name.strip(), atom.coord
# G C5' [-15.50800037 -7.05600023 13.91800022]
if name == atomToCompare:
# print name, coord
struct1dict[resNumber] = coord
structure1realNumbers[resNumber] = res.get_full_id()[3][1]
resNumber += 1
return struct1dict, structure1realNumbers
def get_parser():
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-v", "--verbose", help="increase output verbosity",
action="store_true")
parser.add_argument("seqid1", help="seq1 id in the alignemnt")
parser.add_argument("seqid2", help="seq2 id in the alignemnt")
parser.add_argument("alignfn", help="alignemnt in the Fasta format")
parser.add_argument("pdbfn1", help="pdb file1")
parser.add_argument("pdbfn2", help="pdb file2")
return parser
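# Illustrative command line (a sketch -- the PDB file names are hypothetical; the
# alignment file and sequence ids come from the doctest data above):
#
#   python pdbs_measure_atom_dists.py obj1 obj2 test_data/ALN_OBJ1_OBJ2.fa obj1.pdb obj2.pdb
#
# This prints a tab-separated table of residue pairs with their C4' distances and
# shows a bar plot of those distances.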
# main
if __name__ == '__main__':
import doctest
doctest.testmod()
args = get_parser().parse_args()
if args.verbose:
logger.setLevel(logging.INFO)
# if not args.outfn:
# args.outfn = args.pdbfn.replace('.pdb', '_out.pdb')
seq_with_gaps1 = get_seq(args.alignfn, args.seqid1)
seq_with_gaps2 = get_seq(args.alignfn, args.seqid2)
pdb1 = open_pdb(args.pdbfn1)
pdb2 = open_pdb(args.pdbfn2)
core = find_core(seq_with_gaps1, seq_with_gaps2)
atomToCompare = "C4'"
sep = '\t'
structure1 = pdb1
structure2 = pdb2
struct1dict, structure1realNumbers = map_coords_atom(pdb1)
struct2dict, structure2realNumbers = map_coords_atom(pdb2)
stats = []
stats.append(["res1", "res2", "distance [A]"])
resNumber = 0
seq1number = -1
seq2number = -1
for char in core:
# local sequence numbering
if seq_with_gaps1[resNumber] != '-':
seq1number += 1
# print "seq1", seq1number, seq1[resNumber]
if seq_with_gaps2[resNumber] != '-':
seq2number += 1
# print "seq2", seq2number, seq2[resNumber]
# alignment check: only the 'x' (core) positions are compared
if char == 'x':
vect1 = struct1dict[seq1number]
vect2 = struct2dict[seq2number]
stats.append([str(structure1realNumbers[seq1number]),
str(structure2realNumbers[seq2number]),
str(distance.euclidean(vect1, vect2))])
# print vect1,vect2
resNumber += 1
# struc = renumber(seq_with_gaps, pdb, args.residue_index_start)
# write_struc(struc, args.outfn)
list_res=[]
for i in stats:
print(sep.join(i))
table=(sep.join(i))
list_res.append(i)
#print (list_res)
res_matrix = np.array(list_res[1:])
# Create a bar plot of the per-residue distances
new_resi = list_res[1:]
new_resis = []
for i in new_resi:
#print (j)
new_resis.append(str(i[0]) + '/' + str(i[1]))
#print (new_resis)
list2_matrix= new_resis
list2_matrix1 = map(float, list(res_matrix[:,2]))
#print (list2_matrix1)
plt.bar(list2_matrix,list2_matrix1,facecolor='pink' )
plt.suptitle("Distance between C4' atoms of aligned residues")
plt.ylabel("distance [A]")
plt.xlabel('Residue pair (structure1/structure2)')
plt.show()
| mit |
rseubert/scikit-learn | sklearn/cross_decomposition/cca_.py | 23 | 3087 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
Whether the deflation should be done on a copy. Leave the default
set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find the weights u, v that maximizes
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
| bsd-3-clause |
JeffHeard/terrapyn | geocms/drivers/ogr.py | 1 | 11631 | # from ga_ows.views import wms, wfs
import shutil
import json
from zipfile import ZipFile
import pandas
from django.contrib.gis.geos import Polygon
import os
from osgeo import osr, ogr
from pandas import DataFrame
from shapely import wkb
from django.template.defaultfilters import slugify
import re
from . import Driver
def ogrfield(elt):
return re.sub('-', '_', slugify(elt).encode('ascii'))[0:10]
def identity(x):
return '"' + x + '"' if isinstance(x, basestring) else str(x)
dtypes = {
'int64': ogr.OFTInteger,
'float64': ogr.OFTReal,
'object': ogr.OFTString,
'datetime64[ns]': ogr.OFTDateTime
}
geomTypes = {
'GeometryCollection': ogr.wkbGeometryCollection,
'LinearRing': ogr.wkbLinearRing,
'LineString': ogr.wkbLineString,
'MultiLineString': ogr.wkbMultiLineString,
'MultiPoint': ogr.wkbMultiPoint,
'MultiPolygon': ogr.wkbMultiPolygon,
'Point': ogr.wkbPoint,
'Polygon': ogr.wkbPolygon
}
def transform(geom, crx):
if crx:
geom.Transform(crx)
return geom
class OGRDriver(Driver):
def ready_data_resource(self, **kwargs):
"""Other keyword args get passed in as a matter of course, like BBOX, time, and elevation, but this basic driver
ignores them"""
slug, srs = super(OGRDriver, self).ready_data_resource(**kwargs)
cfg = self.resource.driver_config
mapnik_config = {
'type' : 'ogr',
'base' : self.cache_path,
}
if 'sublayer' in kwargs:
mapnik_config['layer'] = kwargs['sublayer']
elif 'layer' in cfg:
mapnik_config['layer'] = cfg['layer']
elif 'layer_by_index' in cfg:
mapnik_config['layer_by_index'] = cfg['layer_by_index']
else:
mapnik_config['layer_by_index'] = 0
if 'multiple_geometries' in cfg:
mapnik_config['multiple_geometries'] = cfg['multiple_geometries']
if 'encoding' in cfg:
mapnik_config['encoding'] = cfg['encoding']
if 'string' in cfg:
mapnik_config['string'] = cfg['string']
else:
mapnik_config['file'] = self.get_master_filename()
return slug, srs, mapnik_config
def get_master_filename(self):
cfg = self.resource.driver_config
if 'file' in cfg:
return self.cache_path + '/' + cfg['file']
elif 'xtn' in cfg:
return self.get_filename(cfg['xtn'])
else:
return self.get_filename('')[:-1] # omit the trailing period and assume we're using the directory (such as for MapInfo)
def compute_spatial_metadata(self, **kwargs):
"""Other keyword args get passed in as a matter of course, like BBOX, time, and elevation, but this basic driver
ignores them"""
super(OGRDriver, self).compute_spatial_metadata(**kwargs)
# if we have a zip archive, we should expand it now
archive_filename = self.get_filename('zip')
if os.path.exists(archive_filename):
archive = ZipFile(self.cached_basename + self.src_ext)
os.mkdir(self.cached_basename) # we will put everything cached underneath the cached base directory
archive.extractall(self.cached_basename)
ds = ogr.Open(self.get_master_filename())
lyr = ds.GetLayerByIndex(0) if 'sublayer' not in kwargs else ds.GetLayerByName(kwargs['sublayer'])
xmin, xmax, ymin, ymax = lyr.GetExtent()
crs = lyr.GetSpatialRef()
self.resource.spatial_metadata.native_srs = crs.ExportToProj4()
e4326 = osr.SpatialReference()
e4326.ImportFromEPSG(4326)
crx = osr.CoordinateTransformation(crs, e4326)
x04326, y04326, _ = crx.TransformPoint(xmin, ymin)
x14326, y14326, _ = crx.TransformPoint(xmax, ymax)
self.resource.spatial_metadata.bounding_box = Polygon.from_bbox((x04326, y04326, x14326, y14326))
self.resource.spatial_metadata.native_bounding_box = Polygon.from_bbox((xmin, ymin, xmax, ymax))
self.resource.spatial_metadata.three_d = False
self.resource.spatial_metadata.save()
self.resource.save()
def get_data_fields(self, **kwargs):
_, _, result = self.ready_data_resource(**kwargs)
ds = ogr.Open(self.get_master_filename())
lyr = ds.GetLayerByIndex(0) if 'layer' not in kwargs else ds.GetLayerByName(kwargs['sublayer'])
return [(field.name, field.GetTypeName(), field.width) for field in lyr.schema]
def get_data_for_point(self, wherex, wherey, srs, **kwargs):
result, x1, y1, epsilon = super(OGRDriver, self).get_data_for_point(wherex, wherey, srs, **kwargs)
ds = ogr.Open(result['file'])
lyr = ds.GetLayerByIndex(0) if 'sublayer' not in kwargs else ds.GetLayerByName(kwargs['sublayer'])
if epsilon == 0:
lyr.SetSpatialFilter(ogr.CreateGeometryFromWkt("POINT({x1} {y1})".format(**locals())))
else:
from django.contrib.gis import geos
wkt = geos.Point(x1, y1).buffer(epsilon).wkt
print wkt
lyr.SetSpatialFilter(ogr.CreateGeometryFromWkt(wkt))
return [f.items() for f in lyr]
def attrquery(self, key, value):
key, op = key.split('__')
op = {
'gt': ">",
'gte': ">=",
'lt': "<",
'lte': '<=',
'startswith': 'LIKE',
'endswith': 'LIKE',
'istartswith': 'ILIKE',
'iendswith': 'ILIKE',
'icontains': "ILIKE",
'contains': "LIKE",
'in': 'IN',
'ne': "<>"
}[op]
value = {
'gt': identity,
'gte': identity,
'lt': identity,
'lte': identity,
'startswith': lambda x: '%' + x,
'endswith': lambda x: x + '%',
'istartswith': lambda x: '%' + x,
'iendswith': lambda x: x + '%',
'icontains': lambda x: '%' + x + '%',
'contains': lambda x: '%' + x + '%',
'in': lambda x: x if isinstance(x, basestring) else '(' + ','.join(identity(a) for a in x) + ')',
'ne': identity
}[op](value)
return ' '.join([key, op, value])
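# A quick sketch of what attrquery() produces (the field names and values below are
# hypothetical); the resulting string is handed to OGR's SetAttributeFilter in
# as_dataframe() further down:
#
#   self.attrquery('population__gte', 1000)  ->  population >= 1000
#   self.attrquery('state__ne', 'NC')        ->  state <> "NC"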
def as_dataframe(self, **kwargs):
"""
Create a pandas DataFrame for the resource's main layer. The dataframe is cached on disk for
later use, but the cached copy is only picked up if the source file's mtime is older than the
dataframe's mtime.
:param kwargs: optional filters such as bbox, boundary, query, start, count and sort_by
:return: a pandas DataFrame with a 'geometry' column of shapely geometries
"""
dfx_path = self.get_filename('dfx')
if len(kwargs) != 0:
ds = ogr.Open(self.get_master_filename())
lyr = ds.GetLayerByIndex(0)
crx = xrc = None
if 'bbox' in kwargs:
minx, miny, maxx, maxy = kwargs['bbox']
if 'srs' in kwargs:
if isinstance(kwargs['srs'], basestring):
s_srs = osr.SpatialReference()
if kwargs['srs'].lower().startswith('epsg:'):
s_srs.ImportFromEPSG(int(kwargs['srs'].split(':')[1]))
else:
s_srs.ImportFromProj4(kwargs['srs'])
else:
s_srs = kwargs['srs']
t_srs = self.resource.srs
if s_srs.ExportToProj4() != t_srs.ExportToProj4():
crx = osr.CoordinateTransformation(s_srs, t_srs)
minx, miny, _ = crx.TransformPoint(minx, miny)
maxx, maxy, _ = crx.TransformPoint(maxx, maxy)
xrc = osr.CoordinateTransformation(t_srs, s_srs)
lyr.SetSpatialFilterRect(minx, miny, maxx, maxy)
elif 'boundary' in kwargs:
boundary = ogr.Geometry(geomTypes[kwargs['boundary_type']], kwargs["boundary"])
lyr.SetSpatialFilter(boundary)
if 'query' in kwargs:
if isinstance(kwargs['query'], basestring):
query = json.loads(kwargs['query'])
else:
query = kwargs['query']
for key, value in query.items():
# keys containing '__' use the Django-style operator mapping; plain keys become an equality test
attrq = self.attrquery(key, value) if '__' in key else ' '.join([key, '=', identity(value)])
lyr.SetAttributeFilter(attrq)
start = kwargs['start'] if 'start' in kwargs else 0
count = kwargs['count'] if 'count' in kwargs else len(lyr) - start
records = []
for i in range(start):
lyr.next()
for i in range(count):
f = lyr.next()
if f.geometry():
records.append(
dict(fid=i, geometry=wkb.loads(transform(f.geometry(), xrc).ExportToWkb()), **f.items()))
df = DataFrame.from_records(
data=records,
index='fid'
)
if 'sort_by' in kwargs:
df = df.sort_index(by=kwargs['sort_by'])
return df
elif hasattr(self, '_df'):
return self._df
elif os.path.exists(dfx_path) and os.stat(dfx_path).st_mtime >= os.stat(self.get_master_filename()).st_mtime:
if self.resource.big:
self._df = pandas.read_hdf(dfx_path, 'df')
else:
self._df = pandas.read_pickle(dfx_path)
return self._df
else:
ds = ogr.Open(self.get_master_filename())
lyr = ds.GetLayerByIndex(0)
df = DataFrame.from_records(
data=[dict(fid=f.GetFID(), geometry=wkb.loads(f.geometry().ExportToWkb()), **f.items()) for f in lyr if
f.geometry()],
index='fid'
)
if self.resource.big:
df.to_hdf(dfx_path, 'df')
else:
df.to_pickle(dfx_path)
self._df = df
return self._df
@classmethod
def from_dataframe(cls, df, shp, driver, srs, in_subdir=False):
"""Write an dataframe object out as a shapefile"""
drv = ogr.GetDriverByName(driver)
if driver != 'Memory':
if in_subdir:
if os.path.exists(shp):
shutil.rmtree(shp)
os.mkdir(shp)
else:
if os.path.exists(shp):
os.unlink(shp)
ds = drv.CreateDataSource(shp)
keys = df.keys()
fieldDefns = [ogr.FieldDefn(ogrfield(name), dtypes[df[name].dtype.name]) for name in keys if name != 'geometry']
geomType = geomTypes[(f for f in df['geometry']).next().type]
l = ds.CreateLayer(
name=os.path.split(shp)[-1],
srs=srs,
geom_type=geomType
)
for f in fieldDefns:
l.CreateField(f)
for i, record in df.iterrows():
feature = ogr.Feature(l.GetLayerDefn())
for field, value in ((k, v) for k, v in record.to_dict().items() if k != 'geometry'):
if isinstance(value, basestring):
value = value.encode('ascii')
feature.SetField(ogrfield(field), value)
feature.SetGeometry(ogr.CreateGeometryFromWkb(record['geometry'].wkb))
l.CreateFeature(feature)
if driver != 'Memory': # then write to file and flush the dataset
del ds
else: # we're done. return the dataset that was created in memory.
return ds
driver = OGRDriver
| apache-2.0 |
aruneral01/auto-sklearn | autosklearn/models/paramsklearn.py | 5 | 2588 | from ParamSklearn.classification import ParamSklearnClassifier
from ParamSklearn.regression import ParamSklearnRegressor
from autosklearn.constants import *
def get_configuration_space(info, include_estimators=None,
include_preprocessors=None):
if info['task'] in REGRESSION_TASKS:
return _get_regression_configuration_space(info, include_estimators,
include_preprocessors)
else:
return _get_classification_configuration_space(info,
include_estimators,
include_preprocessors)
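# A minimal sketch of the expected ``info`` dict (only the keys actually read below are
# shown; the values and the estimator name are hypothetical):
#
#   info = {'task': BINARY_CLASSIFICATION, 'is_sparse': 0}
#   cs = get_configuration_space(info, include_estimators=['random_forest'])
#
# ``include_estimators``/``include_preprocessors`` are simply forwarded to ParamSklearn's
# get_hyperparameter_search_space().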
def _get_regression_configuration_space(info, include_estimators=None,
include_preprocessors=None):
sparse = False
if info['is_sparse'] == 1:
sparse = True
configuration_space = ParamSklearnRegressor. \
get_hyperparameter_search_space(include_estimators=include_estimators,
include_preprocessors=include_preprocessors,
dataset_properties={'sparse': sparse})
return configuration_space
def _get_classification_configuration_space(info, include_estimators=None,
include_preprocessors=None):
task_type = info['task']
multilabel = False
multiclass = False
sparse = False
if task_type == MULTILABEL_CLASSIFICATION:
multilabel = True
if task_type == REGRESSION:
raise NotImplementedError()
if task_type == MULTICLASS_CLASSIFICATION:
multiclass = True
pass
if task_type == BINARY_CLASSIFICATION:
pass
if info['is_sparse'] == 1:
sparse = True
dataset_properties = {'multilabel': multilabel, 'multiclass': multiclass,
'sparse': sparse}
return ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties=dataset_properties,
include_estimators=include_estimators,
include_preprocessors=include_preprocessors)
# exclude_preprocessors=["sparse_filtering"])
def get_model(configuration, seed):
if 'classifier' in configuration:
return ParamSklearnClassifier(configuration, seed)
elif 'regressor' in configuration:
return ParamSklearnRegressor(configuration, seed)
def get_class(info):
if info['task'] in REGRESSION_TASKS:
return ParamSklearnRegressor
else:
return ParamSklearnClassifier
| bsd-3-clause |
lin-credible/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 320 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
Example builds a swiss roll dataset and runs
hierarchical clustering on the positions of the points.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifolds. By contrast, when imposing connectivity constraints,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(np.float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
| bsd-3-clause |
mhdella/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 57 | 16523 | from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.externals import joblib
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
# This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False) | bsd-3-clause |
fengzhyuan/scikit-learn | sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
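# Quick usage sketch (``X`` below stands for a hypothetical (n_samples, n_features) array):
#
#   from sklearn.covariance import EmpiricalCovariance, LedoitWolf
#   emp = EmpiricalCovariance().fit(X)
#   lw = LedoitWolf().fit(X)
#   # lw.covariance_ is a shrunk, better-conditioned estimate than emp.covariance_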
| bsd-3-clause |
rajeshb/SelfDrivingCar | T1P5-Vehicle-Tracking/lesson_functions.py | 1 | 15834 | import matplotlib.image as mpimg
import numpy as np
import cv2
from skimage.feature import hog
from scipy.ndimage.measurements import label
# Define a function to return HOG features and visualization
def get_hog_features(img, orient, pix_per_cell, cell_per_block,
vis=False, feature_vec=True):
# Call with two outputs if vis==True
if vis == True:
features, hog_image = hog(img, orientations=orient,
pixels_per_cell=(pix_per_cell, pix_per_cell),
cells_per_block=(cell_per_block, cell_per_block),
transform_sqrt=True,
visualise=vis, feature_vector=feature_vec)
return features, hog_image
# Otherwise call with one output
else:
features = hog(img, orientations=orient,
pixels_per_cell=(pix_per_cell, pix_per_cell),
cells_per_block=(cell_per_block, cell_per_block),
transform_sqrt=True,
visualise=vis, feature_vector=feature_vec)
return features
# Define a function to compute binned color features
def bin_spatial(img, size=(32, 32)):
# Use cv2.resize().ravel() to create the feature vector
features = cv2.resize(img, size).ravel()
# Return the feature vector
return features
# Define a function to compute color histogram features
# FIXED, AFTER imread() of PNGs: NEED TO CHANGE bins_range if reading .png files with mpimg!
def color_hist(img, nbins=32, bins_range=(0, 256)):
# Compute the histogram of the color channels separately
channel1_hist = np.histogram(img[:,:,0], bins=nbins, range=bins_range)
channel2_hist = np.histogram(img[:,:,1], bins=nbins, range=bins_range)
channel3_hist = np.histogram(img[:,:,2], bins=nbins, range=bins_range)
# Concatenate the histograms into a single feature vector
hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))
# Return the individual histograms, bin_centers and feature vector
return hist_features
# Define a function to extract features from a list of images
# Have this function call bin_spatial() and color_hist()
def extract_features(imgs, file_type = 'jpg', color_space='RGB', spatial_size=(32, 32),
hist_bins=32, orient=9,
pix_per_cell=8, cell_per_block=2, hog_channel=0,
spatial_feat=True, hist_feat=True, hog_feat=True):
# Create a list to append feature vectors to
features = []
# Iterate through the list of images
for file in imgs:
file_features = []
# Read in each one by one
image = mpimg.imread(file)
# FIX for "NEED TO CHANGE bins_range if reading .png files with mpimg!"
if file_type == 'png':
image = (image * 255).astype(np.uint8)
# apply color conversion if other than 'RGB'
if color_space != 'RGB':
if color_space == 'HSV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
elif color_space == 'LUV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
elif color_space == 'HLS':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
elif color_space == 'YUV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
elif color_space == 'YCrCb':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)
else: feature_image = np.copy(image)
if spatial_feat == True:
spatial_features = bin_spatial(feature_image, size=spatial_size)
file_features.append(spatial_features)
if hist_feat == True:
# Apply color_hist()
hist_features = color_hist(feature_image, nbins=hist_bins)
file_features.append(hist_features)
if hog_feat == True:
# Call get_hog_features() with vis=False, feature_vec=True
if hog_channel == 'ALL':
hog_features = []
for channel in range(feature_image.shape[2]):
hog_features.append(get_hog_features(feature_image[:,:,channel],
orient, pix_per_cell, cell_per_block,
vis=False, feature_vec=True))
hog_features = np.ravel(hog_features)
else:
hog_features = get_hog_features(feature_image[:,:,hog_channel], orient,
pix_per_cell, cell_per_block, vis=False, feature_vec=True)
# Append the new feature vector to the features list
file_features.append(hog_features)
features.append(np.concatenate(file_features))
# Return list of feature vectors
return features
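# Illustrative sketch (not part of the original module) of how extract_features() is
# typically combined with a scaler and a linear SVM; the file lists and parameter values
# below are hypothetical.
def _demo_train_classifier(car_files, notcar_files):
    from sklearn.preprocessing import StandardScaler
    from sklearn.svm import LinearSVC
    # extract the same feature set for both classes
    car_features = extract_features(car_files, file_type='png', color_space='YCrCb',
                                    spatial_size=(32, 32), hist_bins=32, orient=9,
                                    pix_per_cell=8, cell_per_block=2, hog_channel='ALL')
    notcar_features = extract_features(notcar_files, file_type='png', color_space='YCrCb',
                                       spatial_size=(32, 32), hist_bins=32, orient=9,
                                       pix_per_cell=8, cell_per_block=2, hog_channel='ALL')
    X = np.vstack((car_features, notcar_features)).astype(np.float64)
    y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))
    scaler = StandardScaler().fit(X)  # per-column normalization of the stacked features
    clf = LinearSVC()
    clf.fit(scaler.transform(X), y)
    return clf, scaler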
# Define a function that takes
# start and stop positions in both x and y,
# window size (x and y dimensions),
# and overlap fraction (for both x and y)
def slide_window(x_start_stop=[None, None], y_start_stop=[None, None],
xy_window=(64, 64), xy_overlap=(0.5, 0.5)):
# If x and/or y start/stop positions not defined, set to image size
if x_start_stop[0] == None:
x_start_stop[0] = 0
if x_start_stop[1] == None:
x_start_stop[1] = 1280
if y_start_stop[0] == None:
y_start_stop[0] = 0
if y_start_stop[1] == None:
y_start_stop[1] = 720
# Compute the span of the region to be searched
xspan = x_start_stop[1] - x_start_stop[0]
yspan = y_start_stop[1] - y_start_stop[0]
# Compute the number of pixels per step in x/y
nx_pix_per_step = np.int(xy_window[0]*(1 - xy_overlap[0]))
ny_pix_per_step = np.int(xy_window[1]*(1 - xy_overlap[1]))
# Compute the number of windows in x/y
nx_buffer = np.int(xy_window[0]*(xy_overlap[0]))
ny_buffer = np.int(xy_window[1]*(xy_overlap[1]))
nx_windows = np.int((xspan-nx_buffer)/nx_pix_per_step)
ny_windows = np.int((yspan-ny_buffer)/ny_pix_per_step)
# Initialize a list to append window positions to
window_list = []
# Loop through finding x and y window positions
# Note: you could vectorize this step, but in practice
# you'll be considering windows one by one with your
# classifier, so looping makes sense
for ys in range(ny_windows):
for xs in range(nx_windows):
# Calculate window position
startx = xs*nx_pix_per_step + x_start_stop[0]
endx = startx + xy_window[0]
starty = ys*ny_pix_per_step + y_start_stop[0]
endy = starty + xy_window[1]
# Append window position to list
window_list.append(((startx, starty), (endx, endy)))
# Return the list of windows
return window_list
# Define a function to draw bounding boxes
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
# Make a copy of the image
imcopy = np.copy(img)
# Iterate through the bounding boxes
for bbox in bboxes:
# Draw a rectangle given bbox coordinates
cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
# Return the image copy with boxes drawn
return imcopy
# Define a function to extract features from a single image window
# This function is very similar to extract_features()
# just for a single image rather than list of images
def single_img_features(img, color_space='RGB', spatial_size=(32, 32),
hist_bins=32, orient=9,
pix_per_cell=8, cell_per_block=2, hog_channel=0,
spatial_feat=True, hist_feat=True, hog_feat=True):
#1) Define an empty list to receive features
img_features = []
#2) Apply color conversion if other than 'RGB'
if color_space != 'RGB':
if color_space == 'HSV':
feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
elif color_space == 'LUV':
feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
elif color_space == 'HLS':
feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
elif color_space == 'YUV':
feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
elif color_space == 'YCrCb':
feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
else: feature_image = np.copy(img)
#3) Compute spatial features if flag is set
if spatial_feat == True:
spatial_features = bin_spatial(feature_image, size=spatial_size)
#4) Append features to list
img_features.append(spatial_features)
#5) Compute histogram features if flag is set
if hist_feat == True:
hist_features = color_hist(feature_image, nbins=hist_bins)
#6) Append features to list
img_features.append(hist_features)
#7) Compute HOG features if flag is set
if hog_feat == True:
if hog_channel == 'ALL':
hog_features = []
for channel in range(feature_image.shape[2]):
hog_features.extend(get_hog_features(feature_image[:,:,channel],
orient, pix_per_cell, cell_per_block,
vis=False, feature_vec=True))
else:
hog_features = get_hog_features(feature_image[:,:,hog_channel], orient,
pix_per_cell, cell_per_block, vis=False, feature_vec=True)
#8) Append features to list
img_features.append(hog_features)
#9) Return concatenated array of features
return np.concatenate(img_features)
# Define a function you will pass an image
# and the list of windows to be searched (output of slide_windows())
def search_windows(img, windows, clf, scaler, color_space='RGB',
spatial_size=(32, 32), hist_bins=32,
hist_range=(0, 256), orient=9,
pix_per_cell=8, cell_per_block=2,
hog_channel=0, spatial_feat=True,
hist_feat=True, hog_feat=True):
#1) Create an empty list to receive positive detection windows
on_windows = []
#2) Iterate over all windows in the list
for window in windows:
#3) Extract the test window from original image
test_img = cv2.resize(img[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64))
#4) Extract features for that window using single_img_features()
features = single_img_features(test_img, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
#5) Scale extracted features to be fed to classifier
test_features = scaler.transform(np.array(features).reshape(1, -1))
#6) Predict using your classifier
prediction = clf.predict(test_features)
#7) If positive (prediction == 1) then save the window
if prediction == 1:
on_windows.append(window)
#8) Return windows for positive detections
return on_windows
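# Illustrative sketch (not part of the original module) showing how slide_window(),
# search_windows() and draw_boxes() fit together; the scan region, window size and the
# clf/scaler arguments are hypothetical and must match how the classifier was trained.
def _demo_window_search(image, clf, scaler):
    windows = slide_window(x_start_stop=[None, None], y_start_stop=[400, 656],
                           xy_window=(96, 96), xy_overlap=(0.5, 0.5))
    hot_windows = search_windows(image, windows, clf, scaler, color_space='YCrCb',
                                 spatial_size=(32, 32), hist_bins=32, orient=9,
                                 pix_per_cell=8, cell_per_block=2, hog_channel='ALL',
                                 spatial_feat=True, hist_feat=True, hog_feat=True)
    return draw_boxes(image, hot_windows, color=(0, 0, 255), thick=6)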
def add_heat(heatmap, bbox_list):
# Iterate through list of bboxes
for box in bbox_list:
# Add += 1 for all pixels inside each bbox
# Assuming each "box" takes the form ((x1, y1), (x2, y2))
heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1
# Return updated heatmap
return heatmap
def apply_threshold(heatmap, threshold):
# Zero out pixels below the threshold
heatmap[heatmap <= threshold] = 0
# Return thresholded map
return heatmap
def draw_labeled_bboxes(img, labels):
# Iterate through all detected cars
for car_number in range(1, labels[1]+1):
# Find pixels with each car_number label value
nonzero = (labels[0] == car_number).nonzero()
# Identify x and y values of those pixels
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Define a bounding box based on min/max x and y
bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))
# Draw the box on the image
cv2.rectangle(img, bbox[0], bbox[1], (0,0,255), 6)
# Return the image
return img
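# Illustrative sketch (not part of the original module) of the usual heat-map filtering
# applied to the positive windows returned by search_windows()/find_cars(); the threshold
# value is hypothetical.
def _demo_heatmap_filter(image, hot_windows, threshold=2):
    heat = np.zeros_like(image[:, :, 0]).astype(float)
    heat = add_heat(heat, hot_windows)       # accumulate overlapping detections
    heat = apply_threshold(heat, threshold)  # reject sparse false positives
    labels = label(heat)                     # connected components -> one blob per car
    return draw_labeled_bboxes(np.copy(image), labels)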
# Define a single function that can extract features using hog sub-sampling and make predictions
def find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins):
draw_img = np.copy(img)
#img = img.astype(np.float32)/255
img_tosearch = img[ystart:ystop,:,:]
ctrans_tosearch = cv2.cvtColor(img_tosearch, cv2.COLOR_RGB2YCrCb)
if scale != 1:
imshape = ctrans_tosearch.shape
ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1]/scale), np.int(imshape[0]/scale)))
ch1 = ctrans_tosearch[:,:,0]
ch2 = ctrans_tosearch[:,:,1]
ch3 = ctrans_tosearch[:,:,2]
# Define blocks and steps as above
nxblocks = (ch1.shape[1] // pix_per_cell)-1
nyblocks = (ch1.shape[0] // pix_per_cell)-1
nfeat_per_block = orient*cell_per_block**2
# 64 was the orginal sampling rate, with 8 cells and 8 pix per cell
window = 64
nblocks_per_window = (window // pix_per_cell)-1
cells_per_step = 2 # Instead of overlap, define how many cells to step
nxsteps = (nxblocks - nblocks_per_window) // cells_per_step
nysteps = (nyblocks - nblocks_per_window) // cells_per_step
# Compute individual channel HOG features for the entire image
hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)
window_list = []
for xb in range(nxsteps):
for yb in range(nysteps):
ypos = yb*cells_per_step
xpos = xb*cells_per_step
# Extract HOG for this patch
hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))
xleft = xpos*pix_per_cell
ytop = ypos*pix_per_cell
# Extract the image patch
subimg = cv2.resize(ctrans_tosearch[ytop:ytop+window, xleft:xleft+window], (64,64))
# Get color features
spatial_features = bin_spatial(subimg, size=spatial_size)
hist_features = color_hist(subimg, nbins=hist_bins)
# Scale features and make a prediction
test_features = X_scaler.transform(np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1))
#test_features = X_scaler.transform(np.hstack((shape_feat, hist_feat)).reshape(1, -1))
test_prediction = svc.predict(test_features)
if test_prediction == 1:
xbox_left = np.int(xleft*scale)
ytop_draw = np.int(ytop*scale)
win_draw = np.int(window*scale)
startx = xbox_left
starty = ytop_draw+ystart
endx = xbox_left+win_draw
endy = ytop_draw+win_draw+ystart
window_list.append(((startx, starty), (endx, endy)))
return window_list
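# Illustrative sketch (not part of the original module): find_cars() is usually run at a
# couple of scales and the window lists merged before heat-mapping; the scan regions and
# scales below are hypothetical.
def _demo_multi_scale_search(image, svc, X_scaler):
    hot_windows = []
    for ystart, ystop, scale in ((400, 528, 1.0), (400, 656, 1.5)):
        hot_windows += find_cars(image, ystart, ystop, scale, svc, X_scaler,
                                 orient=9, pix_per_cell=8, cell_per_block=2,
                                 spatial_size=(32, 32), hist_bins=32)
    return hot_windows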
| mit |
UCBerkeleySETI/breakthrough | APF/Tutorial/poly_curve_order_fit_github.py | 1 | 3151 | import matplotlib.pyplot as plt
import numpy as np
import pyfits as pf
def get_coeffs(file_name):
# open order coefficients and read values
coeff_array = np.zeros((79,5))
with open(file_name, "r") as text:
lines = text.read().splitlines()
for i in range(len(lines)):
a0 = float(lines[i][6:13].strip())
a1 = float(lines[i][17:26].strip())
a2 = float(lines[i][27:39].strip())
a3 = float(lines[i][40:52].strip())
a4 = float(lines[i][54:].strip())
coeffs = np.array([a0,a1,a2,a3,a4])
coeff_array[i] += coeffs
return coeff_array
def plot_with_polys(raw_array, coeff_array):
plt.imshow(raw_array, cmap = "gray", origin = "lower",
aspect = "auto", vmin = np.median(raw_array),
vmax = np.median(raw_array) *1.1)
plt.title("Raw image with polynomial functions overplotted")
x = np.arange(0,4608)
for i in range(coeff_array[:,0].size):
a0 = coeff_array[i,0]
a1 = coeff_array[i,1]
a2 = coeff_array[i,2]
a3 = coeff_array[i,3]
a4 = coeff_array[i,4]
plt.plot(x, a0 + a1*x + a2*x**2 + a3*x**3 + a4*x**4)
def reduce_raw_data(image, coeff_array, bias):
x = np.arange(0, 4608)
y_values = np.zeros((79,4608))
reduced_image = np.zeros((79,4608))
for i in range(coeff_array[:,0].size):
a0 = coeff_array[i,0]
a1 = coeff_array[i,1]
a2 = coeff_array[i,2]
a3 = coeff_array[i,3]
a4 = coeff_array[i,4]
for j in range(x.size):
y = a0 + a1*x[j] + a2*x[j]**2 + a3*x[j]**3 + a4*x[j]**4
y_values[i,j] = y
y = int(round(y))
reduced_image[i,j] = int(np.sum(image[y-1:y+2,j],
axis = 0)-3*bias)
return reduced_image, y_values
if __name__ == "__main__":
# opening the APF Data and plotting the data as a 2D array
apf_file = pf.open('ucb-amp194.fits')
image = np.fliplr(np.rot90(apf_file[0].data))
# ^ flips and rotates the original array by 90 degrees
# so its orientation matches the reference image on the APF website
header = apf_file[0].header
# Calculate the bias
bias = np.median(image[-30:])
# get reduced image
apf_reduced = pf.open('ramp.194.fits')
header_red = apf_reduced[0].header
image_red = apf_reduced[0].data
# creates an array of polynomial coefficients for each order
coeff_array = get_coeffs("order_coefficients.txt")
# plot the raw image with the fitted polynomial order traces overlaid
plt.figure(figsize=(12,8))
plot_with_polys(image, coeff_array)
# extracting each order and creating a reduced image to plot
reduced_image, y_values = reduce_raw_data(image, coeff_array, bias)
plt.figure(figsize=(12,8))
plt.subplot(2, 1, 1)
plt.imshow(reduced_image, cmap = "gray", origin = "lower",
aspect = "auto", vmin = np.median(reduced_image),
vmax = np.median(reduced_image) *1.1)
plt.title("Reduced Image through Polyfit Technique")
plt.subplot(2, 1, 2)
plt.title("Reduced Image File")
plt.imshow(image_red, cmap = "gray", origin = "lower",
aspect = "auto", vmin = np.median(image_red),
vmax = np.median(image_red) *1.1)
plt.figure(figsize=(12,8))
plt.subplot(2, 1, 1)
plt.plot(reduced_image[53])
plt.title("Reduced Image (Polyfit) H-alpha")
plt.subplot(2, 1, 2)
plt.plot(image_red[53])
plt.title("Reduced Image File H-alpha")
plt.show()
| gpl-3.0 |
nmayorov/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 112 | 1819 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
lixun910/pysal | pysal/lib/io/geotable/utils.py | 3 | 2349 | from ...cg.shapes import asShape as pShape
from ...common import requires as _requires
from warnings import warn
@_requires('geopandas')
def to_df(df, geom_col='geometry', **kw):
"""
Convert a Geopandas dataframe into a normal pandas dataframe with a column
containing PySAL shapes.
Arguments
---------
df : geopandas.GeoDataFrame
a geopandas dataframe (or pandas dataframe) with a column
containing geo-interfaced shapes
geom_col: str
string denoting which column in the df contains the geometry
**kw : keyword options
options passed directly to pandas.DataFrame(...,**kw)
See Also
--------
pandas.DataFrame
"""
import pandas as pd
from geopandas import GeoDataFrame, GeoSeries
df[geom_col] = df[geom_col].apply(pShape)
if isinstance(df, (GeoDataFrame, GeoSeries)):
df = pd.DataFrame(df, **kw)
return df
@_requires('geopandas')
def to_gdf(df, geom_col='geometry', **kw):
"""
Convert a pandas dataframe with geometry column to a GeoPandas dataframe
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a column containing geo-interfaced
shapes
geom_col: str
string denoting which column in the df contains the geometry
**kw : keyword options
options passed directly to geopandas.GeoDataFrame(...,**kw)
See Also
--------
geopandas.GeoDataFrame
"""
from geopandas import GeoDataFrame
from shapely.geometry import asShape as sShape
df[geom_col] = df[geom_col].apply(sShape)
return GeoDataFrame(df, geometry=geom_col, **kw)
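# A minimal usage sketch for the two converters above (the shapefile name is
# hypothetical and only for illustration):
#
#     import geopandas
#     gdf = geopandas.read_file('tracts.shp')
#     pdf = to_df(gdf)    # geometry column now holds PySAL (geo-interfaced) shapes
#     gdf2 = to_gdf(pdf)  # back to a shapely-backed GeoDataFrame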
def insert_metadata(df, obj, name=None, inplace=True, overwrite=False):
if not inplace:
new = df.copy(deep=True)
insert_metadata(new, obj, name=name, inplace=True)
return new
if name is None:
name = type(obj).__name__
if hasattr(df, name):
if overwrite:
warn('Overwriting attribute {}! This may break the dataframe!'.format(name))
else:
raise Exception('Dataframe already has attribute {}. Cowardly refusing '
'to break dataframe. '.format(name))
df._metadata.append(name)
df.__setattr__(name, obj)
| bsd-3-clause |
tony810430/flink | flink-python/pyflink/table/tests/test_row_based_operation.py | 2 | 16850 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pandas.util.testing import assert_frame_equal
from pyflink.common import Row
from pyflink.table import expressions as expr, ListView
from pyflink.table.types import DataTypes
from pyflink.table.udf import udf, udtf, udaf, AggregateFunction, TableAggregateFunction, udtaf
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkBlinkBatchTableTestCase, \
PyFlinkBlinkStreamTableTestCase
class RowBasedOperationTests(object):
def test_map(self):
t = self.t_env.from_elements(
[(1, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b'],
[DataTypes.BIGINT(), DataTypes.BIGINT()])
self.t_env.register_table_sink("Results", table_sink)
func = udf(lambda x: Row(a=x + 1, b=x * x), result_type=DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.BIGINT())]))
func2 = udf(lambda x: Row(x.a + 1, x.b * 2), result_type=DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.BIGINT())]))
t.map(func(t.b)).alias("a", "b") \
.map(func(t.a)) \
.map(func2) \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(
actual, ["+I[5, 18]", "+I[4, 8]", "+I[8, 72]", "+I[11, 162]", "+I[6, 32]"])
def test_map_with_pandas_udf(self):
t = self.t_env.from_elements(
[(1, Row(2, 3)), (2, Row(1, 3)), (1, Row(5, 4)), (1, Row(8, 6)), (2, Row(3, 4))],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b",
DataTypes.ROW([DataTypes.FIELD("c", DataTypes.INT()),
DataTypes.FIELD("d", DataTypes.INT())]))]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b'],
[DataTypes.BIGINT(), DataTypes.BIGINT()])
self.t_env.register_table_sink("Results", table_sink)
def func(x):
import pandas as pd
res = pd.concat([x.a, x.c + x.d], axis=1)
return res
def func2(x):
return x * 2
def func3(x):
assert isinstance(x, Row)
return x
pandas_udf = udf(func,
result_type=DataTypes.ROW(
[DataTypes.FIELD("c", DataTypes.BIGINT()),
DataTypes.FIELD("d", DataTypes.BIGINT())]),
func_type='pandas')
pandas_udf_2 = udf(func2,
result_type=DataTypes.ROW(
[DataTypes.FIELD("c", DataTypes.BIGINT()),
DataTypes.FIELD("d", DataTypes.BIGINT())]),
func_type='pandas')
general_udf = udf(func3,
result_type=DataTypes.ROW(
[DataTypes.FIELD("c", DataTypes.BIGINT()),
DataTypes.FIELD("d", DataTypes.BIGINT())]))
t.map(pandas_udf).map(pandas_udf_2).map(general_udf).execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(
actual,
["+I[4, 8]", "+I[2, 10]", "+I[2, 28]", "+I[2, 18]", "+I[4, 14]"])
def test_flat_map(self):
t = self.t_env.from_elements(
[(1, "2,3"), (2, "1"), (1, "5,6,7")],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.STRING())]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c', 'd', 'e', 'f'],
[DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.BIGINT(),
DataTypes.STRING(), DataTypes.BIGINT(), DataTypes.STRING()])
self.t_env.register_table_sink("Results", table_sink)
@udtf(result_types=[DataTypes.INT(), DataTypes.STRING()])
def split(x):
for s in x.b.split(","):
yield x.a, s
t.flat_map(split).alias("a", "b") \
.flat_map(split).alias("a", "b") \
.join_lateral(split.alias("c", "d")) \
.left_outer_join_lateral(split.alias("e", "f")) \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(
actual,
["+I[1, 2, 1, 2, 1, 2]", "+I[1, 3, 1, 3, 1, 3]", "+I[2, 1, 2, 1, 2, 1]",
"+I[1, 5, 1, 5, 1, 5]", "+I[1, 6, 1, 6, 1, 6]", "+I[1, 7, 1, 7, 1, 7]"])
class BatchRowBasedOperationITTests(RowBasedOperationTests, PyFlinkBlinkBatchTableTestCase):
def test_aggregate_with_pandas_udaf(self):
t = self.t_env.from_elements(
[(1, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c'],
[DataTypes.TINYINT(), DataTypes.FLOAT(), DataTypes.INT()])
self.t_env.register_table_sink("Results", table_sink)
pandas_udaf = udaf(lambda pd: (pd.b.mean(), pd.a.max()),
result_type=DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.FLOAT()),
DataTypes.FIELD("b", DataTypes.INT())]),
func_type="pandas")
t.select(t.a, t.b) \
.group_by(t.a) \
.aggregate(pandas_udaf) \
.select("*") \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[1, 5.0, 1]", "+I[2, 2.0, 2]"])
def test_aggregate_with_pandas_udaf_without_keys(self):
t = self.t_env.from_elements(
[(1, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b'],
[DataTypes.FLOAT(), DataTypes.INT()])
self.t_env.register_table_sink("Results", table_sink)
pandas_udaf = udaf(lambda pd: Row(pd.b.mean(), pd.b.max()),
result_type=DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.FLOAT()),
DataTypes.FIELD("b", DataTypes.INT())]),
func_type="pandas")
t.select(t.b) \
.aggregate(pandas_udaf.alias("a", "b")) \
.select("a, b") \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[3.8, 8]"])
def test_window_aggregate_with_pandas_udaf(self):
import datetime
from pyflink.table.window import Tumble
t = self.t_env.from_elements(
[
(1, 2, 3, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(3, 2, 4, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(2, 1, 2, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(1, 3, 1, datetime.datetime(2018, 3, 11, 3, 40, 0, 0)),
(1, 8, 5, datetime.datetime(2018, 3, 11, 4, 20, 0, 0)),
(2, 3, 6, datetime.datetime(2018, 3, 11, 3, 30, 0, 0))
],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT()),
DataTypes.FIELD("rowtime", DataTypes.TIMESTAMP(3))]))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c'],
[
DataTypes.TIMESTAMP(3),
DataTypes.FLOAT(),
DataTypes.INT()
])
self.t_env.register_table_sink("Results", table_sink)
pandas_udaf = udaf(lambda pd: (pd.b.mean(), pd.b.max()),
result_type=DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.FLOAT()),
DataTypes.FIELD("b", DataTypes.INT())]),
func_type="pandas")
tumble_window = Tumble.over(expr.lit(1).hours) \
.on(expr.col("rowtime")) \
.alias("w")
t.select(t.b, t.rowtime) \
.window(tumble_window) \
.group_by("w") \
.aggregate(pandas_udaf.alias("d", "e")) \
.select("w.rowtime, d, e") \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["+I[2018-03-11 03:59:59.999, 2.2, 3]",
"+I[2018-03-11 04:59:59.999, 8.0, 8]"])
class StreamRowBasedOperationITTests(RowBasedOperationTests, PyFlinkBlinkStreamTableTestCase):
def test_aggregate(self):
import pandas as pd
t = self.t_env.from_elements(
[(1, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
function = CountAndSumAggregateFunction()
agg = udaf(function,
result_type=function.get_result_type(),
accumulator_type=function.get_accumulator_type(),
name=str(function.__class__.__name__))
result = t.group_by(t.a) \
.aggregate(agg.alias("c", "d")) \
.select("a, c, d") \
.to_pandas()
assert_frame_equal(result.sort_values('a').reset_index(drop=True),
pd.DataFrame([[1, 3, 15], [2, 2, 4]], columns=['a', 'c', 'd']))
def test_flat_aggregate(self):
import pandas as pd
mytop = udtaf(Top2())
t = self.t_env.from_elements([(1, 'Hi', 'Hello'),
(3, 'Hi', 'hi'),
(5, 'Hi2', 'hi'),
(7, 'Hi', 'Hello'),
(2, 'Hi', 'Hello')], ['a', 'b', 'c'])
result = t.select(t.a, t.c) \
.group_by(t.c) \
.flat_aggregate(mytop.alias('a')) \
.select(t.a) \
.flat_aggregate(mytop.alias("b")) \
.select("b") \
.to_pandas()
assert_frame_equal(result, pd.DataFrame([[7], [5]], columns=['b']))
def test_flat_aggregate_list_view(self):
import pandas as pd
my_concat = udtaf(ListViewConcatTableAggregateFunction())
self.t_env.get_config().get_configuration().set_string(
"python.fn-execution.bundle.size", "2")
# trigger the cache eviction in a bundle.
self.t_env.get_config().get_configuration().set_string(
"python.state.cache-size", "2")
t = self.t_env.from_elements([(1, 'Hi', 'Hello'),
(3, 'Hi', 'hi'),
(3, 'Hi2', 'hi'),
(3, 'Hi', 'hi'),
(2, 'Hi', 'Hello'),
(1, 'Hi2', 'Hello'),
(3, 'Hi3', 'hi'),
(3, 'Hi2', 'Hello'),
(3, 'Hi3', 'hi'),
(2, 'Hi3', 'Hello')], ['a', 'b', 'c'])
result = t.group_by(t.c) \
.flat_aggregate(my_concat(t.b, ',').alias("b")) \
.select(t.b, t.c) \
.alias("a, c")
assert_frame_equal(result.to_pandas().sort_values('c').reset_index(drop=True),
pd.DataFrame([["Hi,Hi,Hi2,Hi2,Hi3", "Hello"],
["Hi,Hi,Hi2,Hi2,Hi3", "Hello"],
["Hi,Hi2,Hi,Hi3,Hi3", "hi"],
["Hi,Hi2,Hi,Hi3,Hi3", "hi"]],
columns=['a', 'c']))
class CountAndSumAggregateFunction(AggregateFunction):
def get_value(self, accumulator):
from pyflink.common import Row
return Row(accumulator[0], accumulator[1])
def create_accumulator(self):
from pyflink.common import Row
return Row(0, 0)
def accumulate(self, accumulator, row: Row):
accumulator[0] += 1
accumulator[1] += row.b
def retract(self, accumulator, row: Row):
accumulator[0] -= 1
        accumulator[1] -= row.b
def merge(self, accumulator, accumulators):
for other_acc in accumulators:
accumulator[0] += other_acc[0]
accumulator[1] += other_acc[1]
def get_accumulator_type(self):
return DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.BIGINT())])
def get_result_type(self):
return DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.BIGINT())])
class Top2(TableAggregateFunction):
def emit_value(self, accumulator):
yield accumulator[0]
yield accumulator[1]
def create_accumulator(self):
return [None, None]
def accumulate(self, accumulator, row: Row):
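        # Track the two largest values of column "a" seen so far:
        # accumulator[0] holds the current maximum, accumulator[1] the runner-up.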
if row.a is not None:
if accumulator[0] is None or row.a > accumulator[0]:
accumulator[1] = accumulator[0]
accumulator[0] = row.a
elif accumulator[1] is None or row.a > accumulator[1]:
accumulator[1] = row.a
def retract(self, accumulator, *args):
accumulator[0] = accumulator[0] - 1
def merge(self, accumulator, accumulators):
for other_acc in accumulators:
self.accumulate(accumulator, other_acc[0])
self.accumulate(accumulator, other_acc[1])
def get_accumulator_type(self):
return DataTypes.ARRAY(DataTypes.BIGINT())
def get_result_type(self):
return DataTypes.BIGINT()
class ListViewConcatTableAggregateFunction(TableAggregateFunction):
def emit_value(self, accumulator):
result = accumulator[1].join(accumulator[0])
yield Row(result)
yield Row(result)
def create_accumulator(self):
return Row(ListView(), '')
def accumulate(self, accumulator, *args):
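        # accumulator[0] is a ListView collecting the strings to be joined;
        # accumulator[1] stores the separator passed as the second argument.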
accumulator[1] = args[1]
accumulator[0].add(args[0])
def retract(self, accumulator, *args):
raise NotImplementedError
def get_accumulator_type(self):
return DataTypes.ROW([
DataTypes.FIELD("f0", DataTypes.LIST_VIEW(DataTypes.STRING())),
DataTypes.FIELD("f1", DataTypes.BIGINT())])
def get_result_type(self):
return DataTypes.ROW([DataTypes.FIELD("a", DataTypes.STRING())])
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
chrinide/PyFV | pyfv/elm/plot_elm_comparison.py | 1 | 7043 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
======================
ELM Classifiers Comparison
======================
A comparison of a several ELMClassifiers with different types of hidden
layer activations.
ELMClassifier is a classifier based on the Extreme Learning Machine,
a single layer feedforward network with random hidden layer components
and least squares fitting of the hidden->output weights by default [1][2]
The point of this example is to illustrate the nature of decision boundaries
with different hidden layer activation types and regressors.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
In particular in high dimensional spaces data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
References
__________
.. [1] http://www.extreme-learning-machines.org
.. [2] G.-B. Huang, Q.-Y. Zhu and C.-K. Siew, "Extreme Learning Machine:
Theory and Applications", Neurocomputing, vol. 70, pp. 489-501,
2006.
===============================================================================
Basis Functions:
gaussian rbf : exp(-gamma * (||x-c||/r)^2)
tanh : np.tanh(a)
sinsq : np.power(np.sin(a), 2.0)
tribas : np.clip(1.0 - np.fabs(a), 0.0, 1.0)
hardlim : np.array(a > 0.0, dtype=float)
where x : input pattern
a : dot_product(x, c) + b
c,r : randomly generated components
Label Legend:
ELM(10,tanh) :10 tanh units
ELM(10,tanh,LR) :10 tanh units, LogisticRegression
ELM(10,sinsq) :10 sin*sin units
ELM(10,tribas) :10 tribas units
ELM(10,hardlim) :10 hardlim units
ELM(20,rbf(0.1)) :20 rbf units gamma=0.1
"""
print __doc__
# Code source: Gael Varoquaux
# Andreas Mueller
# Modified for Documentation merge by Jaques Grobler
# Modified for Extreme Learning Machine Classifiers by David Lambert
# License: BSD
import numpy as np
import pylab as pl
from matplotlib.colors import ListedColormap
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import LogisticRegression
from elm import GenELMClassifier
from random_layer import RBFRandomLayer, MLPRandomLayer
def get_data_bounds(X):
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
return (x_min, x_max, y_min, y_max, xx, yy)
def plot_data(ax, X_train, y_train, X_test, y_test, xx, yy):
cm = ListedColormap(['#FF0000', '#0000FF'])
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
def plot_contour(ax, X_train, y_train, X_test, y_test, xx, yy, Z):
cm = pl.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - 0.3, yy.min() + 0.3, ('%.2f' % score).lstrip('0'),
size=13, horizontalalignment='right')
def make_datasets():
return [make_moons(n_samples=200, noise=0.3, random_state=0),
make_circles(n_samples=200, noise=0.2, factor=0.5, random_state=1),
make_linearly_separable()]
def make_classifiers():
names = ["ELM(10,tanh)", "ELM(10,tanh,LR)", "ELM(10,sinsq)",
"ELM(10,tribas)", "ELM(hardlim)", "ELM(20,rbf(0.1))"]
nh = 10
# pass user defined transfer func
sinsq = (lambda x: np.power(np.sin(x), 2.0))
srhl_sinsq = MLPRandomLayer(n_hidden=nh, activation_func=sinsq)
# use internal transfer funcs
srhl_tanh = MLPRandomLayer(n_hidden=nh, activation_func='tanh')
srhl_tribas = MLPRandomLayer(n_hidden=nh, activation_func='tribas')
srhl_hardlim = MLPRandomLayer(n_hidden=nh, activation_func='hardlim')
# use gaussian RBF
srhl_rbf = RBFRandomLayer(n_hidden=nh*2, rbf_width=0.1, random_state=0)
log_reg = LogisticRegression()
classifiers = [GenELMClassifier(hidden_layer=srhl_tanh),
GenELMClassifier(hidden_layer=srhl_tanh, regressor=log_reg),
GenELMClassifier(hidden_layer=srhl_sinsq),
GenELMClassifier(hidden_layer=srhl_tribas),
GenELMClassifier(hidden_layer=srhl_hardlim),
GenELMClassifier(hidden_layer=srhl_rbf)]
return names, classifiers
def make_linearly_separable():
X, y = make_classification(n_samples=200, n_features=2, n_redundant=0,
n_informative=2, random_state=1,
n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
return (X, y)
###############################################################################
datasets = make_datasets()
names, classifiers = make_classifiers()
i = 1
figure = pl.figure(figsize=(18, 9))
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4,
random_state=0)
x_min, x_max, y_min, y_max, xx, yy = get_data_bounds(X)
# plot dataset first
ax = pl.subplot(len(datasets), len(classifiers) + 1, i)
plot_data(ax, X_train, y_train, X_test, y_test, xx, yy)
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = pl.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plot_contour(ax, X_train, y_train, X_test, y_test, xx, yy, Z)
i += 1
figure.subplots_adjust(left=.02, right=.98)
pl.show() | gpl-2.0 |
fbagirov/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 230 | 19795 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
    # test hierarchical clustering on a precomputed distances matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
    # test hierarchical clustering with a callable affinity (manhattan distances)
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
# With specified a number of clusters just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that we raise a TypeError on dense matrices
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
# test that return_distance when set true, gives same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
# Check non regression of a bug if a non item assignable connectivity is
# provided with more than one component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
# When n_clusters is less, the full tree should be built
# that is the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
# When n_clusters is large, greater than max of 100 and 0.02 * n_samples.
# we should stop when there are n_clusters.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
| bsd-3-clause |
oxtopus/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/offsetbox.py | 69 | 17728 | """
The OffsetBox is a simple container artist. Its child artists are meant
to be drawn at a relative position to their parent. The [VH]Packer,
DrawingArea and TextArea are derived from the OffsetBox.
The [VH]Packer automatically adjusts the relative positions of its
children, which should be instances of the OffsetBox. This is used to
align similar artists together, e.g., in a legend.
The DrawingArea can contain any Artist as a child. The
DrawingArea has a fixed width and height. The position of children
relative to the parent is fixed. The TextArea contains a single
Text instance. The width and height of the TextArea instance are the
width and height of its child text.
"""
import matplotlib.transforms as mtransforms
import matplotlib.artist as martist
import matplotlib.text as mtext
import numpy as np
from matplotlib.patches import bbox_artist as mbbox_artist
DEBUG=False
# for debugging use
def bbox_artist(*args, **kwargs):
if DEBUG:
mbbox_artist(*args, **kwargs)
# _get_packed_offsets() and _get_aligned_offsets() are coded assuming
# that we are packing boxes horizontally. But same function will be
# used with vertical packing.
def _get_packed_offsets(wd_list, total, sep, mode="fixed"):
"""
    Given a list of (width, xdescent) for each box, calculate the
    total width and the x-offset positions of each item according to
    *mode*. xdescent is analogous to the usual descent, but along the
    x-direction; xdescent values are currently ignored.
*wd_list* : list of (width, xdescent) of boxes to be packed.
*sep* : spacing between boxes
*total* : Intended total length. None if not used.
*mode* : packing mode. 'fixed', 'expand', or 'equal'.
"""
w_list, d_list = zip(*wd_list)
# d_list is currently not used.
if mode == "fixed":
offsets_ = np.add.accumulate([0]+[w + sep for w in w_list])
offsets = offsets_[:-1]
if total is None:
total = offsets_[-1] - sep
return total, offsets
elif mode == "expand":
sep = (total - sum(w_list))/(len(w_list)-1.)
offsets_ = np.add.accumulate([0]+[w + sep for w in w_list])
offsets = offsets_[:-1]
return total, offsets
elif mode == "equal":
maxh = max(w_list)
if total is None:
total = (maxh+sep)*len(w_list)
else:
sep = float(total)/(len(w_list)) - maxh
offsets = np.array([(maxh+sep)*i for i in range(len(w_list))])
return total, offsets
else:
raise ValueError("Unknown mode : %s" % (mode,))
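# A quick worked example of the default "fixed" mode (numbers are only
# illustrative): _get_packed_offsets([(10, 0), (20, 0)], total=None, sep=5)
# returns (35, array([ 0, 15])) -- each box starts one *sep* after the end of
# the previous one, and the total is the end of the last box.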
def _get_aligned_offsets(hd_list, height, align="baseline"):
"""
    Given a list of (height, ydescent) for each box, align the boxes
    with *align* and calculate the y-offset of each box.
    *hd_list* : list of (height, ydescent) of boxes to be aligned.
    *height* : Intended total height. None if not used.
*align* : align mode. 'baseline', 'top', 'bottom', or 'center'.
"""
if height is None:
height = max([h for h, d in hd_list])
if align == "baseline":
height_descent = max([h-d for h, d in hd_list])
descent = max([d for h, d in hd_list])
height = height_descent + descent
offsets = [0. for h, d in hd_list]
elif align in ["left","top"]:
descent=0.
offsets = [d for h, d in hd_list]
elif align in ["right","bottom"]:
descent=0.
offsets = [height-h+d for h, d in hd_list]
elif align == "center":
descent=0.
offsets = [(height-h)*.5+d for h, d in hd_list]
else:
raise ValueError("Unknown Align mode : %s" % (align,))
return height, descent, offsets
class OffsetBox(martist.Artist):
"""
    The OffsetBox is a simple container artist. Its child artists are meant
    to be drawn at a relative position to their parent.
"""
def __init__(self, *args, **kwargs):
super(OffsetBox, self).__init__(*args, **kwargs)
self._children = []
self._offset = (0, 0)
def set_figure(self, fig):
"""
Set the figure
accepts a class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
for c in self.get_children():
c.set_figure(fig)
def set_offset(self, xy):
"""
Set the offset
accepts x, y, tuple, or a callable object.
"""
self._offset = xy
def get_offset(self, width, height, xdescent, ydescent):
"""
Get the offset
accepts extent of the box
"""
if callable(self._offset):
return self._offset(width, height, xdescent, ydescent)
else:
return self._offset
def set_width(self, width):
"""
Set the width
accepts float
"""
self.width = width
def set_height(self, height):
"""
Set the height
accepts float
"""
self.height = height
def get_children(self):
"""
Return a list of artists it contains.
"""
return self._children
def get_extent_offsets(self, renderer):
        raise NotImplementedError("get_extent_offsets must be overridden in derived classes")
def get_extent(self, renderer):
"""
Return with, height, xdescent, ydescent of box
"""
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
return w, h, xd, yd
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(w, h, xd, yd)
return mtransforms.Bbox.from_bounds(px-xd, py-yd, w, h)
def draw(self, renderer):
"""
Update the location of children if necessary and draw them
to the given *renderer*.
"""
width, height, xdescent, ydescent, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(width, height, xdescent, ydescent)
for c, (ox, oy) in zip(self.get_children(), offsets):
c.set_offset((px+ox, py+oy))
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class PackerBase(OffsetBox):
def __init__(self, pad=None, sep=None, width=None, height=None,
align=None, mode=None,
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
"""
super(PackerBase, self).__init__()
self.height = height
self.width = width
self.sep = sep
self.pad = pad
self.mode = mode
self.align = align
self._children = children
class VPacker(PackerBase):
"""
    The VPacker has its children packed vertically. It automatically
    adjusts the relative positions of its children at drawing time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
"""
super(VPacker, self).__init__(pad, sep, width, height,
align, mode,
children)
def get_extent_offsets(self, renderer):
"""
        update offsets of the children and return the extents of the box
"""
whd_list = [c.get_extent(renderer) for c in self.get_children()]
whd_list = [(w, h, xd, (h-yd)) for w, h, xd, yd in whd_list]
wd_list = [(w, xd) for w, h, xd, yd in whd_list]
width, xdescent, xoffsets = _get_aligned_offsets(wd_list,
self.width,
self.align)
pack_list = [(h, yd) for w,h,xd,yd in whd_list]
height, yoffsets_ = _get_packed_offsets(pack_list, self.height,
self.sep, self.mode)
yoffsets = yoffsets_ + [yd for w,h,xd,yd in whd_list]
ydescent = height - yoffsets[0]
yoffsets = height - yoffsets
#w, h, xd, h_yd = whd_list[-1]
yoffsets = yoffsets - ydescent
return width + 2*self.pad, height + 2*self.pad, \
xdescent+self.pad, ydescent+self.pad, \
zip(xoffsets, yoffsets)
class HPacker(PackerBase):
"""
    The HPacker has its children packed horizontally. It automatically
    adjusts the relative positions of its children at drawing time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
"""
super(HPacker, self).__init__(pad, sep, width, height,
align, mode, children)
def get_extent_offsets(self, renderer):
"""
        update offsets of the children and return the extents of the box
"""
whd_list = [c.get_extent(renderer) for c in self.get_children()]
if self.height is None:
height_descent = max([h-yd for w,h,xd,yd in whd_list])
ydescent = max([yd for w,h,xd,yd in whd_list])
height = height_descent + ydescent
else:
            height = self.height - 2*self.pad # height w/o pad
hd_list = [(h, yd) for w, h, xd, yd in whd_list]
height, ydescent, yoffsets = _get_aligned_offsets(hd_list,
self.height,
self.align)
pack_list = [(w, xd) for w,h,xd,yd in whd_list]
width, xoffsets_ = _get_packed_offsets(pack_list, self.width,
self.sep, self.mode)
xoffsets = xoffsets_ + [xd for w,h,xd,yd in whd_list]
xdescent=whd_list[0][2]
xoffsets = xoffsets - xdescent
return width + 2*self.pad, height + 2*self.pad, \
xdescent + self.pad, ydescent + self.pad, \
zip(xoffsets, yoffsets)
class DrawingArea(OffsetBox):
"""
The DrawingArea can contain any Artist as a child. The DrawingArea
has a fixed width and height. The position of children relative to
the parent is fixed.
"""
def __init__(self, width, height, xdescent=0.,
ydescent=0., clip=True):
"""
*width*, *height* : width and height of the container box.
*xdescent*, *ydescent* : descent of the box in x- and y-direction.
"""
super(DrawingArea, self).__init__()
self.width = width
self.height = height
self.xdescent = xdescent
self.ydescent = ydescent
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the children
"""
return self.offset_transform
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
        Accepts a tuple of x, y coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() #w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox-xd, oy-yd, w, h)
def get_extent(self, renderer):
"""
Return with, height, xdescent, ydescent of box
"""
return self.width, self.height, self.xdescent, self.ydescent
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the container box'
self._children.append(a)
a.set_transform(self.get_transform())
def draw(self, renderer):
"""
Draw the children
"""
for c in self._children:
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class TextArea(OffsetBox):
"""
    The TextArea contains a single Text instance. The text is
    placed at (0, 0) with baseline+left alignment. The width and height
    of the TextArea instance are the width and height of its child
    text.
"""
def __init__(self, s,
textprops=None,
multilinebaseline=None,
minimumdescent=True,
):
"""
*s* : a string to be displayed.
*textprops* : property dictionary for the text
*multilinebaseline* : If True, baseline for multiline text is
                              adjusted so that it is (approximately)
center-aligned with singleline text.
*minimumdescent* : If True, the box has a minimum descent of "p".
"""
if textprops is None:
textprops = {}
if not textprops.has_key("va"):
textprops["va"]="baseline"
self._text = mtext.Text(0, 0, s, **textprops)
OffsetBox.__init__(self)
self._children = [self._text]
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
self._baseline_transform = mtransforms.Affine2D()
self._text.set_transform(self.offset_transform+self._baseline_transform)
self._multilinebaseline = multilinebaseline
self._minimumdescent = minimumdescent
def set_multilinebaseline(self, t):
"""
        Set multilinebaseline.
        If True, baseline for multiline text is
        adjusted so that it is (approximately) center-aligned with
singleline text.
"""
self._multilinebaseline = t
def get_multilinebaseline(self):
"""
        Get multilinebaseline.
"""
return self._multilinebaseline
def set_minimumdescent(self, t):
"""
        Set minimumdescent.
If True, extent of the single line text is adjusted so that
it has minimum descent of "p"
"""
self._minimumdescent = t
def get_minimumdescent(self):
"""
get minimumdescent.
"""
return self._minimumdescent
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
        Accepts a tuple of x, y coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() #w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox-xd, oy-yd, w, h)
def get_extent(self, renderer):
clean_line, ismath = self._text.is_math_text(self._text._text)
_, h_, d_ = renderer.get_text_width_height_descent(
"lp", self._text._fontproperties, ismath=False)
bbox, info = self._text._get_layout(renderer)
w, h = bbox.width, bbox.height
line = info[0][0] # first line
_, hh, dd = renderer.get_text_width_height_descent(
clean_line, self._text._fontproperties, ismath=ismath)
self._baseline_transform.clear()
if len(info) > 1 and self._multilinebaseline: # multi line
d = h-(hh-dd) # the baseline of the first line
d_new = 0.5 * h - 0.5 * (h_ - d_)
self._baseline_transform.translate(0, d - d_new)
d = d_new
else: # single line
h_d = max(h_ - d_, h-dd)
if self.get_minimumdescent():
                # to have a minimum descent, i.e., "l" and "p" have the same
                # descents.
d = max(dd, d_)
else:
d = dd
h = h_d + d
return w, h, 0., d
def draw(self, renderer):
"""
Draw the children
"""
self._text.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
| gpl-3.0 |
phoebe-project/phoebe2-docs | 2.0/tutorials/general_concepts.py | 1 | 23210 | #!/usr/bin/env python
# coding: utf-8
# General Concepts
# ======================
#
# **HOW TO RUN THIS FILE**: if you're running this in a Jupyter notebook or Google Colab session, you can click on a cell and then shift+Enter to run the cell and automatically select the next cell. Alt+Enter will run a cell and create a new cell below it. Ctrl+Enter will run a cell but keep it selected. To restart from scratch, restart the kernel/runtime.
#
# This tutorial introduces all the general concepts of dealing with Parameters, ParameterSets, and the Bundle. This tutorial aims to be quite complete - covering almost everything you can do with Parameters, so on first read you may just want to try to get familiar, and then return here as a reference for any details later.
#
# All of these tutorials assume basic comfort with Python in general - particularly with the concepts of lists, dictionaries, and objects as well as basic comfort with using the numpy and matplotlib packages.
#
# Setup
# ----------------------------------------------
#
#
# Let's first make sure we have the latest version of PHOEBE 2.0 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.0,<2.1"')
# Let's get started with some basic imports
# In[1]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
# If running in IPython notebooks, you may see a "ShimWarning" depending on the version of Jupyter you are using - this is safe to ignore.
#
# PHOEBE 2 uses constants defined in the IAU 2015 Resolution which conflict with the constants defined in astropy. As a result, you'll see the warnings as phoebe.u and phoebe.c "hijacks" the values in astropy.units and astropy.constants.
#
# Whenever providing units, please make sure to use phoebe.u instead of astropy.units, otherwise the conversions may be inconsistent.
# ### Logger
#
# Before starting any script, it is a good habit to initialize a logger and define which levels of information you want printed to the command line (clevel) and dumped to a file (flevel).
#
# The levels from most to least information are:
#
# * DEBUG
# * INFO
# * WARNING
# * ERROR
# * CRITICAL
#
# In[2]:
logger = phoebe.logger(clevel='WARNING', flevel='DEBUG', filename='tutorial.log')
# All of these arguments are optional and will default to clevel='WARNING' if not provided. There is therefore no need to provide a filename if you don't provide a value for flevel.
#
# So with this logger, anything with WARNING, ERROR, or CRITICAL levels will be printed to the screen. All messages of any level will be written to a file named 'tutorial.log' in the current directory.
#
# Note: the logger messages are not included in the outputs shown below.
#
#
#
# Parameters
# ------------------------
#
# Parameters hold a single value, but need to be aware about their own types, limits, and connection with other Parameters (more on this later when we discuss ParameterSets).
#
# Note that generally you won't ever have to "create" or "define" your own Parameters, those will be created for you by helper functions, but we have to start somewhere... so let's create our first Parameter.
#
# We'll start with creating a StringParameter since it is the most generic, and then discuss and specific differences for each type of Parameter.
# In[3]:
param = phoebe.parameters.StringParameter(qualifier='myparameter',
description='mydescription',
value='myvalue')
# If you ever need to know the type of a Parameter, you can always use python's built-in type functionality:
# In[4]:
print type(param)
# If we print the parameter object we can see a summary of information
# In[5]:
print param
# You can see here that we've defined three a few things about parameter: the qualifier, description, and value (others do exist, they just don't show up in the summary).
#
# These "things" can be split into two groups: tags and attributes (although in a pythonic sense, both can be accessed as attributes). Don't worry too much about this distinction - it isn't really important except for the fact that tags are shared across **all** Parameters whereas attributes are dependent on the type of the Parameter.
#
# The tags of a Parameter define the Parameter and how it connects to other Parameters (again, more on this when we get to ParameterSets). For now, just know that you can access a list of all the tags as follows:
# In[6]:
print param.meta
# and that each of these is available through both a dictionary key and an object attribute. For example:
# In[7]:
print param['qualifier'], param.qualifier
# The 'qualifier' attribute is essentially an abbreviated name for the Parameter.
#
# These tags will be shared across **all** Parameters, regardless of their type.
#
# Attributes, on the other hand, can be dependent on the type of the Parameter and tell the Parameter its rules and how to interpret its value. You can access a list of available attributes as follows:
# In[8]:
param.attributes
# and again, each of these is available through both a dictionary key and an object attribute. For example, all parameters have a **'description'** attribute which gives additional information about what the Parameter means:
# In[9]:
print param['description'], param.description
# For the special case of the **'value'** attribute, there is also a get method (will become handy later when we want to be able to request the value in a specific unit).
# In[10]:
print param.get_value(), param['value'], param.value
# The value attribute is also the only attribute that you'll likely want to change, so it also has a set method:
# In[11]:
param.set_value('newvalue')
print param.get_value()
# The **'visible_if'** attribute only comes into play when the Parameter is a member of a ParameterSet, so we'll discuss it at the end of this tutorial when we get to ParameterSets.
#
# The **'copy_for'** attribute is only used when the Parameter is in a particular type of ParameterSet called a Bundle (explained at the very end of this tutorial). We'll see the 'copy_for' capability in action later in the [Datasets Tutorial](datasets), but for now, just know that you can *view* this property only and cannot change it... and most of the time it will just be an empty string.
# ### StringParameters
#
# We'll just mention StringParameters again for completeness, but we've already seen about all they can do - the value must cast to a valid string but no limits or checks are performed at all on the value.
# ### ChoiceParameters
#
# ChoiceParameters are essentially StringParameters with one very important exception: the value **must** match one of the prescribed choices.
#
# Therefore, they have a 'choices' attribute, and an error will be raised if trying to set the value to any string not in that list.
# In[12]:
param = phoebe.parameters.ChoiceParameter(qualifier='mychoiceparameter',
description='mydescription',
choices=['choice1', 'choice2'],
value='choice1')
# In[13]:
print param
# In[14]:
print param.attributes
# In[15]:
print param['choices'], param.choices
# In[16]:
print param.get_value()
# In[17]:
#param.set_value('not_a_choice') # would raise a ValueError
param.set_value('choice2')
print param.get_value()
# ### FloatParameters
#
# FloatParameters are probably the most common Parameter used in PHOEBE and hold both a float and a unit, with the ability to retrieve the value in any other convertible unit.
# In[18]:
param = phoebe.parameters.FloatParameter(qualifier='myfloatparameter',
description='mydescription',
default_unit=u.m,
limits=[None,20],
value=5)
# In[19]:
print param
# You'll notice here a few new mentions in the summary... "Constrained by", "Constrains", and "Related to" are all referring to [constraints which will be discussed in a future tutorial](constraints).
# In[20]:
print param.attributes
# FloatParameters have an attribute which holds the "limits" - whenever a value is set it will be checked to make sure it falls within the limits. If either the lower or upper limit is None, then there is no limit check for that extreme.
# In[21]:
print param['limits'], param.limits
# In[22]:
#param.set_value(30) # would raise a ValueError
param.set_value(2)
print param.get_value()
# FloatParameters have an attribute which holds the "default_unit" - this is the unit in which the value is stored **and** the unit that will be provided if not otherwise overridden.
# In[23]:
print param['default_unit'], param.default_unit
# Calling get_value will then return a float in these units
# In[24]:
print param.get_value()
# But we can also request the value in a different unit, by passing an [astropy Unit object](http://docs.astropy.org/en/stable/units/) or its string representation.
# In[25]:
print param.get_value(unit=u.km), param.get_value(unit='km')
# FloatParameters also have their own method to access an [astropy Quantity object](http://docs.astropy.org/en/stable/units/) that includes both the value and the unit
# In[26]:
print param.get_quantity(), param.get_quantity(unit=u.km)
# The set_value method also accepts a unit - this doesn't change the default_unit internally, but instead converts the provided value before storing.
# In[27]:
param.set_value(10)
print param.get_quantity()
# In[28]:
param.set_value(0.001*u.km)
print param.get_quantity()
# In[29]:
param.set_value(10, unit='cm')
print param.get_quantity()
# If for some reason you want to change the default_unit, you can do so as well:
# In[30]:
param.set_default_unit(u.km)
print param.get_quantity()
# But note that the limits are still stored as a quantity object in the originally defined default_units
# In[31]:
print param.limits
# ### IntParameters
#
# IntParameters are essentially the same as FloatParameters except they always cast to an Integer and they have no units.
# In[32]:
param = phoebe.parameters.IntParameter(qualifier='myintparameter',
description='mydescription',
limits=[0,None],
value=1)
# In[33]:
print param
# In[34]:
print param.attributes
# Like FloatParameters above, IntParameters still have limits
# In[35]:
print param['limits'], param.limits
# Note that if you try to set the value to a float it will not raise an error, but will cast that value to an integer (following python rules of truncation, not rounding)
# In[36]:
param.set_value(1.9)
print param.get_value()
# ### Bool Parameters
#
# Boolean Parameters are even simpler - they accept True or False.
# In[37]:
param = phoebe.parameters.BoolParameter(qualifier='myboolparameter',
description='mydescription',
value=True)
# In[38]:
print param
# In[39]:
print param.attributes
# Note that, like IntParameters, BoolParameters will attempt to cast anything you give it into True or False.
# In[40]:
param.set_value(0)
print param.get_value()
# In[41]:
param.set_value(None)
print param.get_value()
# As with Python, an empty string will cast to False and a non-empty string will cast to True
# In[42]:
param.set_value('')
print param.get_value()
# In[43]:
param.set_value('some_string')
print param.get_value()
# The only exception to this is that (unlike Python), 'true' or 'True' will cast to True and 'false' or 'False' will cast to False.
# In[44]:
param.set_value('False')
print param.get_value()
# In[45]:
param.set_value('false')
print param.get_value()
# ### FloatArrayParameters
#
# FloatArrayParameters are essentially the same as FloatParameters (in that they have the same unit treatment, although obviously no limits) but hold numpy arrays rather than a single value.
#
# By convention in Phoebe, these will (almost) always have a pluralized qualifier.
# In[46]:
param = phoebe.parameters.FloatArrayParameter(qualifier='myfloatarrayparameters',
description='mydescription',
default_unit=u.m,
value=np.array([0,1,2,3]))
# In[47]:
print param
# In[48]:
print param.attributes
# In[49]:
print param.get_value(unit=u.km)
# FloatArrayParameters also allow for built-in interpolation... but this requires them to be a member of a Bundle, so we'll discuss this in just a bit.
# ParameterSets
# ----------------------------
#
# ParameterSets are a collection of Parameters that can be filtered by their tags to return another ParameterSet.
#
# For illustration, let's create 3 random FloatParameters and combine them to make a ParameterSet.
# In[50]:
param1 = phoebe.parameters.FloatParameter(qualifier='param1',
description='param1 description',
default_unit=u.m,
limits=[None,20],
value=5,
context='context1',
kind='kind1')
param2 = phoebe.parameters.FloatParameter(qualifier='param2',
description='param2 description',
default_unit=u.deg,
limits=[0,2*np.pi],
value=0,
context='context2',
kind='kind2')
param3 = phoebe.parameters.FloatParameter(qualifier='param3',
description='param3 description',
default_unit=u.kg,
limits=[0,2*np.pi],
value=0,
context='context1',
kind='kind2')
# In[51]:
ps = phoebe.parameters.ParameterSet([param1, param2, param3])
# In[52]:
print ps.to_list()
# If we print a ParameterSet, we'll see a listing of all the Parameters and their values.
# In[53]:
print ps
# ### Twigs
# The string notation used for the Parameters is called a 'twig' - it's simply a combination of all the tags joined with the '@' symbol and gives a very convenient way to access any Parameter.
#
# The order of the tags doesn't matter, and you only need to provide enough tags to produce a unique match. Since there is only one parameter with kind='kind1', we do not need to provide the extraneous context='context1' in the twig to get a match.
# In[54]:
print ps.get('param1@kind1')
# Note that this returned the Parameter object itself, so you can now use any of the Parameter methods or attributes we saw earlier. For example:
# In[55]:
print ps.get('param1@kind1').description
# But we can also use the set_value and get_value methods from the ParameterSet itself:
# In[56]:
ps.set_value('param1@kind1', 10)
print ps.get_value('param1@kind1')
# ### Tags
# Each Parameter has a number of tags, and the ParameterSet has the same tags - where the value of any given tag is None if not shared by all Parameters in that ParameterSet.
#
# So let's just print the names of the tags again and then describe what each one means.
# In[57]:
print ps.meta.keys()
# Most of these "metatags" act as labels - for example, you can give a component tag to each of the components for easier referencing.
#
# But a few of these tags are fixed and not editable:
#
# * qualifier: literally the name of the parameter.
# * kind: tells what kind a parameter is (ie whether a component is a star or an orbit).
# * context: tells what context this parameter belongs to
# * twig: a shortcut to the parameter in a single string.
# * uniquetwig: the minimal twig needed to reach this parameter.
# * uniqueid: an internal representation used to reach this parameter
#
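# For example, using the ParameterSet built above (a minimal sketch -- the exact twig strings depend on which tags happen to be set):

print ps.get(qualifier='param1').twig
print ps.get(qualifier='param1').uniquetwig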
# These contexts are (you'll notice that most are represented in the tags):
#
# * setting
# * history
# * system
# * component
# * feature
# * dataset
# * constraint
# * compute
# * model
# * fitting [not yet supported]
# * feedback [not yet supported]
# * plugin [not yet supported]
#
# One way to distinguish between context and kind is with the following question and answer:
#
# "What kind of **[context]** is this? It's a **[kind]** tagged **[context]**=**[tag-with-same-name-as-context]**."
#
# In different cases, this will then become:
#
# * "What kind of **component** is this? It's a **star** tagged **component**=**starA**." (context='component', kind='star', component='starA')
# * "What kind of **feature** is this? It's a **spot** tagged **feature**=**spot01**." (context='feature', kind='spot', feature='spot01')
# * "What kind of **dataset** is this? It's a **LC (light curve)** tagged **dataset**=**lc01**." (context='dataset', kind='LC', dataset='lc01')
# * "What kind of **compute** (options) are these? They're **phoebe** (compute options) tagged **compute**=**preview**." (context='compute', kind='phoebe', compute='preview')
#
#
# As we saw before, these tags can be accessed at the Parameter level as either a dictionary key or as an object attribute. For ParameterSets, the tags are only accessible through object attributes.
# In[58]:
print ps.context
# This returns None since not all objects in this ParameterSet share a single context. But you can see all the options for a given tag by providing the plural version of that tag name:
# In[59]:
print ps.contexts
# ### Filtering
#
# Any of the tags can also be used to filter the ParameterSet:
# In[60]:
print ps.filter(context='context1')
# Here we were returned a ParameterSet of all Parameters that matched the filter criteria. Since we're returned another ParameterSet, we can chain additional filter calls together.
# In[61]:
print ps.filter(context='context1').filter(kind='kind1')
# Now we see that we have drilled down to a single Parameter. Note that a ParameterSet is still returned - filter will *always* return a ParameterSet.
#
# We could have accomplished the exact same thing with a single call to filter:
# In[62]:
print ps.filter(context='context1', kind='kind1')
# If you want to access the actual Parameter, you must use get instead of (or in addition to) filter. All of the following lines do the exact same thing:
# In[63]:
print ps.filter(context='context1', kind='kind1').get()
# In[64]:
print ps.get(context='context1', kind='kind1')
# Or we can use those twigs. Remember that twigs are just a combination of these tags separated by the @ symbol. You can use these for dictionary access in a ParameterSet - without needing to provide the name of the tag, and without having to worry about order. And whenever this returns a ParameterSet, these are also chainable, so the following two lines will do the same thing:
# In[65]:
print ps['context1@kind1']
# In[66]:
print ps['context1']['kind1']
# You may notice that the final result was a Parameter, not a ParameterSet. Twig dictionary access tries to be smart - if exactly 1 Parameter is found, it will return that Parameter instead of a ParameterSet. Notice the difference between the two following lines:
# In[67]:
print ps['context1']
# In[68]:
print ps['context1@kind1']
# Of course, once you get the Parameter you can then use dictionary keys to access any attributes of that Parameter.
# In[69]:
print ps['context1@kind1']['description']
# So we decided we might as well allow access to those attributes directly from the twig as well
# In[70]:
print ps['description@context1@kind1']
# The Bundle
# ------------
#
# The Bundle is nothing more than a glorified ParameterSet with some extra methods to compute models, add new components and datasets, etc.
#
# You can initialize an empty Bundle as follows:
# In[71]:
b = phoebe.Bundle()
print b
# and filter just as you would for a ParameterSet
# In[72]:
print b.filter(context='system')
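# (In practice you will rarely start from a completely empty Bundle -- presumably a helper such as phoebe.default_binary() returns a Bundle pre-populated with all the Parameters of a binary system; we stick with hand-built Bundles here to keep the examples minimal.)
#
#b = phoebe.default_binary()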
# ### Visible If
#
# As promised earlier, the 'visible_if' attribute of a Parameter controls whether it's visible to a ParameterSet... but it only does anything if the Parameter belongs to a Bundle.
#
# Let's make a new ParameterSet in which the visibility of one parameter is dependent on the value of another.
# In[73]:
param1 = phoebe.parameters.ChoiceParameter(qualifier='what_is_this',
choices=['matter', 'aether'],
value='matter',
context='context1')
param2 = phoebe.parameters.FloatParameter(qualifier='mass',
default_unit=u.kg,
value=5,
visible_if='what_is_this:matter',
context='context1')
b = phoebe.Bundle([param1, param2])
# In[74]:
print b.filter()
# It doesn't make much sense to need to define a mass if this thing isn't baryonic. So if we change the value of 'what_is_this' to 'aether' then the 'mass' Parameter will temporarily hide itself.
# In[75]:
b.set_value('what_is_this', 'aether')
print b.filter()
# ### FloatArrayParameters: interpolation
#
# As mentioned earlier, when part of a Bundle, FloatArrayParameters can handle simple linear interpolation with respect to another FloatArrayParameter in the same Bundle.
# In[76]:
xparam = phoebe.parameters.FloatArrayParameter(qualifier='xs',
default_unit=u.d,
value=np.linspace(0,1,10),
context='context1')
yparam = phoebe.parameters.FloatArrayParameter(qualifier='ys',
default_unit=u.m,
value=np.linspace(0,1,10)**2,
context='context1')
b = phoebe.Bundle([xparam, yparam])
# In[77]:
b.filter('ys').get().twig
# In[78]:
b['ys'].get_value()
# Now we can interpolate the 'ys' param for any given value of 'xs'
# In[79]:
b['ys'].interp_value(xs=0)
# In[80]:
b['ys'].interp_value(xs=0.2)
# **NOTE**: interp_value does not (yet) support passing a unit.. it will always return a value (not a quantity) and will always be in the default_unit.
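#
# If a quantity is needed instead, the returned value can be wrapped manually (a minimal sketch -- here we attach the default_unit of 'ys', which is u.m, and convert):

print (b['ys'].interp_value(xs=0.2)*u.m).to(u.km)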
# Next
# ----------
#
# Next up: let's [build a system](building_a_system.ipynb)
#
| gpl-3.0 |
h-mayorquin/camp_india_2016 | tutorials/LTPinnetworks2/Step1d_weight_distributions.py | 1 | 5120 | #!/usr/bin/env python
'''
Author: Aditya Gilra, Jun 2016. (with inputs from Matthieu Gilson)
in Brian2rc3 for CAMP 2016.
'''
#import modules and functions to be used
from brian2 import * # importing brian also does:
# 'from pylab import *' which imports:
# matplot like commands into the namespace, further
# also can use np. for numpy and mpl. for matplotlib
stand_alone = True
if stand_alone: set_device('cpp_standalone', build_on_run=False)
else:
#prefs.codegen.target = 'numpy'
prefs.codegen.target = 'weave'
#prefs.codegen.target = 'cython'
import random
import time
# http://stackoverflow.com/questions/31057197/should-i-use-random-seed-or-numpy-random-seed-to-control-random-number-gener
np.random.seed(0) # set seed for reproducibility of simulations
random.seed(0) # set seed for reproducibility of simulations
# ###########################################
# Simulation parameters
# ###########################################
simdt = 0.1*ms
simtime = 10.0*second
defaultclock.dt = simdt # set Brian's sim time step
simdtraw = simdt/second # convert to value in seconds
# ###########################################
# Neuron model
# ###########################################
taudelay = 0.75*ms # synaptic delay
tauA = 1*ms # synaptic epsp tauA
tauB = 5*ms # synaptic epsp tauB
eqs_neurons='''
dA/dt=-A/tauA : 1
dB/dt=-B/tauB : 1
rho_out = (A-B)/(tauA-tauB) : Hz
'''
# ###########################################
# Network parameters: numbers
# ###########################################
Ninp = 2000 # Number of neurons per pool
nu0 = 10*Hz # spiking rate of inputs
# ###########################################
# Network parameters: synaptic plasticity
# ###########################################
eta = 1e-2 # learning rate (as in paper)
Apre_tau = 17*ms # STDP Apre (LTP) time constant
Apost_tau = 34*ms # STDP Apost (LTD) time constant
stdp_eqns = ''' w : 1
dApre/dt=-Apre/Apre_tau : 1 (event-driven)
dApost/dt=-Apost/Apost_tau : 1 (event-driven)
'''
w0 = 0.05 # reference weight
Apre0 = w0 # incr in Apre (LTP), on pre-spikes;
# at spike coincidence, delta w = -Apre0*eta
Apost0 = Apre0 * Apre_tau / Apost_tau
# incr in Apost (LTD) on post spike
beta = 50 # LTP decay factor
alpha = 5 # LTD curvature factor
std_noise = 2.0 # std deviation of noise
pre_eqns = 'Apre+=Apre0; w = clip(w - eta*Apost*log(1+w/w0*alpha)/log(1+alpha)*(randn()*std_noise+1),0,inf)'
post_eqns = 'Apost+=Apost0; w = clip(w + eta*Apre*exp(-w/w0/beta), 0,inf)'
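# Note on the weight updates above: each pre-synaptic spike depresses w in
# proportion to the post-synaptic trace Apost and to a factor that grows
# logarithmically with w (jittered by multiplicative noise of std std_noise),
# while each post-synaptic spike potentiates w in proportion to the
# pre-synaptic trace Apre scaled by exp(-w/(w0*beta)), so potentiation weakens
# as w grows; both updates are clipped to keep w non-negative.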
winit = w0 # initial weights are from 0 to winit
# ###########################################
# Initialize neuron (sub)groups
# ###########################################
# post-synaptic neuron
P=NeuronGroup(1,model=eqs_neurons,\
threshold='rand()<rho_out*dt',method='euler')
# ###########################################
# Stimuli
# ###########################################
Pinp1 = PoissonGroup(Ninp,rates=nu0)
# ###########################################
# Connecting the network
# ###########################################
con = Synapses(Pinp1,P,stdp_eqns,\
on_pre='A+=w*0.1;B+=w*0.1;'+pre_eqns,on_post=post_eqns,
method='euler')
con.connect(True)
con.delay = uniform(size=(Ninp,))*1.*ms + 4.*ms
con.w = uniform(size=(Ninp,))*2*winit
# ###########################################
# Setting up monitors
# ###########################################
sm = SpikeMonitor(P)
sminp1 = SpikeMonitor(Pinp1)
# Population monitor
popm = PopulationRateMonitor(P)
popminp1 = PopulationRateMonitor(Pinp1)
# voltage monitor
sm_rho = StateMonitor(P,'rho_out',record=[0])
# weights monitor
wm = StateMonitor(con,'w',record=range(Ninp), dt=simtime/100.)
# ###########################################
# Simulate
# ###########################################
# a simple run would not include the monitors
net = Network(collect()) # collects Brian2 objects in current context
print "Setup complete, running for",simtime,"at dt =",simdtraw,"s."
t1 = time.time()
net.run(simtime,report='text')
device.build(directory='output', compile=True, run=True, debug=False)
# ###########################################
# Make plots
# ###########################################
# always convert spikemon.t and spikemon.i to array-s before indexing
# spikemon.i[] indexing is extremely slow!
spiket = array(sm.t/second) # take spiketimes of all neurons
spikei = array(sm.i)
fig = figure()
subplot(131)
plot(popm.t/second,popm.smooth_rate(width=50.*ms,window="gaussian")/Hz,',-')
xlabel('time (s)')
ylabel('post-rate (Hz)')
# weight evolution
subplot(132)
plot(wm.t/second,transpose(wm.w[:20,:]),',-')
xlabel('time (s)')
ylabel("weight (arb)")
yticks([0,1,2])
title('weights evolution')
subplot(133)
hist(wm.w[:,-1],bins=50,edgecolor='none')
xlabel('weight')
ylabel('count')
show()
| mit |
tiagoantao/genomics-notebooks | src/parameter.py | 1 | 7816 | # -*- coding: utf-8 -*-
'''
.. module:: genomics
:synopsis: PopGen classes with simulations
:noindex:
:copyright: Copyright 2014 by Tiago Antao
:license: GNU Affero, see LICENSE for details
.. moduleauthor:: Tiago Antao <[email protected]>
'''
from collections import defaultdict
import numpy as np
from sklearn.decomposition import PCA as pca
import simuOpt
simuOpt.setOptions(gui=False, quiet=True)
import simuPOP as sp
from simuPOP import sampling
def _get_sub_sample(pop, size, sub_pop=None):
'''Gets a subsample of individuals.'''
if sub_pop is None:
pop_s = pop
else:
pop_s = pop.extractSubPops(subPops=[sub_pop])
if size is None:
return pop_s
pop_s = sampling.drawRandomSample(pop_s, sizes=size)
return pop_s
class Parameter:
    '''A simulation parameter. Abstract super-class.'''
def __init__(self, do_structured=False):
self.name = None
self.desc = None
self.do_structured = do_structured
self._sample_size = None
self._simupop_stats = []
self._info_fields = []
self._pop = None
def _get_values(self, pop):
'''Returns the parameter values for a certain subpopulation.
Implemented on concrete class.
'''
raise NotImplementedError('Needs to be implemented')
def get_values(self, pop, sub_pop=None):
'''Returns the parameter values for a certain subpopulation.'''
if self.do_structured:
pop_ = _get_sub_sample(pop, self.sample_size, sub_pop)
else:
pop_ = _get_sub_sample(pop, self.sample_size)
ind_values = self._get_values(pop_)
return ind_values
    @property
    def pop(self):
        '''Population'''
        return self._pop
@pop.setter
def pop(self, value):
'''Population setter.'''
self._pop = value
@property
def simupop_stats(self):
'''Statistics that simupop needs to compute for this parameter.
This is normally added to evolve postOps.'''
return self._simupop_stats
@property
def sample_size(self):
'''Parameter sample size.'''
return self._sample_size
@sample_size.setter
def sample_size(self, value):
'''Sample size setter.'''
self._sample_size = value
@property
def info_fields(self):
'''Fields that need to be available on the Population object'''
return self._info_fields
@info_fields.setter
def info_fields(self, value):
'''Info_fields setter.'''
self._info_fields = value
class ObsHe(Parameter):
'''Observed Heterozygosity'''
def __init__(self):
Parameter.__init__(self)
self.name = 'ObsHe'
self.desc = 'Observed Heterozygozity'
def _get_values(self, pop):
stat = sp.Stat(heteroFreq=True)
stat.apply(pop)
loci = list(pop.dvars().heteroFreq.keys())
loci.sort()
return [pop.dvars().heteroFreq[l] for l in loci]
class ExpHe(Parameter):
'''Expected Heterozygosity'''
def __init__(self, **kwargs):
Parameter.__init__(self, kwargs)
self.name = 'ExpHe'
self.desc = 'Expected Heterozygozity'
def _get_values(self, pop):
stat = sp.Stat(alleleFreq=True)
stat.apply(pop)
freqs = pop.dvars().alleleFreq
loci = list(freqs.keys())
loci.sort()
exp_he = []
for locus in loci:
afreqs = freqs[locus]
exp_ho = 0
for freq in afreqs.values():
exp_ho += freq * freq
exp_he.append(1 - exp_ho)
return exp_he
class NumAlleles(Parameter):
'''Number of Alleles'''
def __init__(self, **kwargs):
Parameter.__init__(self, kwargs)
self.name = 'NumAlleles'
self.desc = 'Number of Alleles'
def _get_values(self, pop):
stat = sp.Stat(alleleFreq=True)
stat.apply(pop)
anum = pop.dvars().alleleNum
loci = list(anum.keys())
loci.sort()
anums = [len(anum[l]) for l in loci]
return anums
class LDNe(Parameter):
'''Estimating Ne according to LD (Waples)'''
def __init__(self, pcrit=0.02, **kwargs):
Parameter.__init__(self, kwargs)
self.name = 'LDNe'
self.desc = 'LDNe'
self.pcrit = pcrit
def _get_values(self, pop):
stat = sp.Stat(effectiveSize=sp.ALL_AVAIL, vars='Ne_LD')
stat.apply(pop)
ne_est = pop.dvars().Ne_LD
return ne_est[self.pcrit]
class FreqDerived(Parameter):
'''Frequency of the derived allele.'''
def __init__(self, **kwargs):
Parameter.__init__(self, kwargs)
self.name = 'FreqDerived'
self.desc = 'Frequency of the Derived Allele'
def _get_values(self, pop):
stat = sp.Stat(alleleFreq=True)
stat.apply(pop)
anum = pop.dvars().alleleFreq
loci = list(anum.keys())
loci.sort()
anums = [anum[l][1] for l in loci]
return anums
class StructuredParameter(Parameter):
'''A parameter that is applied to population structure.'''
def __init__(self, **kwargs):
kwargs['do_structured'] = True
Parameter.__init__(self, kwargs)
class FST(StructuredParameter):
'''Mean FST.'''
def __init__(self, **kwargs):
StructuredParameter.__init__(self)
self.name = 'FST'
self.desc = 'FST'
def _get_values(self, pop):
stat = sp.Stat(structure=sp.ALL_AVAIL)
stat.apply(pop)
my_fst = pop.dvars().F_st
return [my_fst]
class fst(StructuredParameter):
'''FST per locus.'''
def __init__(self):
StructuredParameter.__init__(self)
self.name = 'fst'
self.desc = 'FST per locus'
def _get_values(self, pop):
st = sp.Stat(structure=sp.ALL_AVAIL, vars=['f_st'])
st.apply(pop)
fsts = pop.dvars().f_st
loci = list(fsts.keys())
return [fsts[l] for l in loci]
class IndividualParameter(Parameter):
'''A Parameter that returns a value per individual'''
def __init__(self):
Parameter.__init__(self)
class PCA(IndividualParameter):
'''Principal Components Analysis.'''
def __init__(self):
IndividualParameter.__init__(self)
self.info_fields = ['ind_id']
def _get_values(self, pop):
nsp = pop.numSubPop()
all_alleles = []
for subpop in range(nsp):
for ind in pop.individuals(subPop=subpop):
geno = ind.genotype()
n_markers = len(geno) // 2
for mi in range(n_markers):
if len(all_alleles) <= mi:
all_alleles.append(set())
a1 = geno[mi]
a2 = geno[mi + n_markers]
all_alleles[mi].add(a1)
all_alleles[mi].add(a2)
for i, alleles in enumerate(all_alleles):
all_alleles[i] = sorted(list(alleles))
inds = defaultdict(list)
for mi in range(n_markers):
for subpop in range(nsp):
for i, ind in enumerate(pop.individuals(subPop=subpop)):
geno = ind.genotype()
a1 = geno[mi]
a2 = geno[mi + n_markers]
for a in all_alleles[mi]:
inds[(subpop, i)].append([a1, a2].count(a))
ind_order = sorted(list(inds.keys()))
arr = []
for ind in ind_order:
arr.append(inds[ind])
my_pca = pca(n_components=2)
X = np.array(arr)
my_pca.fit(X)
X_r = my_pca.transform(X)
my_components = {}
for i, ind in enumerate(ind_order):
my_components[ind] = X_r[i]
return my_components
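# A hedged usage sketch (``pop`` is assumed to be an initialized simuPOP
# Population that already carries genotype data):
#
#     exp_he = ExpHe()
#     exp_he.sample_size = 50
#     print(exp_he.get_values(pop))   # expected heterozygosity per locus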
| agpl-3.0 |
jhlch/sparklingpandas | sparklingpandas/pcontext.py | 1 | 13246 | """Provide an easy interface for loading data into L{DataFrame}s for Spark.
"""
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sparklingpandas.utils import add_pyspark_path
add_pyspark_path()
import pandas
from StringIO import StringIO as sio
from pyspark.context import SparkContext
from sparklingpandas.dataframe import DataFrame, _normalize_index_names
class PSparkContext():
"""This is a thin wrapper around SparkContext from PySpark which makes it
easy to load data into L{DataFrame}s."""
def __init__(self, spark_context, sql_ctx=None):
"""Initialize a PSparkContext with the associacted spark context,
and Spark SQL context if provided. This context is usef to load
data into L{DataFrame}s.
Parameters
----------
spark_context: SparkContext
Initialized and configured spark context. If you are running in the
PySpark shell, this is already created as "sc".
sql_ctx: SQLContext, optional
Initialized and configured SQL context, if not provided Sparkling
Panda's will create one.
Returns
-------
Correctly initialized SparklingPandasContext.
"""
self.spark_ctx = spark_context
if sql_ctx:
self.sql_ctx = sql_ctx
else:
print "No sql context provided, creating"
from pyspark.sql import SQLContext
self.sql_ctx = SQLContext(self.spark_ctx)
@classmethod
def simple(cls, *args, **kwargs):
"""Takes the same arguments as SparkContext and constructs a
PSparkContext"""
return PSparkContext(SparkContext(*args, **kwargs))
def read_csv(self, file_path, use_whole_file=False, names=None, skiprows=0,
*args, **kwargs):
"""Read a CSV file in and parse it into Pandas DataFrames. By default,
the first row from the first partition of that data is parsed and used
as the column names for the data from. If no 'names' param is
provided we parse the first row of the first partition of data and
use it for column names.
Parameters
----------
file_path: string
Path to input. Any valid file path in Spark works here, eg:
'file:///my/path/in/local/file/system' or 'hdfs:/user/juliet/'
use_whole_file: boolean
            Whether or not to use whole files (wholeTextFiles) rather than
            reading the input line by line.
names: list of strings, optional
skiprows: integer, optional
indicates how many rows of input to skip. This will
only be applied to the first partition of the data (so if
#skiprows > #row in first partition this will not work). Generally
this shouldn't be an issue for small values of skiprows.
            The pandas 'header' option is not supported; header=None is
            always used internally.
All additional parameters available in pandas.read_csv() are usable
here.
Returns
-------
A SparklingPandas DataFrame that contains the data from the
specified file.
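        Examples
        --------
        A sketch of typical usage (the application name and path are
        hypothetical):

        >>> psc = PSparkContext.simple(appName='csv-example')
        >>> prices = psc.read_csv('hdfs:/user/juliet/prices.csv')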
"""
def csv_file(partition_number, files):
# pylint: disable=unexpected-keyword-arg
file_count = 0
for _, contents in files:
# Only skip lines on the first file
if partition_number == 0 and file_count == 0 and _skiprows > 0:
yield pandas.read_csv(
sio(contents), *args,
header=None,
names=mynames,
skiprows=_skiprows,
**kwargs)
else:
file_count += 1
yield pandas.read_csv(
sio(contents), *args,
header=None,
names=mynames,
**kwargs)
def csv_rows(partition_number, rows):
# pylint: disable=unexpected-keyword-arg
in_str = "\n".join(rows)
if partition_number == 0:
return iter([
pandas.read_csv(
sio(in_str), *args, header=None,
names=mynames,
skiprows=_skiprows,
**kwargs)])
else:
                # could use .iterrows instead?
return iter([pandas.read_csv(sio(in_str), *args, header=None,
names=mynames, **kwargs)])
        # If we need to peek at the first partition and determine the column
# names
mynames = None
_skiprows = skiprows
if names:
mynames = names
else:
# In the future we could avoid this expensive call.
first_line = self.spark_ctx.textFile(file_path).first()
frame = pandas.read_csv(sio(first_line), **kwargs)
# pylint sees frame as a tuple despite it being a DataFrame
mynames = list(frame.columns)
_skiprows += 1
# Do the actual load
if use_whole_file:
return self.from_pandas_rdd(
self.spark_ctx.wholeTextFiles(file_path)
.mapPartitionsWithIndex(csv_file))
else:
return self.from_pandas_rdd(
self.spark_ctx.textFile(file_path)
.mapPartitionsWithIndex(csv_rows))
def parquetFile(self, *paths):
"""Loads a Parquet file, returning the result as a L{DataFrame}.
Parameters
----------
paths: string, variable length
The path(s) of the parquet files to load. Should be Hadoop style
paths (e.g. hdfs://..., file://... etc.).
Returns
-------
A L{DataFrame} of the contents of the parquet files.
"""
return self.from_spark_rdd(self.sql_ctx.parquetFile(paths))
def jsonFile(self, path, schema=None, sampling_ratio=1.0):
"""Loads a text file storing one JSON object per line as a
L{DataFrame}.
Parameters
----------
path: string
The path of the json files to load. Should be Hadoop style
paths (e.g. hdfs://..., file://... etc.).
schema: StructType, optional
If you know the schema of your input data you can specify it. The
schema is specified using Spark SQL's schema format. If not
specified will sample the json records to determine the schema.
Spark SQL's schema format is documented (somewhat) in the
"Programmatically Specifying the Schema" of the Spark SQL
programming guide at: http://bit.ly/sparkSQLprogrammingGuide
sampling_ratio: int, default=1.0
            Percentage of the records to sample when inferring schema.
            Defaults to all records for safety, but you may be able to set to
            a lower ratio if the same fields are present across records or
your input is of sufficient size.
Returns
-------
A L{DataFrame} of the contents of the json files.
"""
schema_rdd = self.sql_ctx.jsonFile(path, schema, sampling_ratio)
return self.from_spark_rdd(schema_rdd)
def from_pd_data_frame(self, local_df):
"""Make a Sparkling Pandas dataframe from a local Pandas DataFrame.
        The intended use is for testing or joining distributed data with
        local data.
        The types are re-inferred, so they may not match.
Parameters
----------
local_df: Pandas DataFrame
The data to turn into a distributed Sparkling Pandas DataFrame.
See http://bit.ly/pandasDataFrame for docs.
Returns
-------
A Sparkling Pandas DataFrame.
"""
def frame_to_rows(frame):
"""Convert a Pandas DataFrame into a list of Spark SQL Rows"""
# TODO: Convert to row objects directly?
return [r.tolist() for r in frame.to_records()]
schema = list(local_df.columns)
index_names = list(local_df.index.names)
index_names = _normalize_index_names(index_names)
schema = index_names + schema
rows = self.spark_ctx.parallelize(frame_to_rows(local_df))
sp_df = DataFrame.from_schema_rdd(
self.sql_ctx.createDataFrame(
rows,
schema=schema,
# Look at all the rows, should be ok since coming from
# a local dataset
samplingRatio=1))
sp_df._index_names = index_names
return sp_df
def sql(self, query):
"""Perform a SQL query and create a L{DataFrame} of the result.
The SQL query is run using Spark SQL. This is not intended for
querying arbitrary databases, but rather querying Spark SQL tables.
Parameters
----------
query: string
The SQL query to pass to Spark SQL to execute.
Returns
-------
Sparkling Pandas DataFrame.
"""
return DataFrame.from_spark_rdd(self.sql_ctx.sql(query), self.sql_ctx)
def table(self, table):
"""Returns the provided Spark SQL table as a L{DataFrame}
Parameters
----------
table: string
The name of the Spark SQL table to turn into a L{DataFrame}
Returns
-------
Sparkling Pandas DataFrame.
"""
return DataFrame.from_spark_rdd(self.sql_ctx.table(table),
self.sql_ctx)
def from_spark_rdd(self, spark_rdd):
"""
Translates a Spark DataFrame into a Sparkling Pandas Dataframe.
Currently, no checking or validation occurs.
Parameters
----------
spark_rdd: Spark DataFrame
Input Spark DataFrame.
Returns
-------
Sparkling Pandas DataFrame.
"""
return DataFrame.from_spark_rdd(spark_rdd, self.sql_ctx)
def DataFrame(self, elements, *args, **kwargs):
"""Create a Sparkling Pandas DataFrame for the provided
elements, following the same API as constructing a Panda's DataFrame.
Note: since elements is local this is only useful for distributing
dataframes which are small enough to fit on a single machine anyways.
Parameters
----------
elements: numpy ndarray (structured or homogeneous), dict, or
Pandas DataFrame.
Input elements to use with the DataFrame.
Additional parameters as defined by L{pandas.DataFrame}.
Returns
-------
Sparkling Pandas DataFrame."""
return self.from_pd_data_frame(pandas.DataFrame(
elements,
*args,
**kwargs))
def from_pandas_rdd(self, pandas_rdd):
"""Create a Sparkling Pandas DataFrame from the provided RDD
which is comprised of Panda's DataFrame. Note: the current version
drops index information.
Parameters
----------
pandas_rdd: RDD[pandas.DataFrame]
Returns
-------
Sparkling Pandas DataFrame."""
return DataFrame.fromDataFrameRDD(pandas_rdd, self.sql_ctx)
def read_json(self, file_path,
*args, **kwargs):
"""Read a json file in and parse it into Pandas DataFrames.
        Column names are taken from the JSON records themselves and are not
        specified separately. Currently, it is not possible to skip the
        first n rows of a file.
Parameters
----------
file_path: string
Path to input. Any valid file path in Spark works here, eg:
'my/path/in/local/file/system' or 'hdfs:/user/juliet/'
        Other than skiprows, all additional parameters available in
        pandas.read_json() are usable here.
Returns
-------
A SparklingPandas DataFrame that contains the data from the
specified file.
"""
def json_file_to_df(files):
""" Transforms a JSON file into a list of data"""
for _, contents in files:
yield pandas.read_json(sio(contents), *args, **kwargs)
return self.from_pandas_rdd(self.spark_ctx.wholeTextFiles(file_path)
.mapPartitions(json_file_to_df))
def stop(self):
"""Stop the underlying SparkContext
"""
self.spark_ctx.stop()
| apache-2.0 |
shangwuhencc/shogun | examples/undocumented/python_modular/graphical/util.py | 26 | 2670 | """ Utilities for matplotlib examples """
import pylab
from numpy import ones, array, double, meshgrid, reshape, linspace, \
concatenate, ravel, pi, sinc
from numpy.random import randn, rand
from modshogun import BinaryLabels, RegressionLabels, RealFeatures, SparseRealFeatures
QUITKEY='q'
NUM_EXAMPLES=100
DISTANCE=2
def quit (event):
if event.key==QUITKEY or event.key==QUITKEY.upper():
pylab.close()
def set_title (title):
quitmsg=" (press '"+QUITKEY+"' to quit)"
complete=title+quitmsg
manager=pylab.get_current_fig_manager()
# now we have to wrap the toolkit
if hasattr(manager, 'window'):
if hasattr(manager.window, 'setCaption'): # QT
manager.window.setCaption(complete)
if hasattr(manager.window, 'set_title'): # GTK
manager.window.set_title(complete)
elif hasattr(manager.window, 'title'): # TK
manager.window.title(complete)
def get_realdata(positive=True):
if positive:
return randn(2, NUM_EXAMPLES)+DISTANCE
else:
return randn(2, NUM_EXAMPLES)-DISTANCE
def get_realfeatures(pos, neg):
arr=array((pos, neg))
features = concatenate(arr, axis=1)
return RealFeatures(features)
def get_labels(raw=False, type='binary'):
data = concatenate(array(
(-ones(NUM_EXAMPLES, dtype=double), ones(NUM_EXAMPLES, dtype=double))
))
if raw:
return data
else:
if type == 'binary':
return BinaryLabels(data)
if type == 'regression':
return RegressionLabels(data)
return None
def compute_output_plot_isolines(classifier, kernel=None, train=None, sparse=False, pos=None, neg=None, regression=False):
size=100
if pos is not None and neg is not None:
x1_max=max(1.2*pos[0,:])
x1_min=min(1.2*neg[0,:])
x2_min=min(1.2*neg[1,:])
x2_max=max(1.2*pos[1,:])
x1=linspace(x1_min, x1_max, size)
x2=linspace(x2_min, x2_max, size)
else:
x1=linspace(-5, 5, size)
x2=linspace(-5, 5, size)
x, y=meshgrid(x1, x2)
dense=RealFeatures(array((ravel(x), ravel(y))))
if sparse:
test=SparseRealFeatures()
test.obtain_from_simple(dense)
else:
test=dense
if kernel and train:
kernel.init(train, test)
else:
classifier.set_features(test)
labels = None
if regression:
labels=classifier.apply().get_labels()
else:
labels=classifier.apply().get_values()
z=labels.reshape((size, size))
return x, y, z
def get_sinedata():
x=4*rand(1, NUM_EXAMPLES)-DISTANCE
x.sort()
y=sinc(pi*x)+0.1*randn(1, NUM_EXAMPLES)
return x, y
def compute_output_plot_isolines_sine(classifier, kernel, train, regression=False):
x=4*rand(1, 500)-2
x.sort()
test=RealFeatures(x)
kernel.init(train, test)
if regression:
y=classifier.apply().get_labels()
else:
y=classifier.apply().get_values()
return x, y
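# A hedged usage sketch of how these helpers typically fit together
# (classifier construction itself is omitted):
#
#   pos, neg = get_realdata(True), get_realdata(False)
#   features = get_realfeatures(pos, neg)
#   labels = get_labels()
#   # ... train a classifier on (features, labels), then:
#   # x, y, z = compute_output_plot_isolines(classifier, kernel, features, pos=pos, neg=neg)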
| gpl-3.0 |
pprett/statsmodels | statsmodels/sandbox/km_class.py | 5 | 11704 | #a class for the Kaplan-Meier estimator
import numpy as np
from math import sqrt
import matplotlib.pyplot as plt
class KAPLAN_MEIER(object):
def __init__(self, data, timesIn, groupIn, censoringIn):
raise RuntimeError('Newer version of Kaplan-Meier class available in survival2.py')
#store the inputs
self.data = data
self.timesIn = timesIn
self.groupIn = groupIn
self.censoringIn = censoringIn
def fit(self):
#split the data into groups based on the predicting variable
#get a set of all the groups
groups = list(set(self.data[:,self.groupIn]))
#create an empty list to store the data for different groups
groupList = []
#create an empty list for each group and add it to groups
for i in range(len(groups)):
groupList.append([])
#iterate through all the groups in groups
for i in range(len(groups)):
            #iterate through the rows of dataArray
for j in range(len(self.data)):
#test if this row has the correct group
if self.data[j,self.groupIn] == groups[i]:
#add the row to groupList
groupList[i].append(self.data[j])
#create an empty list to store the times for each group
timeList = []
#iterate through all the groups
for i in range(len(groupList)):
#create an empty list
times = []
#iterate through all the rows of the group
for j in range(len(groupList[i])):
#get a list of all the times in the group
times.append(groupList[i][j][self.timesIn])
#get a sorted set of the times and store it in timeList
times = list(sorted(set(times)))
timeList.append(times)
#get a list of the number at risk and events at each time
#create an empty list to store the results in
timeCounts = []
#create an empty list to hold points for plotting
points = []
#create a list for points where censoring occurs
censoredPoints = []
        #iterate through each group
for i in range(len(groupList)):
#initialize a variable to estimate the survival function
survival = 1
#initialize a variable to estimate the variance of
#the survival function
varSum = 0
#initialize a counter for the number at risk
riskCounter = len(groupList[i])
#create a list for the counts for this group
counts = []
##create a list for points to plot
x = []
y = []
#iterate through the list of times
for j in range(len(timeList[i])):
if j != 0:
if j == 1:
#add an indicator to tell if the time
#starts a new group
groupInd = 1
#add (0,1) to the list of points
x.append(0)
y.append(1)
#add the point time to the right of that
x.append(timeList[i][j-1])
y.append(1)
#add the point below that at survival
x.append(timeList[i][j-1])
y.append(survival)
#add the survival to y
y.append(survival)
else:
groupInd = 0
#add survival twice to y
y.append(survival)
y.append(survival)
#add the time twice to x
x.append(timeList[i][j-1])
x.append(timeList[i][j-1])
#add each censored time, number of censorings and
#its survival to censoredPoints
censoredPoints.append([timeList[i][j-1],
censoringNum,survival,groupInd])
#add the count to the list
counts.append([timeList[i][j-1],riskCounter,
eventCounter,survival,
sqrt(((survival)**2)*varSum)])
#increment the number at risk
riskCounter += -1*(riskChange)
#initialize a counter for the change in the number at risk
riskChange = 0
#initialize a counter to zero
eventCounter = 0
                #initialize a counter to tell when censoring occurs
censoringCounter = 0
censoringNum = 0
#iterate through the observations in each group
for k in range(len(groupList[i])):
#check of the observation has the given time
if (groupList[i][k][self.timesIn]) == (timeList[i][j]):
#increment the number at risk counter
riskChange += 1
#check if this is an event or censoring
if groupList[i][k][self.censoringIn] == 1:
#add 1 to the counter
eventCounter += 1
else:
censoringNum += 1
#check if there are any events at this time
if eventCounter != censoringCounter:
censoringCounter = eventCounter
#calculate the estimate of the survival function
survival *= ((float(riskCounter) -
eventCounter)/(riskCounter))
try:
#calculate the estimate of the variance
varSum += (eventCounter)/((riskCounter)
*(float(riskCounter)-
eventCounter))
except ZeroDivisionError:
varSum = 0
#append the last row to counts
counts.append([timeList[i][len(timeList[i])-1],
riskCounter,eventCounter,survival,
sqrt(((survival)**2)*varSum)])
            #add the last time twice to x
x.append(timeList[i][len(timeList[i])-1])
x.append(timeList[i][len(timeList[i])-1])
            #add the last survival once to y
y.append(survival)
#y.append(survival)
censoredPoints.append([timeList[i][len(timeList[i])-1],
censoringNum,survival,1])
            #add the list for the group to a list for all the groups
timeCounts.append(np.array(counts))
points.append([x,y])
        #returns a list of arrays, where each array has as its columns: the time,
#the number at risk, the number of events, the estimated value of the
#survival function at that time, and the estimated standard error at
#that time, in that order
self.results = timeCounts
self.points = points
self.censoredPoints = censoredPoints
def plot(self):
x = []
#iterate through the groups
for i in range(len(self.points)):
#plot x and y
plt.plot(np.array(self.points[i][0]),np.array(self.points[i][1]))
#create lists of all the x and y values
x += self.points[i][0]
for j in range(len(self.censoredPoints)):
#check if censoring is occuring
if (self.censoredPoints[j][1] != 0):
#if this is the first censored point
if (self.censoredPoints[j][3] == 1) and (j == 0):
#calculate a distance beyond 1 to place it
#so all the points will fit
dx = ((1./((self.censoredPoints[j][1])+1.))
*(float(self.censoredPoints[j][0])))
#iterate through all the censored points at this time
for k in range(self.censoredPoints[j][1]):
#plot a vertical line for censoring
plt.vlines((1+((k+1)*dx)),
self.censoredPoints[j][2]-0.03,
self.censoredPoints[j][2]+0.03)
#if this censored point starts a new group
elif ((self.censoredPoints[j][3] == 1) and
(self.censoredPoints[j-1][3] == 1)):
#calculate a distance beyond 1 to place it
#so all the points will fit
dx = ((1./((self.censoredPoints[j][1])+1.))
*(float(self.censoredPoints[j][0])))
#iterate through all the censored points at this time
for k in range(self.censoredPoints[j][1]):
#plot a vertical line for censoring
plt.vlines((1+((k+1)*dx)),
self.censoredPoints[j][2]-0.03,
self.censoredPoints[j][2]+0.03)
#if this is the last censored point
elif j == (len(self.censoredPoints) - 1):
#calculate a distance beyond the previous time
#so that all the points will fit
dx = ((1./((self.censoredPoints[j][1])+1.))
*(float(self.censoredPoints[j][0])))
#iterate through all the points at this time
for k in range(self.censoredPoints[j][1]):
#plot a vertical line for censoring
plt.vlines((self.censoredPoints[j-1][0]+((k+1)*dx)),
self.censoredPoints[j][2]-0.03,
self.censoredPoints[j][2]+0.03)
#if this is a point in the middle of the group
else:
                #calculate a distance beyond the current time
#to place the point, so they all fit
dx = ((1./((self.censoredPoints[j][1])+1.))
*(float(self.censoredPoints[j+1][0])
- self.censoredPoints[j][0]))
#iterate through all the points at this time
for k in range(self.censoredPoints[j][1]):
                    #plot a vertical line for censoring
plt.vlines((self.censoredPoints[j][0]+((k+1)*dx)),
self.censoredPoints[j][2]-0.03,
self.censoredPoints[j][2]+0.03)
#set the size of the plot so it extends to the max x and above 1 for y
plt.xlim((0,np.max(x)))
plt.ylim((0,1.05))
#label the axes
plt.xlabel('time')
plt.ylabel('survival')
plt.show()
def show_results(self):
#start a string that will be a table of the results
resultsString = ''
#iterate through all the groups
for i in range(len(self.results)):
#label the group and header
resultsString += ('Group {0}\n\n'.format(i) +
'Time At Risk Events Survival Std. Err\n')
for j in self.results[i]:
#add the results to the string
resultsString += (
'{0:<9d}{1:<12d}{2:<11d}{3:<13.4f}{4:<6.4f}\n'.format(
int(j[0]),int(j[1]),int(j[2]),j[3],j[4]))
print(resultsString)
| bsd-3-clause |
NDKoehler/DataScienceBowl2017_7th_place | dsb3_networks/nodule_segmentation/LIDC-annotations_2_nodule-seg_annotations/LIDC_preprocessing/5_cluster_nodules_per_slice/cluster_nodules_expert_panel_2d.py | 2 | 5278 | import os,sys
import numpy
import SimpleITK
import matplotlib.pyplot as plt
import pandas as pd
import cv2
import numpy as np
from sklearn.cluster import DBSCAN
def sitk_show_opencv(slice_array, nodule_position_lst):
diff = np.max(slice_array) - np.min(slice_array)
slice_array = slice_array - np.min(slice_array)
slice_array = slice_array / float(diff)
for nodule_position in nodule_position_lst:
cv2.rectangle(slice_array,(nodule_position[0]-5,nodule_position[1]-5),(nodule_position[0]+5,nodule_position[1]+5), (255,0,0),1)
cv2.imshow("view nodule",slice_array)
cv2.waitKey(0)
nodule_info_df = pd.read_csv("/media/philipp/qnap/LIDC/preprocessing/4_get_nodule_info_per_slice/dataframe_nodules_gt3mm.csv",sep="\t")
xml_paths_unblinded_read = np.genfromtxt("/media/philipp/qnap/LIDC/preprocessing/2_get_xml_paths_unblinded_read/xml_path_lst_unblinded_read.csv", dtype=str)
original_spacing_df = pd.read_csv("/media/philipp/qnap/LIDC/preprocessing/3_save_dicom_metadata/dicom_metadata.csv",header=0,sep="\t")
min_experts = 1
max_dist = 5 # max_distance in millimeters
verified_nodules_radiologist_id = []
verified_nodules_nodule_id = []
verified_nodules_dcm_path = []
verified_nodules_x_center = []
verified_nodules_y_center = []
verified_nodules_sliceIdx = []
verified_nodules_x_min = []
verified_nodules_x_max = []
verified_nodules_y_min = []
verified_nodules_y_max = []
for counter, xml_path in enumerate(xml_paths_unblinded_read):
print (counter+1,len(xml_paths_unblinded_read))
dcm_path = os.path.dirname(os.path.abspath(xml_path))
dcm_path = dcm_path.split('/')
dcm_path[2] = "philipp"
dcm_path = '/'.join(dcm_path)
this_spacing_df = original_spacing_df[original_spacing_df['dcm_path'] == dcm_path]
if(len(this_spacing_df) != 1): # if dcm_path does not exist in dcm_path_df: maxbe wrong username?
print "dcm_path not found in /media/philipp/qnap/LIDC/preprocessing/3_write_original_spacing_info/original_spacings.csv"
print "wrong username?"
sys.exit()
x_spacing = this_spacing_df["x_spacing"].values[0]
y_spacing = this_spacing_df["y_spacing"].values[0]
    epsilon = int(max_dist/max(x_spacing, y_spacing))
nodules_in_dcm = nodule_info_df[nodule_info_df["dcm_path"] == dcm_path]
sliceIdx_set = list(set(nodules_in_dcm["sliceIdx"].values))
    sliceIdx_set = [x for x in sliceIdx_set if x >= 0] # delete negative slice Ids (why do these exist???)
for sliceIdx in sliceIdx_set:
nodules_in_slice = nodules_in_dcm[nodules_in_dcm["sliceIdx"] == sliceIdx]
radiologist_id_arr = nodules_in_slice["radiologist_id"].values
x_center_arr = nodules_in_slice["x_center"].values
y_center_arr = nodules_in_slice["y_center"].values
x_min_arr = nodules_in_slice["x_min"].values
x_max_arr = nodules_in_slice["x_max"].values
y_min_arr = nodules_in_slice["y_min"].values
y_max_arr = nodules_in_slice["y_max"].values
nodule_positions = np.asarray(zip(x_center_arr, y_center_arr))
db = DBSCAN(eps=epsilon, min_samples=min_experts).fit(nodule_positions)
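        # DBSCAN groups the per-slice nodule centers: marks whose centers lie
        # within epsilon pixels (max_dist converted from mm using the in-plane
        # spacing) fall into the same cluster, which is then treated as one
        # nodule annotated by one or more readers; label -1 (noise) is skipped.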
labels = db.labels_
for cluster_id in list(set(labels)):
if(cluster_id != -1):
cluster_nodules_radiologist_id = radiologist_id_arr[labels == cluster_id]
cluster_nodules_x_center = x_center_arr[labels == cluster_id]
cluster_nodules_y_center = y_center_arr[labels == cluster_id]
cluster_nodules_x_min = x_min_arr[labels == cluster_id]
cluster_nodules_x_max = x_max_arr[labels == cluster_id]
cluster_nodules_y_min = y_min_arr[labels == cluster_id]
cluster_nodules_y_max = y_max_arr[labels == cluster_id]
                if(len(set(cluster_nodules_radiologist_id)) >= min_experts ): #check that all the marks actually come from DIFFERENT radiologists!
string = ""
for rad in cluster_nodules_radiologist_id:
string += str(rad)
verified_nodules_radiologist_id += [string]
verified_nodules_nodule_id += ["merged"]
verified_nodules_dcm_path += [dcm_path]
verified_nodules_x_center += [int(np.mean(cluster_nodules_x_center))]
verified_nodules_y_center += [int(np.mean(cluster_nodules_y_center))]
verified_nodules_sliceIdx += [sliceIdx]
verified_nodules_x_min += [np.min(cluster_nodules_x_min)]
verified_nodules_x_max += [np.max(cluster_nodules_x_max)]
verified_nodules_y_min += [np.min(cluster_nodules_y_min)]
verified_nodules_y_max += [np.max(cluster_nodules_y_max)]
#print len(verified_nodules_radiologist_id), len(verified_nodules_nodule_id), len(verified_nodules_x_min)
#print '---------------------------------------------------'
print len(verified_nodules_radiologist_id)
df = pd.DataFrame()
df.insert(0,"radiologist_id",verified_nodules_radiologist_id)
df.insert(1,"nodule_id",verified_nodules_nodule_id)
df.insert(2,"dcm_path",verified_nodules_dcm_path)
df.insert(3,"x_center",verified_nodules_x_center)
df.insert(4,"y_center",verified_nodules_y_center)
df.insert(5,"sliceIdx",verified_nodules_sliceIdx)
df.insert(6,"x_min",verified_nodules_x_min)
df.insert(7,"x_max",verified_nodules_x_max)
df.insert(8,"y_min",verified_nodules_y_min)
df.insert(9,"y_max",verified_nodules_y_max)
df.to_csv('nodules_gt3mm_min'+str(min_experts)+'.csv', sep = '\t')
| mit |
waynenilsen/statsmodels | statsmodels/tools/pca.py | 25 | 31232 | """Principal Component Analysis
Author: josef-pktd
Modified by Kevin Sheppard
"""
from __future__ import print_function, division
import numpy as np
import pandas as pd
from statsmodels.compat.python import range
from statsmodels.compat.numpy import nanmean
def _norm(x):
return np.sqrt(np.sum(x * x))
class PCA(object):
"""
Principal Component Analysis
Parameters
----------
data : array-like
Variables in columns, observations in rows
ncomp : int, optional
        Number of components to return. If None, returns as many as the
        smaller of the number of rows or columns in data
standardize: bool, optional
Flag indicating to use standardized data with mean 0 and unit
variance. standardized being True implies demean. Using standardized
data is equivalent to computing principal components from the
correlation matrix of data
demean : bool, optional
Flag indicating whether to demean data before computing principal
components. demean is ignored if standardize is True. Demeaning data
but not standardizing is equivalent to computing principal components
from the covariance matrix of data
normalize : bool , optional
        Indicates whether to normalize the factors to have unit inner product.
If False, the loadings will have unit inner product.
weights : array, optional
Series weights to use after transforming data according to standardize
or demean when computing the principal components.
gls : bool, optional
Flag indicating to implement a two-step GLS estimator where
in the first step principal components are used to estimate residuals,
and then the inverse residual variance is used as a set of weights to
estimate the final principal components. Setting gls to True requires
        ncomp to be less than the min of the number of rows or columns
method : str, optional
Sets the linear algebra routine used to compute eigenvectors
'svd' uses a singular value decomposition (default).
'eig' uses an eigenvalue decomposition of a quadratic form
'nipals' uses the NIPALS algorithm and can be faster than SVD when
ncomp is small and nvars is large. See notes about additional changes
when using NIPALS
tol : float, optional
Tolerance to use when checking for convergence when using NIPALS
max_iter : int, optional
Maximum iterations when using NIPALS
missing : string
Method for missing data. Choices are
'drop-row' - drop rows with missing values
'drop-col' - drop columns with missing values
'drop-min' - drop either rows or columns, choosing by data retention
'fill-em' - use EM algorithm to fill missing value. ncomp should be
set to the number of factors required
tol_em : float
Tolerance to use when checking for convergence of the EM algorithm
max_em_iter : int
Maximum iterations for the EM algorithm
Attributes
----------
factors : array or DataFrame
        nobs by ncomp array of principal components (scores)
    scores : array or DataFrame
        nobs by ncomp array of principal components - identical to factors
loadings : array or DataFrame
ncomp by nvar array of principal component loadings for constructing
the factors
coeff : array or DataFrame
nvar by ncomp array of principal component loadings for constructing
the projections
projection : array or DataFrame
        nobs by nvar array containing the projection of the data onto the ncomp
estimated factors
rsquare : array or Series
ncomp array where the element in the ith position is the R-square
of including the fist i principal components. Note: values are
calculated on the transformed data, not the original data
ic : array or DataFrame
        ncomp by 3 array containing the Bai and Ng (2002) Information
        criteria. Each column is a different criterion, and each row
represents the number of included factors.
eigenvals : array or Series
nvar array of eigenvalues
eigenvecs : array or DataFrame
nvar by nvar array of eigenvectors
weights : array
nvar array of weights used to compute the principal components,
normalized to unit length
transformed_data : array
Standardized, demeaned and weighted data used to compute
principal components and related quantities
cols : array
Array of indices indicating columns used in the PCA
rows : array
Array of indices indicating rows used in the PCA
Methods
-------
plot_scree
Scree plot of the eigenvalues
plot_rsquare
Individual series R-squared plotted against the number of factors
project
Compute projection for a given number of factors
Examples
--------
Basic PCA using the correlation matrix of the data
>>> import numpy as np
>>> from statsmodels.tools.pca import PCA
>>> x = np.random.randn(100)[:, None]
>>> x = x + np.random.randn(100, 100)
>>> pc = PCA(x)
Note that the principal components are computed using a SVD and so the
correlation matrix is never constructed, unless method='eig'.
PCA using the covariance matrix of the data
>>> pc = PCA(x, standardize=False)
Limiting the number of factors returned to 1 computed using NIPALS
>>> pc = PCA(x, ncomp=1, method='nipals')
>>> pc.factors.shape
(100, 1)
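A hedged sketch of the missing-data path (assumed usage only: copy the data,
blank one entry, and let the EM filler impute it; ncomp should be set when
using 'fill-em')
>>> xm = x.copy()
>>> xm[10, 5] = np.nan
>>> pc_fill = PCA(xm, ncomp=2, missing='fill-em')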
Notes
-----
The default options perform principal component analysis on the
demeaned, unit variance version of data. Setting standardize to False will
instead only demean, and setting both standardize and
demean to False will not alter the data.
Once the data have been transformed, the following relationships hold when
the number of components (ncomp) is the same as the minimum of the number
of observations or the number of variables.
.. math:: X' X = V \\Lambda V'
.. math:: F = X V
.. math:: X = F V'
where X is the `data`, F is the array of principal components (`factors`
or `scores`), and V is the array of eigenvectors (`loadings`) and V' is
the array of factor coefficients (`coeff`).
When weights are provided, the principal components are computed from the
modified data
.. math:: \\Omega^{-\\frac{1}{2}} X
where :math:`\\Omega` is a diagonal matrix composed of the weights. For
example, when using the GLS version of PCA, the elements of :math:`\\Omega`
will be the inverse of the variances of the residuals from
.. math:: X - F V'
where the number of factors is less than the rank of X
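As a rough numerical check of these identities (a hedged sketch assuming the
default options, unit weights and ncomp equal to the smaller dimension, so
that V is square)
>>> pc = PCA(x)
>>> np.allclose(np.asarray(pc.factors).dot(np.asarray(pc.coeff)),
... pc.transformed_data)
True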
.. [1] J. Bai and S. Ng, "Determining the number of factors in approximate
factor models," Econometrica, vol. 70, number 1, pp. 191-221, 2002
"""
def __init__(self, data, ncomp=None, standardize=True, demean=True,
normalize=True, gls=False, weights=None, method='svd',
missing=None, tol=5e-8, max_iter=1000, tol_em=5e-8,
max_em_iter=100, ):
self._index = None
self._columns = []
if isinstance(data, pd.DataFrame):
self._index = data.index
self._columns = data.columns
self.data = np.asarray(data)
# Store inputs
self._gls = gls
self._normalize = normalize
self._tol = tol
if not 0 < self._tol < 1:
raise ValueError('tol must be strictly between 0 and 1')
self._max_iter = max_iter
self._max_em_iter = max_em_iter
self._tol_em = tol_em
# Prepare data
self._standardize = standardize
self._demean = demean
self._nobs, self._nvar = self.data.shape
if weights is None:
weights = np.ones(self._nvar)
else:
weights = np.array(weights).flatten()
if weights.shape[0] != self._nvar:
raise ValueError('weights should have nvar elements')
weights = weights / np.sqrt((weights ** 2.0).mean())
self.weights = weights
# Check ncomp against maximum
min_dim = min(self._nobs, self._nvar)
self._ncomp = min_dim if ncomp is None else ncomp
if self._ncomp > min_dim:
import warnings
warn = 'The requested number of components is more than can be ' \
'computed from data. The maximum number of components is ' \
'the minimum of the number of observations or variables'
warnings.warn(warn)
self._ncomp = min_dim
self._method = method
if self._method == 'eig':
self._compute_eig = self._compute_using_eig
elif self._method == 'svd':
self._compute_eig = self._compute_using_svd
elif self._method == 'nipals':
self._compute_eig = self._compute_using_nipals
else:
raise ValueError('method is not known.')
self.rows = np.arange(self._nobs)
self.cols = np.arange(self._nvar)
# Handle missing
self._missing = missing
self._adjusted_data = self.data
if missing is not None:
self._adjust_missing()
# Update size
self._nobs, self._nvar = self._adjusted_data.shape
if self._ncomp == np.min(self.data.shape):
self._ncomp = np.min(self._adjusted_data.shape)
elif self._ncomp > np.min(self._adjusted_data.shape):
raise ValueError('When adjusting for missing values, user '
'provided ncomp must be no larger than the '
'smallest dimension of the '
'missing-value-adjusted data size.')
# Attributes and internal values
self._tss = 0.0
self._ess = None
self.transformed_data = None
self._mu = None
self._sigma = None
self._ess_indiv = None
self._tss_indiv = None
self.scores = self.factors = None
self.loadings = None
self.coeff = None
self.eigenvals = None
self.eigenvecs = None
self.projection = None
self.rsquare = None
self.ic = None
# Prepare data
self.transformed_data = self._prepare_data()
# Perform the PCA
self._pca()
if gls:
self._compute_gls_weights()
self.transformed_data = self._prepare_data()
self._pca()
# Final calculations
self._compute_rsquare_and_ic()
if self._index is not None:
self._to_pandas()
def _adjust_missing(self):
"""
Implements alternatives for handling missing values
"""
def keep_col(x):
index = np.logical_not(np.any(np.isnan(x), 0))
return x[:, index], index
def keep_row(x):
index = np.logical_not(np.any(np.isnan(x), 1))
return x[index, :], index
if self._missing == 'drop-col':
self._adjusted_data, index = keep_col(self.data)
self.cols = np.where(index)[0]
self.weights = self.weights[index]
elif self._missing == 'drop-row':
self._adjusted_data, index = keep_row(self.data)
self.rows = np.where(index)[0]
elif self._missing == 'drop-min':
drop_col, drop_col_index = keep_col(self.data)
drop_col_size = drop_col.size
drop_row, drop_row_index = keep_row(self.data)
drop_row_size = drop_row.size
if drop_row_size > drop_col_size:
self._adjusted_data = drop_row
self.rows = np.where(drop_row_index)[0]
else:
self._adjusted_data = drop_col
self.weights = self.weights[drop_col_index]
self.cols = np.where(drop_col_index)[0]
elif self._missing == 'fill-em':
self._adjusted_data = self._fill_missing_em()
else:
raise ValueError('missing method is not known.')
# Check adjusted data size
if self._adjusted_data.size == 0:
raise ValueError('Removal of missing values has eliminated all data.')
def _compute_gls_weights(self):
"""
Computes GLS weights based on percentage of data fit
"""
errors = self.transformed_data - np.asarray(self.projection)
if self._ncomp == self._nvar:
raise ValueError('gls can only be used when ncomp < nvar '
'so that residuals have non-zero variance')
var = (errors ** 2.0).mean(0)
weights = 1.0 / var
weights = weights / np.sqrt((weights ** 2.0).mean())
nvar = self._nvar
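# Effective fraction of series: 1 / sum(normalized weights squared) is the
# effective number of series implied by the weights; dividing by nvar turns
# it into a share of the total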
eff_series_perc = (1.0 / sum((weights / weights.sum()) ** 2.0)) / nvar
if eff_series_perc < 0.1:
eff_series = int(np.round(eff_series_perc * nvar))
import warnings
warn = 'Many series are being down weighted by GLS. Of the ' \
'{original} series, the GLS estimates are based on only ' \
'{effective} (effective) ' \
'series.'.format(original=nvar, effective=eff_series)
warnings.warn(warn)
self.weights = weights
def _pca(self):
"""
Main PCA routine
"""
self._compute_eig()
self._compute_pca_from_eig()
self.projection = self.project()
def __repr__(self):
string = self.__str__()
string = string[:-1]
string += ', id: ' + hex(id(self)) + ')'
return string
def __str__(self):
string = 'Principal Component Analysis('
string += 'nobs: ' + str(self._nobs) + ', '
string += 'nvar: ' + str(self._nvar) + ', '
if self._standardize:
kind = 'Standardize (Correlation)'
elif self._demean:
kind = 'Demean (Covariance)'
else:
kind = 'None'
string += 'transformation: ' + kind + ', '
if self._gls:
string += 'GLS, '
string += 'normalization: ' + str(self._normalize) + ', '
string += 'number of components: ' + str(self._ncomp) + ', '
string += 'method: ' + ('Eigenvalue' if self._method == 'eig' else 'SVD')
string += ')'
return string
def _prepare_data(self):
"""
Standardize or demean data.
"""
adj_data = self._adjusted_data
if np.all(np.isnan(adj_data)):
return np.full(adj_data.shape[1], np.nan)
self._mu = nanmean(adj_data, axis=0)
self._sigma = np.sqrt(nanmean((adj_data - self._mu) ** 2.0, axis=0))
if self._standardize:
data = (adj_data - self._mu) / self._sigma
elif self._demean:
data = (adj_data - self._mu)
else:
data = adj_data
return data / np.sqrt(self.weights)
def _compute_using_svd(self):
"""SVD method to compute eigenvalues and eigenvecs"""
x = self.transformed_data
u, s, v = np.linalg.svd(x)
self.eigenvals = s ** 2.0
self.eigenvecs = v.T
def _compute_using_eig(self):
"""
Eigenvalue decomposition method to compute eigenvalues and eigenvectors
"""
x = self.transformed_data
self.eigenvals, self.eigenvecs = np.linalg.eigh(x.T.dot(x))
def _compute_using_nipals(self):
"""
NIPALS implementation to compute small number of eigenvalues and eigenvectors
"""
x = self.transformed_data
if self._ncomp > 1:
x = x + 0.0 # Copy
tol, max_iter, ncomp = self._tol, self._max_iter, self._ncomp
vals = np.zeros(self._ncomp)
vecs = np.zeros((self._nvar, self._ncomp))
for i in range(ncomp):
max_var_ind = np.argmax(x.var(0))
factor = x[:, [max_var_ind]]
_iter = 0
diff = 1.0
while diff > tol and _iter < max_iter:
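# Alternate two regressions: project x onto the current factor to get a
# loading vector, normalize it to unit length, then project x onto that
# loading to update the factor; stop once the factor's relative change
# falls below tol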
vec = x.T.dot(factor) / (factor.T.dot(factor))
vec = vec / np.sqrt(vec.T.dot(vec))
factor_last = factor
factor = x.dot(vec) / (vec.T.dot(vec))
diff = _norm(factor - factor_last) / _norm(factor)
_iter += 1
vals[i] = (factor ** 2).sum()
vecs[:, [i]] = vec
if ncomp > 1:
x -= factor.dot(vec.T)
self.eigenvals = vals
self.eigenvecs = vecs
def _fill_missing_em(self):
"""
EM algorithm to fill missing values
"""
non_missing = np.logical_not(np.isnan(self.data))
# If nothing missing, return without altering the data
if np.all(non_missing):
return self.data
# 1. Standardized data as needed
data = self.transformed_data = self._prepare_data()
ncomp = self._ncomp
# 2. Check for all nans
col_non_missing = np.sum(non_missing, 1)
row_non_missing = np.sum(non_missing, 0)
if np.any(col_non_missing < ncomp) or np.any(row_non_missing < ncomp):
raise ValueError('Implementation requires that all columns and '
'all rows have at least ncomp non-missing values')
# 3. Get mask
mask = np.isnan(data)
# 4. Compute mean
mu = nanmean(data, 0)
# 5. Replace missing with mean
projection = np.ones((self._nobs, 1)) * mu
projection_masked = projection[mask]
data[mask] = projection_masked
# 6. Compute eigenvalues and fit
diff = 1.0
_iter = 0
while diff > self._tol_em and _iter < self._max_em_iter:
last_projection_masked = projection_masked
# Set transformed data to compute eigenvalues
self.transformed_data = data
# Call correct eig function here
self._compute_eig()
# Call function to compute factors and projection
self._compute_pca_from_eig()
projection = self.project(transform=False, unweight=False)
projection_masked = projection[mask]
data[mask] = projection_masked
delta = last_projection_masked - projection_masked
diff = _norm(delta) / _norm(projection_masked)
_iter += 1
# Must copy to avoid overwriting original data since replacing values
data = self._adjusted_data + 0.0
projection = self.project()
data[mask] = projection[mask]
return data
def _compute_pca_from_eig(self):
"""
Compute relevant statistics after eigenvalues have been computed
"""
# Ensure sorted largest to smallest
vals, vecs = self.eigenvals, self.eigenvecs
indices = np.argsort(vals)
indices = indices[::-1]
vals = vals[indices]
vecs = vecs[:, indices]
if (vals <= 0).any():
# Discard and warn
num_good = vals.shape[0] - (vals <= 0).sum()
if num_good < self._ncomp:
import warnings
warn = 'Only {num:d} eigenvalues are positive. This is the ' \
'maximum number of components that can be extracted.'
warnings.warn(warn.format(num=num_good))
self._ncomp = num_good
vals[num_good:] = np.finfo(np.float64).tiny
# Use ncomp for the remaining calculations
vals = vals[:self._ncomp]
vecs = vecs[:, :self._ncomp]
self.eigenvals, self.eigenvecs = vals, vecs
# Select correct number of components to return
self.scores = self.factors = self.transformed_data.dot(vecs)
self.loadings = vecs
self.coeff = vecs.T
if self._normalize:
self.coeff = (self.coeff.T * np.sqrt(vals)).T
self.factors /= np.sqrt(vals)
self.scores = self.factors
def _compute_rsquare_and_ic(self):
"""
Final statistics to compute
"""
# TSS and related calculations
# TODO: This needs careful testing, with and without weights, gls, standardized and demean
weights = self.weights
ss_data = self.transformed_data * np.sqrt(weights)
self._tss_indiv = np.sum(ss_data ** 2, 0)
self._tss = np.sum(self._tss_indiv)
self._ess = np.zeros(self._ncomp + 1)
self._ess_indiv = np.zeros((self._ncomp + 1, self._nvar))
for i in range(self._ncomp + 1):
# Projection in the same space as transformed_data
projection = self.project(ncomp=i, transform=False, unweight=False)
indiv_rss = (projection ** 2).sum(axis=0)
rss = indiv_rss.sum()
self._ess[i] = self._tss - rss
self._ess_indiv[i, :] = self._tss_indiv - indiv_rss
self.rsquare = 1.0 - self._ess / self._tss
# Information Criteria
ess = self._ess
invalid = ess <= 0 # Prevent taking the log of zero
if invalid.any():
last_obs = (np.where(invalid)[0]).min()
ess = ess[:last_obs]
log_ess = np.log(ess)
r = np.arange(ess.shape[0])
nobs, nvar = self._nobs, self._nvar
sum_to_prod = (nobs + nvar) / (nobs * nvar)
min_dim = min(nobs, nvar)
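# Penalty terms for the Bai and Ng IC_p1, IC_p2 and IC_p3 criteria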
penalties = np.array([sum_to_prod * np.log(1.0 / sum_to_prod),
sum_to_prod * np.log(min_dim),
np.log(min_dim) / min_dim])
penalties = penalties[:, None]
ic = log_ess + r * penalties
self.ic = ic.T
def project(self, ncomp=None, transform=True, unweight=True):
"""
Project series onto a specific number of factors
Parameters
----------
ncomp : int, optional
Number of components to use. If omitted, all components
initially computed are used.
transform : bool, optional
Flag indicating whether to return the projection in the original
space of the data (True, default) or in the space of the
standardized/demeaned data
unweight : bool, optional
Flag indicating whether to undo the effects of the estimation
weights
Returns
-------
projection : array
nobs by nvar array of the projection onto ncomp factors
Notes
-----
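A minimal sketch (hedged; assumes random data as in the class-level examples)
>>> import numpy as np
>>> from statsmodels.tools.pca import PCA
>>> x = np.random.randn(100, 100)
>>> PCA(x, ncomp=1).project().shape
(100, 100)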
"""
# Projection needs to be scaled/shifted based on inputs
ncomp = self._ncomp if ncomp is None else ncomp
if ncomp > self._ncomp:
raise ValueError('ncomp must be smaller than the number of '
'components computed.')
factors = np.asarray(self.factors)
coeff = np.asarray(self.coeff)
projection = factors[:, :ncomp].dot(coeff[:ncomp, :])
if transform or unweight:
projection *= np.sqrt(self.weights)
if transform:
# Remove the weights, which do not depend on transformation
if self._standardize:
projection *= self._sigma
if self._standardize or self._demean:
projection += self._mu
if self._index is not None:
projection = pd.DataFrame(projection,
columns=self._columns,
index=self._index)
return projection
def _to_pandas(self):
"""
Returns pandas DataFrames for all values
"""
index = self._index
# Principal Components
num_zeros = np.ceil(np.log10(self._ncomp))
comp_str = 'comp_{0:0' + str(int(num_zeros)) + 'd}'
cols = [comp_str.format(i) for i in range(self._ncomp)]
df = pd.DataFrame(self.factors, columns=cols, index=index)
self.scores = self.factors = df
# Projections
df = pd.DataFrame(self.projection,
columns=self._columns,
index=index)
self.projection = df
# Weights
df = pd.DataFrame(self.coeff, index=cols, columns=self._columns)
self.coeff = df
# Loadings
df = pd.DataFrame(self.loadings, index=self._columns, columns=cols)
self.loadings = df
# eigenvals
self.eigenvals = pd.Series(self.eigenvals)
self.eigenvals.name = 'eigenvals'
# eigenvecs
vec_str = comp_str.replace('comp', 'eigenvec')
cols = [vec_str.format(i) for i in range(self.eigenvecs.shape[1])]
self.eigenvecs = pd.DataFrame(self.eigenvecs, columns=cols)
# R2
self.rsquare = pd.Series(self.rsquare)
self.rsquare.index.name = 'ncomp'
self.rsquare.name = 'rsquare'
# IC
self.ic = pd.DataFrame(self.ic, columns=['IC_p1', 'IC_p2', 'IC_p3'])
self.ic.index.name = 'ncomp'
def plot_scree(self, ncomp=None, log_scale=True, cumulative=False, ax=None):
"""
Plot of the ordered eigenvalues
Parameters
----------
ncomp : int, optional
Number of components to include in the plot. If None, all
computed components are included
log_scale : bool, optional
Flag indicating whether to use a log scale for the y-axis
cumulative : bool, optional
Flag indicating whether to plot the eigenvalues or cumulative
eigenvalues
ax : Matplotlib axes instance, optional
An axes on which to draw the graph. If omitted, a new figure
is created
Returns
-------
fig : figure
Handle to the figure
"""
import statsmodels.graphics.utils as gutils
fig, ax = gutils.create_mpl_ax(ax)
ncomp = self._ncomp if ncomp is None else ncomp
vals = np.asarray(self.eigenvals)
vals = vals[:self._ncomp]
if cumulative:
vals = np.cumsum(vals)
if log_scale:
ax.set_yscale('log')
ax.plot(np.arange(ncomp), vals[: ncomp], 'bo')
ax.autoscale(tight=True)
xlim = np.array(ax.get_xlim())
sp = xlim[1] - xlim[0]
xlim += 0.02 * np.array([-sp, sp])
ax.set_xlim(xlim)
ylim = np.array(ax.get_ylim())
scale = 0.02
if log_scale:
sp = np.log(ylim[1] / ylim[0])
ylim = np.exp(np.array([np.log(ylim[0]) - scale * sp,
np.log(ylim[1]) + scale * sp]))
else:
sp = ylim[1] - ylim[0]
ylim += scale * np.array([-sp, sp])
ax.set_ylim(ylim)
ax.set_title('Scree Plot')
ax.set_ylabel('Eigenvalue')
ax.set_xlabel('Component Number')
fig.tight_layout()
return fig
def plot_rsquare(self, ncomp=None, ax=None):
"""
Box plots of the individual series R-square against the number of PCs
Parameters
----------
ncomp : int, optional
Number of components to include in the plot. If None, the minimum
of 10 and the number of computed components is plotted
ax : Matplotlib axes instance, optional
An axes on which to draw the graph. If omitted, a new figure
is created
Returns
-------
fig : figure
Handle to the figure
"""
import statsmodels.graphics.utils as gutils
fig, ax = gutils.create_mpl_ax(ax)
ncomp = 10 if ncomp is None else ncomp
ncomp = min(ncomp, self._ncomp)
# R2s in rows, series in columns
r2s = 1.0 - self._ess_indiv / self._tss_indiv
r2s = r2s[1:]
r2s = r2s[:ncomp]
ax.boxplot(r2s.T)
ax.set_title('Individual Input $R^2$')
ax.set_ylabel('$R^2$')
ax.set_xlabel('Number of Included Principal Components')
return fig
def pca(data, ncomp=None, standardize=True, demean=True, normalize=True,
gls=False, weights=None, method='svd'):
"""
Principal Component Analysis
Parameters
----------
data : array
Variables in columns, observations in rows.
ncomp : int, optional
Number of components to return. If None, returns as many as the
smaller of the number of rows or columns of data.
standardize : bool, optional
Flag indicating to use standardized data with mean 0 and unit
variance. standardize being True implies demean.
demean : bool, optional
Flag indicating whether to demean data before computing principal
components. demean is ignored if standardize is True.
normalize : bool, optional
Indicates whether to normalize the factors to have unit inner
product. If False, the loadings will have unit inner product.
weights : array, optional
Series weights to use after transforming data according to standardize
or demean when computing the principal components.
gls : bool, optional
Flag indicating to implement a two-step GLS estimator where
in the first step principal components are used to estimate residuals,
and then the inverse residual variance is used as a set of weights to
estimate the final principal components
method : str, optional
Determines the linear algebra routine used. 'svd', the default,
uses a singular value decomposition. 'eig' uses an eigenvalue
decomposition.
Returns
-------
factors : array or DataFrame
nobs by ncomp array of principal components (also known as scores)
loadings : array or DataFrame
ncomp by nvar array of principal component loadings for constructing
the factors
projection : array or DataFrame
nobs by nvar array containing the projection of the data onto the ncomp
estimated factors
rsquare : array or Series
ncomp array where the element in the ith position is the R-square
of including the first i principal components. The values are
calculated on the transformed data, not the original data.
ic : array or DataFrame
ncomp by 3 array containing the Bai and Ng (2002) Information
criteria. Each column is a different criteria, and each row
represents the number of included factors.
eigenvals : array or Series
nvar array of eigenvalues
eigenvecs : array or DataFrame
nvar by nvar array of eigenvectors
Notes
-----
This is a simple function wrapper around the PCA class. See PCA for more information
and additional methods.
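Examples
--------
A minimal sketch (assuming a 2-d array x with variables in columns and that
this function lives alongside the PCA class)
>>> from statsmodels.tools.pca import pca
>>> factors, loadings, projection, rsquare, ic, eigenvals, eigenvecs = pca(x)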
"""
pc = PCA(data, ncomp=ncomp, standardize=standardize, demean=demean,
normalize=normalize, gls=gls, weights=weights, method=method)
return (pc.factors, pc.loadings, pc.projection, pc.rsquare, pc.ic,
pc.eigenvals, pc.eigenvecs)
| bsd-3-clause |
BhallaLab/benchmarks | moose_nrn_equivalence_testing/comparision_with_simple_HH_model_additional_mechanism/loader_moose.py | 2 | 5681 | """loader_moose.py:
Load a SWC file in MOOSE.
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2015, Dilawar Singh and NCBS Bangalore"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "[email protected]"
__status__ = "Development"
import numpy as np
import pylab
import moose
import time
import moose.utils as mu
from moose import neuroml
from PyQt4 import Qt, QtCore, QtGui
import matplotlib.pyplot as plt
import sys
sys.path.append('/opt/moose/Demos/util')
import rdesigneur as rd
import os
from moose.neuroml.ChannelML import ChannelML
# Global variable to log query to database.
db_query_ = {}
PI = 3.14159265359
frameRunTime = 0.001
FaradayConst = 96485.33 # C/mol
modelName = None
simulator = 'moose'
ncompts = 0
nchans = 0
_args = None
_records = {}
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename='moose.log',
filemode='w')
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
_logger = logging.getLogger('')
def makePlot( cell ):
fig = plt.figure( figsize = ( 10, 12 ) )
chans = ['hd', 'kdr', 'na3', 'nax', 'kap', 'kad']
compts = cell.compartments
epos = cell.electrotonicDistanceFromSoma
gpos = cell.geometricalDistanceFromSoma
combo = list(zip( gpos, compts ))
#combo.sort( key=lambda c:c[1].x)
combo.sort( key= lambda c:c[0] )
for i in chans:
x = []
y = []
for j in combo:
area = j[1].length * j[1].diameter * PI
#x.append( j[1].x )
x.append( j[0] )
if moose.exists( j[1].path + '/' + i ):
elm = moose.element( j[1].path + '/' + i )
y.append( elm.Gbar / area )
else:
y.append( 0.0 )
pylab.plot( x, y, '-bo', label = i )
pylab.legend()
pylab.show()
def saveData( outfile ):
clock = moose.Clock('/clock')
assert clock
yvec = None
for k in _records:
if "soma" in k:
yvec = _records[k].vector
xvec = np.linspace(0, clock.currentTime, len(yvec))
with open(outfile, "wb") as f:
f.write("%s,%s\n" % ('time', 'soma'))
for i, t in enumerate(xvec):
f.write("%s,%s\n" % (t, yvec[i]))
_logger.debug("Done writing to file %s" % outfile)
def loadModel(filename, chanProto, chanDistrib, passiveDistrib):
"""Load the model and insert channels """
global modelName
global nchans, ncompts
# Load in the swc file.
modelName = "elec"
cellProto = [ ( filename, modelName ) ]
rdes = rd.rdesigneur( cellProto = cellProto
, combineSegments = True
, passiveDistrib = passiveDistrib
, chanProto = chanProto
, chanDistrib = chanDistrib
)
rdes.buildModel('/model')
compts = moose.wildcardFind( "/model/%s/#[ISA=CompartmentBase]"%modelName )
setupStimuls( compts[0] )
for compt in compts:
vtab = moose.Table( '%s/vm' % compt.path )
moose.connect( vtab, 'requestOut', compt, 'getVm' )
_records[compt.path] = vtab
nchans = len(set([x.path for x in
moose.wildcardFind('/model/elec/##[TYPE=ZombieHHChannel]')])
)
_logger.info("Total channels: %s" % nchans)
return _records
def setupStimuls(compt):
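# Inject a square current pulse into the compartment: amplitude 1e-9
# (presumably amperes, i.e. 1 nA), zero delay, width 0.1 (presumably seconds)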
command = moose.PulseGen('%s/command' % compt.path)
command.level[0] = 1e-9
command.delay[0] = 0
command.width[0] = 0.1
m = moose.connect(command, 'output', compt, 'injectMsg')
def plots(filter='soma'):
global _records
global _args
toPlot = []
tables = {}
for k in _records:
if filter in k:
toPlot.append(k)
for k in toPlot:
tables[k] = _records[k]
mu.plotRecords(tables, subplot=True) #, outfile=_args.plots)
plt.show()
def countSpike():
import count_spike
global db_query_
soma = None
for k in _records.keys():
if "soma" in k.lower():
soma = _records[k].vector
break
if len(soma) > 0:
nSpikes, meanDT, varDT = count_spike.spikes_characterization( soma )
db_query_['number_of_spikes'] = nSpikes
db_query_['mean_spike_interval'] = meanDT
db_query_['variance_spike_interval'] = varDT
_logger.info("[MOOSE] Spike characteristics:")
_logger.info("\t num_spikes: {}, mean_dt: {}, var_dt: {}".format(
nSpikes, meanDT, varDT)
)
def main(args):
global _args
_args = args
global ncompts, nchans
loadModel(args.swc_file, args)
moose.reinit()
compts = moose.wildcardFind( "/model/%s/#[ISA=CompartmentBase]" % modelName )
ncompts = len(compts)
startt = time.time()
moose.start(args.sim_time)
t = time.time() - startt
db_query_['simulator'] = 'moose'
db_query_['number_of_compartments'] = ncompts
db_query_['number_of_channels'] = nchans
db_query_['simulation_time'] = args.sim_time
db_query_['run_time'] = t
db_query_['dt'] = args.sim_dt
db_query_['model_name'] = args.swc_file
countSpike()
dbEntry(db_query_)
saveData(outfile="_data/moose.csv")
| gpl-2.0 |
timodonnell/pyopen | setup.py | 1 | 1181 | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = "0.0.7"
setup(
name="pyopen",
version=version,
author="Tim O'Donnell",
author_email="[email protected]",
packages=["pyopen", "pyopen.loaders"],
url="https://github.com/timodonnell/pyopen",
license="Apache License",
description="launch an interactive ipython session with specified files opened and parsed",
long_description=open('README.rst').read(),
download_url='https://github.com/timodonnell/pyopen/tarball/%s' % version,
entry_points={
'console_scripts': [
'pyopen = pyopen.command:run',
]
},
classifiers=[
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
],
install_requires=[
"humanize",
"traitlets",
"six",
"xlrd",
"requests",
"tables",
"pandas>=0.16.1",
"nose>=1.3.1",
]
)
| apache-2.0 |
marbiouk/dsmtools | Tools/IntertidalToolsSpeciesPatternsAroundIslands.py | 1 | 25308 | #!/usr/bin/env python
import arcpy
import os
from math import radians, sin, cos, sqrt
import gc
import time
import csv
import pandas as pd
class IntertidalToolsSpeciesPatternsAroundIslands(object):
"""This class has the methods you need to define
to use your code as an ArcGIS Python Tool."""
def __init__(self):
"""Define the tool (tool name is the name of the class)."""
self.label = "Species distribution patterns around islands"
self.description = """Orientation of species distributions for G. Williams"""
self.canRunInBackground = True
self.category = "Intertidal Tools" # Use your own category here, or an existing one.
def getParameterInfo(self):
# You can define a tool to have no parameters
params = []
# Input Island Line - Demarks the area in which we will spatially bootstrap
input_line = arcpy.Parameter(name="input_line_folder",
displayName="Input Island Line",
datatype="DEFeatureClass",
parameterType="Required",
direction="Input",
)
input_line.value = "D:/2016/Corals/IndividualIslands_ByYear_Lines/TUT.shp"
params.append(input_line)
# Input Island Points - Original points that will be sampled by the spatial bootstrap
input_points = arcpy.Parameter(name="input_points",
displayName="Input Island Points",
datatype="DEFeatureClass",
parameterType="Required",
direction="Input",
)
input_points.value = "D:/2016/Corals/IndividualIslands_ByYear_Points/TUT_ALL.shp"
params.append(input_points)
# Select attribute column for the calculation
attribute_process = arcpy.Parameter(name="attribute_1",
displayName="Column to process",
datatype="Field",
parameterType="Required",
direction="Input")
attribute_process.value = "CORAL,CCA,MACROALGAE"
params.append(attribute_process)
# Select attribute column for the calculation
flag_field = arcpy.Parameter(name="flag_field",
displayName="Flag field",
datatype="Field",
parameterType="Required",
direction="Input")
# Derived parameter
flag_field.parameterDependencies = [input_points.name]
flag_field.value = "Flag"
params.append(flag_field)
# Distance to draw polygon - in metres
distance = arcpy.Parameter(name="distance",
displayName="Distance to capture sample points",
datatype="GPDouble",
parameterType="Required",
direction="Input",
)
distance.value = 100000
params.append(distance)
# Angle to capture patterns within circle - in degrees
angle = arcpy.Parameter(name="angle",
displayName="Angle for search",
datatype="GPLong",
parameterType="Required",
direction="Input",
)
angle.value = 10
params.append(angle)
# Output feature class
output_directory = arcpy.Parameter(name="output_directory",
displayName="Output directory",
datatype="DEWorkspace",
parameterType="Optional",
direction="Output",
)
output_directory.value = "D:/2016/Corals/IndividualIslands_ByYear_GP/TUT_ALL"
params.append(output_directory)
clean_up = arcpy.Parameter(name="clean_up",
displayName="Delete temporary files?",
datatype="GPBoolean",
parameterType="Required",
direction="Input",
)
clean_up.value = "False"
params.append(clean_up)
return params
def isLicensed(self):
"""Set whether tool is licensed to execute."""
return True
def updateMessages(self, parameters):
"""Modify the messages created by internal validation for each tool
parameter. This method is called after internal validation."""
return
def execute(self, parameters, messages):
arcpy.env.overwriteOutput = True
arcpy.CheckOutExtension('Spatial')
arcpy.AddMessage("Orientation of species distributions")
for param in parameters:
arcpy.AddMessage("Parameter: %s = %s" % (param.name, param.valueAsText))
# Read in variables for the tool
input_line = parameters[0].valueAsText
input_points = parameters[1].valueAsText
attribute_process = parameters[2].valueAsText
flag_field = parameters[3].valueAsText
distance = parameters[4].value
angle = parameters[5].value
output_directory = parameters[6].valueAsText
clean_up = parameters[7].valueAsText
# Make output directory if it does not exist
output_directory.strip()
arcpy.AddMessage(output_directory)
if not os.path.exists(str(output_directory)):
os.makedirs(output_directory)
arcpy.env.workspace = output_directory
# 0 Describe files to set coordinate systems
desc_input = arcpy.Describe(input_points)
coord_system = desc_input.spatialReference
arcpy.env.outputCoordinateSystem = coord_system
# 1 Convert island line to a polygon - numpy work around due to lack of license
if not arcpy.Exists(os.path.join(output_directory, "Island_Poly.shp")):
def polygon_to_line_no_gap(input_line_, output_polygon):
array = arcpy.da.FeatureClassToNumPyArray(input_line_, ["SHAPE@X", "SHAPE@Y"], spatial_reference=coord_system, explode_to_points=True)
if array.size == 0:
arcpy.AddError("Line has no features, check to ensure it is OK")
else:
array2 = arcpy.Array()
for x, y in array:
pnt = arcpy.Point(x, y)
array2.add(pnt)
polygon = arcpy.Polygon(array2)
arcpy.CopyFeatures_management(polygon, output_polygon)
return
polygon_to_line_no_gap(input_line, os.path.join(output_directory, "Island_Poly.shp"))
# 2 Create Fishnet for random sampling of points within the cells of the net
extent = arcpy.Describe(input_points).extent
origin_coord = str(extent.XMin) + " " + str(extent.YMin)
y_coord = str(extent.XMin) + " " + str(extent.YMin + 1)
corner_coord = str(extent.XMax) + " " + str(extent.YMax)
island_area = 0
with arcpy.da.SearchCursor(os.path.join(output_directory,"Island_Poly.shp"), "SHAPE@") as rows:
for row in rows:
island_area += row[0].getArea("GEODESIC", "SQUAREKILOMETERS")
island_area_polygon = sqrt(island_area * 0.1) * 100
arcpy.AddMessage(
"....fishnet size is: " + str(round(island_area_polygon,2)) + " m x " + str(round(island_area_polygon,2)) + " m. Island area is: " + str(round(island_area,0)) + " km2.")
arcpy.CreateFishnet_management(out_feature_class=os.path.join(output_directory,"Fishnet.shp"),
origin_coord=origin_coord,
y_axis_coord=y_coord,
cell_width=island_area_polygon,
cell_height=island_area_polygon,
number_rows="",
number_columns="",
corner_coord=corner_coord,
labels="",
template="",
geometry_type="POLYGON"
)
arcpy.Intersect_analysis(in_features=os.path.join(output_directory,"Fishnet.shp") + " #;" + os.path.join(output_directory,"Island_Poly.shp") + " #",
out_feature_class=os.path.join(output_directory,"FishClip.shp"),
join_attributes="ONLY_FID",
cluster_tolerance="-1 Unknown",
output_type="INPUT")
arcpy.DefineProjection_management(os.path.join(output_directory,"FishClip.shp"), coord_system)
arcpy.AddField_management(os.path.join(output_directory,"FishClip.shp"), "Shape_Area", "DOUBLE")
arcpy.CalculateField_management(os.path.join(output_directory,"FishClip.shp"),
"Shape_Area",
"!SHAPE.AREA@SQUAREMETERS!",
"PYTHON_9.3")
maxvalue = arcpy.SearchCursor(os.path.join(output_directory,"FishClip.shp"),
"",
"",
"",
"Shape_Area" + " D").next().getValue("Shape_Area")
maxvalue = str(int(maxvalue-1))
where = '"Shape_Area" > ' + "%s" %maxvalue
arcpy.Select_analysis(in_features=os.path.join(output_directory,"FishClip.shp"),
out_feature_class=os.path.join(output_directory,"FishClipInner.shp"),
where_clause=where
)
# 3 Create n random points within the cells of the fishnet
arcpy.CreateRandomPoints_management(out_path=output_directory,
out_name="RndPts.shp",
constraining_feature_class=os.path.join(output_directory,"FishClipInner.shp"),
constraining_extent="0 0 250 250",
number_of_points_or_field="5",
minimum_allowed_distance="0 Meters",
create_multipoint_output="POINT",
multipoint_size="0")
arcpy.DefineProjection_management(os.path.join(output_directory,"RndPts.shp"), coord_system)
else:
arcpy.AddMessage("....skipping building polygons as they already exist")
# 3 Create spatial bootstrapping circle polygons
rows = arcpy.SearchCursor(os.path.join(output_directory,"RndPts.shp"))
desc = arcpy.Describe(os.path.join(output_directory,"RndPts.shp"))
shapefieldname = desc.ShapeFieldName
if not arcpy.Exists(os.path.join(output_directory,"SectorPoly.shp")):
arcpy.AddMessage("....now conducting spatial bootstrap.")
featureclass = os.path.join(output_directory, "SectorPoly.shp")
arcpy.CreateFeatureclass_management(os.path.dirname(featureclass), os.path.basename(featureclass), "Polygon")
arcpy.AddField_management(featureclass, str("FID_Fishne"), "TEXT", "", "", "150")
arcpy.AddField_management(featureclass, "BEARING", "SHORT", "", "", "4")
arcpy.DeleteField_management(featureclass, ["Id"])
arcpy.DefineProjection_management(featureclass, coord_system)
finalfeatureclass = os.path.join(output_directory,"Final.shp")
arcpy.CreateFeatureclass_management(os.path.dirname(finalfeatureclass), os.path.basename(finalfeatureclass), "Polygon")
arcpy.AddField_management(finalfeatureclass, str("FID_Fishne"), "TEXT", "", "", "150")
arcpy.AddField_management(finalfeatureclass, "BEARING", "SHORT", "", "", "4")
arcpy.DeleteField_management(finalfeatureclass, ["Id"])
arcpy.DefineProjection_management(finalfeatureclass, coord_system)
featureclass_in_mem = arcpy.CreateFeatureclass_management("in_memory", "featureclass_in_mem", "Polygon")
arcpy.AddField_management(featureclass_in_mem, "OriginID", "TEXT", "", "", "150")
arcpy.AddField_management(featureclass_in_mem, "BEARING", "SHORT", "", "", "4")
arcpy.DeleteField_management(featureclass_in_mem, ["Id"])
arcpy.DefineProjection_management(featureclass_in_mem, coord_system)
for row in rows:
angles = range(0, 360, angle)
feat = row.getValue(shapefieldname)
columnValue = row.getValue(str("FID"))
pnt = feat.getPart()
origin_x = pnt.X
origin_y = pnt.Y
for ang in angles:
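# Build a triangular sector (wedge) from the origin point: two edge points at
# `distance` along bearings ang and ang + angle, with bearings measured
# clockwise from north (hence sin() for the x offset and cos() for the y offset)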
angleorigin = float(int(ang))
# Point 1
(disp_x, disp_y) = (distance * sin(radians(angleorigin)), distance * cos(radians(angleorigin)))
(end_x, end_y) = (origin_x + disp_x, origin_y + disp_y)
# Point 2
anglestep = float(int(ang) + int(angle))
(disp2_x, disp2_y) = (distance * sin(radians(anglestep)), distance * cos(radians(anglestep)))
(end2_x, end2_y) = (origin_x + disp2_x, origin_y + disp2_y)
# Create a polygon geometry
array = arcpy.Array([arcpy.Point(origin_x, origin_y),
arcpy.Point(end_x, end_y),
arcpy.Point(end2_x, end2_y),
])
polygon = arcpy.Polygon(array)
with arcpy.da.InsertCursor(featureclass_in_mem, ['OriginID', 'BEARING', 'SHAPE@']) as cur:
cur.insertRow([columnValue, ang, polygon])
array.removeAll()
arcpy.CopyFeatures_management(r"in_memory\featureclass_in_mem",featureclass)
else:
arcpy.AddMessage("....using previous spatial bootstrap.")
arcpy.AddMessage("....now joining with observations")
query = '"' + str(flag_field) + '" = ' + str(0)
arcpy.MakeFeatureLayer_management(input_points, "input_points_query_sub")
arcpy.Select_analysis("input_points_query_sub", r"in_memory/input_points_query", query)
count_records = arcpy.GetCount_management(r"in_memory/input_points_query").getOutput(0)
arcpy.AddMessage("....total number of records to process: " + str(count_records))
if int(count_records) > 500:
arcpy.AddMessage("....spatial join will fail due to memory error, working around this limitation...")
count_records = arcpy.GetCount_management(os.path.join(output_directory, "SectorPoly.shp")).getOutput(0)
query_1_range = '"' + str("FID") + '" <= ' + str(int(count_records) / 4)
query_2_range = '"' + str("FID") + '" > ' + str(int(count_records) / 4) + ' And "' + str("FID") + '" < ' + str(int(count_records) / 2)
query_3_range = '"' + str("FID") + '" >= ' + str(int(count_records) / 2) + ' And "' + str("FID") + '" < ' + str(int(count_records) / 2 + int(count_records) / 4)
query_4_range = '"' + str("FID") + '" >= ' + str(int(count_records) / 2 + int(count_records) / 4)
query_list = [query_1_range, query_2_range, query_3_range, query_4_range]
count = 1
for i in query_list:
if not arcpy.Exists(os.path.join(output_directory, "SectorPoly" + str(count) + ".shp")):
arcpy.Select_analysis(os.path.join(output_directory, "SectorPoly.shp"), os.path.join(output_directory, "SectorPoly" + str(count) + ".shp"), i)
arcpy.SpatialJoin_analysis(os.path.join(output_directory, "SectorPoly" + str(count) + ".shp"),
r"in_memory/input_points_query",
os.path.join(output_directory, "SpatialJoin" + str(count) + ".shp"), "JOIN_ONE_TO_MANY",
"KEEP_ALL", "", "INTERSECT")
with arcpy.da.UpdateCursor(os.path.join(output_directory, "SpatialJoin" + str(count) + ".shp"), "Join_Count") as cursor:
for row in cursor:
if row[0] == 0:
cursor.deleteRow()
if not arcpy.Exists(os.path.join(output_directory, "SpatialJoin" + str(count) + ".csv")):
dbf2csv(os.path.join(output_directory, "SpatialJoin" + str(count) + ".dbf"), os.path.join(output_directory, "SpatialJoin" + str(count) + ".csv"))
count += 1
else:
arcpy.SpatialJoin_analysis(os.path.join(output_directory, "SectorPoly.shp"), r"in_memory/input_points_query", r"in_memory/points_SpatialJoin", "JOIN_ONE_TO_MANY", "KEEP_ALL", "", "INTERSECT")
with arcpy.da.UpdateCursor(r"in_memory/points_SpatialJoin", "Join_Count") as cursor:
for row in cursor:
if row[0] == 0:
cursor.deleteRow()
arcpy.CopyFeatures_management(r"in_memory/points_SpatialJoin",os.path.join(output_directory, os.path.splitext(os.path.basename(input_points))[0] + "_join.shp"))
attribute_process = attribute_process.split(",")
if arcpy.Exists(r"in_memory/points_SpatialJoin"):
for i in attribute_process:
arcpy.AddMessage("....calculating statistics for " + str(i))
stats = [[i, "MEAN"], [i, "STD"]]
arcpy.Statistics_analysis(r"in_memory/points_SpatialJoin",
os.path.join(output_directory, os.path.splitext(os.path.basename(input_points))[0] + "_" + i +".dbf"),
stats, "BEARING")
else:
header_saved = False
if not arcpy.Exists(os.path.join(output_directory, "SpatialJoin_Merge" + ".csv")):
with open(os.path.join(output_directory, "SpatialJoin_Merge" + ".csv"), 'wb') as fout:
for num in range(1,5):
with open(os.path.join(output_directory, "SpatialJoin" + str(num) + ".csv")) as fin:
header = next(fin)
if not header_saved:
fout.write(header)
header_saved = True
for line in fin:
fout.write(line)
for i in attribute_process:
arcpy.AddMessage("....calculating statistics for " + str(i) + " using pandas1.")
chunks = pd.read_csv(os.path.join(output_directory, "SpatialJoin_Merge" + ".csv"), chunksize=100000)
pieces = [x.groupby('BEARING', as_index=False)[i].agg(['count', 'mean', 'std']) for x in chunks]
result = pd.concat(pieces)
result.columns = result.columns.droplevel(0)
result = result.reset_index()
name_mean = "MEAN_" + str(i)
name_std = "STD_" + str(i)
result.rename(columns={'count': 'FREQUENCY'}, inplace=True)
result.rename(columns={'mean': name_mean[0:10]}, inplace=True)
result.rename(columns={'std': name_std[0:10]}, inplace=True)
f = {'FREQUENCY':['sum'], name_mean[0:10]:['mean'], name_std[0:10]:['mean']}
result_2 = result.groupby('BEARING').agg(f)
result_2 = result_2.reset_index()
result_2 = result_2[['BEARING', 'FREQUENCY', name_mean[0:10], name_std[0:10]]]
result_2.to_csv(os.path.join(output_directory, os.path.splitext(os.path.basename(input_points))[0] + "_" + i +".csv"), index=False)
if os.path.exists(os.path.join(output_directory, os.path.splitext(os.path.basename(input_points))[0] + "_" + i +".csv")):
with open(os.path.join(output_directory, os.path.splitext(os.path.basename(input_points))[0] + "_" + i +".csv"), "r") as f:
reader = list(csv.reader(f, delimiter=","))
reader.pop(1)
reader.pop(1)
with open(os.path.join(output_directory, os.path.splitext(os.path.basename(input_points))[0] + "_" + i +".csv"), "w") as out:
writer = csv.writer(out, delimiter=",")
for row in reader:
writer.writerow(row)
result = arcpy.TableToDBASE_conversion(os.path.join(output_directory, os.path.splitext(os.path.basename(input_points))[0] + "_" + i +".csv"),
output_directory)
try:
arcpy.Delete_management(r"in_memory/points_SpatialJoin")
arcpy.Delete_management(r"in_memory/input_points_query")
except:
pass
if clean_up == "true":
arcpy.Delete_management(os.path.join(output_directory,"Island_Line.shp"))
arcpy.CopyFeatures_management(os.path.join(output_directory,"Island_Poly.shp"),os.path.join(output_directory, os.path.splitext(os.path.basename(input_points))[0] + "_poly.shp"))
arcpy.Delete_management(os.path.join(output_directory,"Island_Poly.shp"))
arcpy.Delete_management(os.path.join(output_directory,"SectorPoly.shp"))
arcpy.Delete_management(os.path.join(output_directory,"Fishnet.shp"))
arcpy.Delete_management(os.path.join(output_directory,"Fishnet_label.shp"))
arcpy.Delete_management(os.path.join(output_directory,"FishClip.shp"))
arcpy.Delete_management(os.path.join(output_directory,"FishClipInner.shp"))
arcpy.Delete_management(os.path.join(output_directory,"RndPts.shp"))
if int(count_records) > 500:
arcpy.Delete_management(os.path.join(output_directory, "SectorPoly1" + ".shp"))
arcpy.Delete_management(os.path.join(output_directory, "SectorPoly2" + ".shp"))
arcpy.Delete_management(os.path.join(output_directory, "SectorPoly3" + ".shp"))
arcpy.Delete_management(os.path.join(output_directory, "SectorPoly4" + ".shp"))
arcpy.Delete_management(os.path.join(output_directory, "SpatialJoin1" + ".shp"))
arcpy.Delete_management(os.path.join(output_directory, "SpatialJoin2" + ".shp"))
arcpy.Delete_management(os.path.join(output_directory, "SpatialJoin3" + ".shp"))
arcpy.Delete_management(os.path.join(output_directory, "SpatialJoin4" + ".shp"))
arcpy.AddMessage("....completed: " + os.path.splitext(os.path.basename(input_points))[0] + ".")
arcpy.CheckInExtension('Spatial')
return
def dbf2csv(dbfpath, csvpath):
'''Convert a .dbf file (or any shapefile/featureclass table) to a CSV file.
Inputs:
dbfpath: full path to the .dbf file or featureclass [input]
csvpath: full path to the .csv file [output]
edited from http://gis.stackexchange.com/questions/93303/bulk-convert-dbf-to-csv-in-a-folder-arcgis-10-1-using-python
'''
#import csv
rows = arcpy.SearchCursor(dbfpath)
csvFile = csv.writer(open(csvpath, 'wb')) #output csv
fieldnames = [f.name for f in arcpy.ListFields(dbfpath)]
csvFile.writerow(fieldnames)
for row in rows:
rowlist = []
for field in fieldnames:
rowlist.append(row.getValue(field))
csvFile.writerow(rowlist)
def main():
tool = IntertidalToolsSpeciesPatternsAroundIslands()
tool.execute(tool.getParameterInfo(), None)
if __name__=='__main__':
main()
| mit |
kaushik94/sympy | sympy/external/importtools.py | 3 | 7520 | """Tools to assist importing optional external modules."""
from __future__ import print_function, division
import sys
from distutils.version import LooseVersion
# Override these in the module to change the default warning behavior.
# For example, you might set both to False before running the tests so that
# warnings are not printed to the console, or set both to True for debugging.
WARN_NOT_INSTALLED = None # Default is False
WARN_OLD_VERSION = None # Default is True
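# For example, a test runner might silence both warnings before importing
# optional modules (a sketch; adjust the import path to your layout):
# from sympy.external import importtools
# importtools.WARN_NOT_INSTALLED = False
# importtools.WARN_OLD_VERSION = False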
def __sympy_debug():
# helper function from sympy/__init__.py
# We don't just import SYMPY_DEBUG from that file because we don't want to
# import all of sympy just to use this module.
import os
debug_str = os.getenv('SYMPY_DEBUG', 'False')
if debug_str in ('True', 'False'):
return eval(debug_str)
else:
raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" %
debug_str)
if __sympy_debug():
WARN_OLD_VERSION = True
WARN_NOT_INSTALLED = True
def import_module(module, min_module_version=None, min_python_version=None,
warn_not_installed=None, warn_old_version=None,
module_version_attr='__version__', module_version_attr_call_args=None,
__import__kwargs={}, catch=()):
"""
Import and return a module if it is installed.
If the module is not installed, it returns None.
A minimum version for the module can be given as the keyword argument
min_module_version. This should be comparable against the module version.
By default, module.__version__ is used to get the module version. To
override this, set the module_version_attr keyword argument. If the
attribute of the module to get the version should be called (e.g.,
module.version()), then set module_version_attr_call_args to the args such
that module.module_version_attr(*module_version_attr_call_args) returns the
module's version.
If the module version is less than min_module_version using the Python <
comparison, None will be returned, even if the module is installed. You can
use this to keep from importing an incompatible older version of a module.
You can also specify a minimum Python version by using the
min_python_version keyword argument. This should be comparable against
sys.version_info.
If the keyword argument warn_not_installed is set to True, the function will
emit a UserWarning when the module is not installed.
If the keyword argument warn_old_version is set to True, the function will
emit a UserWarning when the library is installed, but cannot be imported
because of the min_module_version or min_python_version options.
Note that because of the way warnings are handled, a warning will be
emitted for each module only once. You can change the default warning
behavior by overriding the values of WARN_NOT_INSTALLED and WARN_OLD_VERSION
in sympy.external.importtools. By default, WARN_NOT_INSTALLED is False and
WARN_OLD_VERSION is True.
This function uses __import__() to import the module. To pass additional
options to __import__(), use the __import__kwargs keyword argument. For
example, to import a submodule A.B, you must pass a nonempty fromlist option
to __import__. See the docstring of __import__().
This catches ImportError to determine if the module is not installed. To
catch additional errors, pass them as a tuple to the catch keyword
argument.
Examples
========
>>> from sympy.external import import_module
>>> numpy = import_module('numpy')
>>> numpy = import_module('numpy', min_python_version=(2, 7),
... warn_old_version=False)
>>> numpy = import_module('numpy', min_module_version='1.5',
... warn_old_version=False) # numpy.__version__ is a string
>>> # gmpy does not have __version__, but it does have gmpy.version()
>>> gmpy = import_module('gmpy', min_module_version='1.14',
... module_version_attr='version', module_version_attr_call_args=(),
... warn_old_version=False)
>>> # To import a submodule, you must pass a nonempty fromlist to
>>> # __import__(). The values do not matter.
>>> p3 = import_module('mpl_toolkits.mplot3d',
... __import__kwargs={'fromlist':['something']})
>>> # matplotlib.pyplot can raise RuntimeError when the display cannot be opened
>>> matplotlib = import_module('matplotlib',
... __import__kwargs={'fromlist':['pyplot']}, catch=(RuntimeError,))
"""
# keyword argument overrides default, and global variable overrides
# keyword argument.
warn_old_version = (WARN_OLD_VERSION if WARN_OLD_VERSION is not None
else warn_old_version or True)
warn_not_installed = (WARN_NOT_INSTALLED if WARN_NOT_INSTALLED is not None
else warn_not_installed or False)
import warnings
# Check Python first so we don't waste time importing a module we can't use
if min_python_version:
if sys.version_info < min_python_version:
if warn_old_version:
warnings.warn("Python version is too old to use %s "
"(%s or newer required)" % (
module, '.'.join(map(str, min_python_version))),
UserWarning, stacklevel=2)
return
# PyPy 1.6 has rudimentary NumPy support and importing it produces errors, so skip it
if module == 'numpy' and '__pypy__' in sys.builtin_module_names:
return
try:
mod = __import__(module, **__import__kwargs)
## there's something funny about imports with matplotlib and py3k. doing
## from matplotlib import collections
## gives python's stdlib collections module. explicitly re-importing
## the module fixes this.
from_list = __import__kwargs.get('fromlist', tuple())
for submod in from_list:
if submod == 'collections' and mod.__name__ == 'matplotlib':
__import__(module + '.' + submod)
except ImportError:
if warn_not_installed:
warnings.warn("%s module is not installed" % module, UserWarning,
stacklevel=2)
return
except catch as e:
if warn_not_installed:
warnings.warn(
"%s module could not be used (%s)" % (module, repr(e)),
stacklevel=2)
return
if min_module_version:
modversion = getattr(mod, module_version_attr)
if module_version_attr_call_args is not None:
modversion = modversion(*module_version_attr_call_args)
if LooseVersion(modversion) < LooseVersion(min_module_version):
if warn_old_version:
# Attempt to create a pretty string version of the version
from ..core.compatibility import string_types
if isinstance(min_module_version, string_types):
verstr = min_module_version
elif isinstance(min_module_version, (tuple, list)):
verstr = '.'.join(map(str, min_module_version))
else:
# Either don't know what this is. Hopefully
# it's something that has a nice str version, like an int.
verstr = str(min_module_version)
warnings.warn("%s version is too old to use "
"(%s or newer required)" % (module, verstr),
UserWarning, stacklevel=2)
return
return mod
| bsd-3-clause |
snowicecat/umich-eecs445-f16 | lecture07_naive-bayes/Lec07.py | 2 | 5343 | # plotting
from matplotlib import pyplot as plt;
from matplotlib import colors
import matplotlib as mpl;
from mpl_toolkits.mplot3d import Axes3D
if "bmh" in plt.style.available: plt.style.use("bmh");
# matplotlib objects
from matplotlib import mlab;
from matplotlib import gridspec;
# scientific
import numpy as np;
import scipy as scp;
from scipy import linalg
import scipy.stats;
# table display
import pandas as pd
from IPython.display import display
# python
import random;
# warnings
import warnings
warnings.filterwarnings("ignore")
# rise config
from notebook.services.config import ConfigManager
cm = ConfigManager()
cm.update('livereveal', {
'theme': 'simple',
'start_slideshow_at': 'selected',
'transition':'fade',
'scroll': False
});
def lin_reg_classifier(means, covs, n, outliers):
"""
Least Squares for Classification.
:Parameters:
- `means`: means of multivariate normal distributions used to generate data.
- `covs`: terms of variance-covariance matrix used to determine spread of simulated data.
- `n`: number of samples.
- `outliers`: user-specified outliers to be added to the second simulated dataset.
"""
# generate data
x1, y1 = np.random.multivariate_normal(means[0], covs[0], n[0]).T
x2, y2 = np.random.multivariate_normal(means[1], covs[1], n[1]).T
# add targets
class_1 = [1]*n[0] + [0]*n[1]
class_2 = [0]*n[0] + [1]*n[1]
T = np.mat([class_1, class_2]).T
# add intercept and merge data
ones = np.ones(n[0]+n[1])
a = np.hstack((x1,x2))
b = np.hstack((y1,y2))
X = np.mat([ones, a, b]).T
# obtain weights
w_t = np.dot(T.T, np.linalg.pinv(X).T)
# obtain decision line
decision_line_int = -(w_t.item((0,0)) - w_t.item((1,0)))/(w_t.item((0,2)) - w_t.item((1,2)))
decision_line_slope = - (w_t.item((0,1)) - w_t.item((1,1)))/(w_t.item((0,2)) - w_t.item((1,2)))
# add outliers to the second set of simulated data
extract_x = []
extract_y = []
for i in outliers:
extract_x.append(i[0])
extract_y.append(i[1])
x2_out = np.hstack((x2, extract_x))
y2_out = np.hstack((y2, extract_y))
class_1_out = [1]*n[0] + [0]*n[1] + [0]*len(outliers)
class_2_out = [0]*n[0] + [1]*n[1] + [1]*len(outliers)
T_out = np.array([class_1_out, class_2_out]).T
ones_out = np.ones(n[0]+n[1]+len(outliers))
a_out = np.hstack((x1,x2_out))
b_out = np.hstack((y1,y2_out))
X_out = np.array([ones_out, a_out, b_out]).T
# obtain revised weights and decision line
w_t_out = np.dot(T_out.T, np.linalg.pinv(X_out).T)
decision_line_int_out = -(w_t_out[0][0] - w_t_out[1][0])/(w_t_out[0][2] - w_t_out[1][2])
decision_line_slope_out = - (w_t_out[0][1] - w_t_out[1][1])/(w_t_out[0][2] - w_t_out[1][2])
# plot results
x = np.linspace(np.min(a_out)-3 , np.max(a_out)+3, 100)
fig, (ax1, ax2) = plt.subplots(1, 2, sharex=False, sharey=True)
plt.suptitle('Least Squares for Classification')
ax1.plot(x, decision_line_int+decision_line_slope*x, 'k', linewidth=2)
ax1.plot(x1, y1, 'go', x2, y2, 'bs', alpha=0.4)
ax2.plot(x, decision_line_int_out+decision_line_slope_out*x, 'k', linewidth=2)
ax2.plot(x1, y1, 'go', x2, y2, 'bs', alpha=0.4)
for i in range(len(outliers)):
ax2.plot(outliers[i][0], outliers[i][1], 'bs', alpha=0.4)
fig.set_size_inches(15, 5, forward=True)
ax1.set_xlim([np.min(a_out)-1, np.max(a_out)+1,])
ax2.set_xlim([np.min(a_out)-1, np.max(a_out)+1])
ax1.set_ylim([np.min(b_out)-1, np.max(b_out)+1,])
ax2.set_ylim([np.min(b_out)-1, np.max(b_out)+1])
ax1.set_xlabel('X1')
ax2.set_xlabel('X1')
ax1.set_ylabel('X2')
plt.show()
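# Example call (a sketch with assumed toy inputs: two well-separated classes
# and a couple of manually placed outliers):
# lin_reg_classifier(means=[[0, 0], [4, 4]],
# covs=[[[1, 0], [0, 1]], [[1, 0], [0, 1]]],
# n=[100, 100],
# outliers=[(9, -5), (10, -6)])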
def generate_gda(means, covs, num_samples):
num_classes = len(means);
num_samples //= num_classes;
# cheat and draw equal number of samples from each gaussian
samples = [
np.random.multivariate_normal(means[c],covs[c],num_samples).T
for c in range(num_classes)
];
return np.concatenate(samples, axis=1);
def plot_decision_contours(means, covs):
# plt
fig = plt.figure(figsize=(10,6));
ax = fig.gca();
# generate samples
data_x,data_y = generate_gda(means, covs, 1000);
ax.plot(data_x, data_y, 'x');
# dimensions
min_x, max_x = -10,10;
min_y, max_y = -10,10;
# grid
delta = 0.025
x = np.arange(min_x, max_x, delta);
y = np.arange(min_y, max_y, delta);
X, Y = np.meshgrid(x, y);
# bivariate difference of gaussians
mu1,mu2 = means;
sigma1, sigma2 = covs;
Z1 = mlab.bivariate_normal(X, Y, sigmax=sigma1[0][0], sigmay=sigma1[1][1], mux=mu1[0], muy=mu1[1], sigmaxy=sigma1[0][1]);
Z2 = mlab.bivariate_normal(X, Y, sigmax=sigma2[0][0], sigmay=sigma2[1][1], mux=mu2[0], muy=mu2[1], sigmaxy=sigma2[0][1]);
Z = Z2 - Z1;
# contour plot
ax.contour(X, Y, Z, levels=np.linspace(np.min(Z),np.max(Z),10));
cs = ax.contour(X, Y, Z, levels=[0], c="k", linewidths=5);
plt.clabel(cs, fontsize=10, inline=1, fmt='%1.3f')
# plot settings
ax.set_xlim((min_x,max_x));
ax.set_ylim((min_y,max_y));
# ax.set_title("Gaussian Discriminant Analysis: $P(y=1 | x) - P(y=0 | x)$", fontsize=20)
ax.set_title("Countours: $P(y=1 | x) - P(y=0 | x)$", fontsize=20) | mit |
JeanKossaifi/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the Brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior to Gaussian
naive Bayes: the calibration curve has a sigmoid shape, which is typical of
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
GunoH/intellij-community | python/helpers/pycharm_matplotlib_backend/backend_interagg.py | 10 | 3831 | import base64
import matplotlib
import os
import sys
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import FigureManagerBase, ShowBase
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
from datalore.display import debug, display, SHOW_DEBUG_INFO
PY3 = sys.version_info[0] >= 3
index = int(os.getenv("PYCHARM_MATPLOTLIB_INDEX", 0))
rcParams = matplotlib.rcParams
class Show(ShowBase):
def __call__(self, **kwargs):
debug("show() called with args %s" % kwargs)
managers = Gcf.get_all_fig_managers()
if not managers:
debug("Error: Managers list in `Gcf.get_all_fig_managers()` is empty")
return
for manager in managers:
manager.show(**kwargs)
def mainloop(self):
pass
show = Show()
# from pyplot API
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.show()
else:
debug("Error: Figure manager `Gcf.get_active()` is None")
# from pyplot API
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, figure)
# from pyplot API
def new_figure_manager_given_figure(num, figure):
canvas = FigureCanvasInterAgg(figure)
manager = FigureManagerInterAgg(canvas, num)
return manager
# from pyplot API
class FigureCanvasInterAgg(FigureCanvasAgg):
def __init__(self, figure):
FigureCanvasAgg.__init__(self, figure)
def show(self):
FigureCanvasAgg.draw(self)
if matplotlib.__version__ < '1.2':
buffer = self.tostring_rgb(0, 0)
else:
buffer = self.tostring_rgb()
if len(set(buffer)) <= 1:
# do not plot empty
debug("Error: Buffer FigureCanvasAgg.tostring_rgb() is empty")
return
render = self.get_renderer()
width = int(render.width)
debug("Image width: %d" % width)
is_interactive = os.getenv("PYCHARM_MATPLOTLIB_INTERACTIVE", False)
if is_interactive:
debug("Using interactive mode (Run with Python Console)")
debug("Plot index = %d" % index)
else:
debug("Using non-interactive mode (Run without Python Console)")
plot_index = index if is_interactive else -1
display(DisplayDataObject(plot_index, width, buffer))
def draw(self):
FigureCanvasAgg.draw(self)
is_interactive = os.getenv("PYCHARM_MATPLOTLIB_INTERACTIVE", False)
if is_interactive and matplotlib.is_interactive():
self.show()
else:
debug("Error: calling draw() in non-interactive mode won't show a plot. Try to 'Run with Python Console'")
class FigureManagerInterAgg(FigureManagerBase):
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
global index
index += 1
self.canvas = canvas
self._num = num
self._shown = False
def show(self, **kwargs):
self.canvas.show()
Gcf.destroy(self._num)
class DisplayDataObject:
def __init__(self, plot_index, width, image_bytes):
self.plot_index = plot_index
self.image_width = width
self.image_bytes = image_bytes
def _repr_display_(self):
image_bytes_base64 = base64.b64encode(self.image_bytes)
if PY3:
image_bytes_base64 = image_bytes_base64.decode()
body = {
'plot_index': self.plot_index,
'image_width': self.image_width,
'image_base64': image_bytes_base64
}
return ('pycharm-plot-image', body)
| apache-2.0 |
dyoung418/tensorflow | tensorflow/examples/learn/iris_run_config.py | 76 | 2565 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with run config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
  # You can define your configuration by providing a RunConfig object to the
  # estimator to control session configurations, e.g. tf_random_seed.
run_config = tf.estimator.RunConfig().replace(tf_random_seed=1)
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = [
tf.feature_column.numeric_column(
X_FEATURE, shape=np.array(x_train).shape[1:])]
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3,
config=run_config)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=200)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class_ids'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
joshloyal/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 58 | 3265 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <[email protected]>
# Gilles Louppe <[email protected]>
# Andreas Mueller <[email protected]>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelized ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
mugizico/scikit-learn | examples/preprocessing/plot_robust_scaling.py | 221 | 2702 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
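# --- Hedged illustration (not part of the original example) -----------------
# A minimal sketch of the point made in the docstring above, reusing the
# imports just above: one extreme value drags StandardScaler's mean/std, while
# RobustScaler's median/IQR barely move. The tiny array is an assumption
# purely for illustration.
demo = np.array([[1.0], [2.0], [3.0], [4.0], [1000.0]])  # last value is an outlier
print("StandardScaler:", StandardScaler().fit_transform(demo).ravel())
print("RobustScaler:  ", RobustScaler().fit_transform(demo).ravel())
# -----------------------------------------------------------------------------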
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
| bsd-3-clause |
spennihana/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_weights_gbm.py | 6 | 9236 | from __future__ import print_function
from builtins import zip
from builtins import range
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import random
from h2o.estimators.gbm import H2OGradientBoostingEstimator
def weights_check():
def check_same(data1, data2, min_rows_scale):
gbm1_regression = H2OGradientBoostingEstimator(min_rows=20,
ntrees=5,
seed=20,
max_depth=4)
gbm1_regression.train(x=["displacement", "power", "weight", "acceleration", "year"],
y="economy",
training_frame=data1)
gbm2_regression = H2OGradientBoostingEstimator(min_rows=20*min_rows_scale,
ntrees=5,
seed=20,
max_depth=4)
gbm2_regression.train(x=["displacement", "power", "weight", "acceleration", "year"],
y="economy",
training_frame=data2,
weights_column="weights")
gbm1_binomial = H2OGradientBoostingEstimator(min_rows=20,
distribution="bernoulli",
ntrees=5,
seed=20,
max_depth=4)
gbm1_binomial.train(x=["displacement", "power", "weight", "acceleration", "year"],
y="economy_20mpg",
training_frame=data1)
gbm2_binomial = H2OGradientBoostingEstimator(min_rows=20*min_rows_scale,
distribution="bernoulli",
ntrees=5,
seed=20,
max_depth=4)
gbm2_binomial.train(x=["displacement", "power", "weight", "acceleration", "year"],
y="economy_20mpg",
training_frame=data2,
weights_column="weights")
gbm1_multinomial = H2OGradientBoostingEstimator(min_rows=20,
distribution="multinomial",
ntrees=5,
seed=20,
max_depth=4)
gbm1_multinomial.train(x=["displacement", "power", "weight", "acceleration", "year"],
y="cylinders",
training_frame=data1)
gbm2_multinomial = H2OGradientBoostingEstimator(min_rows=20*min_rows_scale,
distribution="multinomial",
ntrees=5,
seed=20,
max_depth=4)
gbm2_multinomial.train(x=["displacement", "power", "weight", "acceleration", "year"],
y="cylinders",
weights_column="weights", training_frame=data2)
reg1_mse = gbm1_regression.mse()
reg2_mse = gbm2_regression.mse()
bin1_auc = gbm1_binomial.auc()
bin2_auc = gbm2_binomial.auc()
mul1_mse = gbm1_multinomial.mse()
mul2_mse = gbm2_multinomial.mse()
print("MSE (regresson) no weights vs. weights: {0}, {1}".format(reg1_mse, reg2_mse))
print("AUC (binomial) no weights vs. weights: {0}, {1}".format(bin1_auc, bin2_auc))
print("MSE (multinomial) no weights vs. weights: {0}, {1}".format(mul1_mse, mul2_mse))
assert abs(reg1_mse - reg2_mse) < 1e-5 * reg1_mse, "Expected mse's to be the same, but got {0}, and {1}".format(reg1_mse, reg2_mse)
assert abs(bin1_auc - bin2_auc) < 3e-3 * bin1_auc, "Expected auc's to be the same, but got {0}, and {1}".format(bin1_auc, bin2_auc)
        assert abs(mul1_mse - mul2_mse) < 1e-6 * mul1_mse, "Expected mse's to be the same, but got {0}, and {1}".format(mul1_mse, mul2_mse)
h2o_cars_data = h2o.import_file(pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
h2o_cars_data["economy_20mpg"] = h2o_cars_data["economy_20mpg"].asfactor()
h2o_cars_data["cylinders"] = h2o_cars_data["cylinders"].asfactor()
# uniform weights same as no weights
random.seed(2222)
    weight = 3  # random.randint(1,10)  # PY3 hack
uniform_weights = [[weight]] *406
h2o_uniform_weights = h2o.H2OFrame(uniform_weights)
h2o_uniform_weights.set_names(["weights"])
h2o_data_uniform_weights = h2o_cars_data.cbind(h2o_uniform_weights)
print("Checking that using uniform weights is equivalent to no weights:")
print()
check_same(h2o_cars_data, h2o_data_uniform_weights, weight)
# zero weights same as removed observations
zero_weights = [[1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0]] # [[0 if random.randint(0,1) else 1 for r in range(406)]]
h2o_zero_weights = h2o.H2OFrame(list(zip(*zero_weights)))
h2o_zero_weights.set_names(["weights"])
h2o_data_zero_weights = h2o_cars_data.cbind(h2o_zero_weights)
h2o_data_zeros_removed = h2o_cars_data[h2o_zero_weights["weights"] == 1]
print("Checking that using some zero weights is equivalent to removing those observations:")
print()
check_same(h2o_data_zeros_removed, h2o_data_zero_weights, 1)
# doubled weights same as doubled observations
doubled_weights = [[1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 2, 2, 2, 1, 1, 2, 2, 1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 2, 2, 2, 1, 2, 1, 2, 2, 2, 1, 1, 2, 2, 1, 2, 2, 1, 2, 1, 1, 2, 2, 2, 1, 1, 2, 1, 2, 2, 2, 1, 1, 2, 1, 2, 2, 1, 1, 1, 2, 2, 2, 1, 2, 1, 2, 2, 1, 1, 1, 1, 2, 1, 1, 2, 2, 1, 2, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 2, 1, 2, 1, 1, 2, 2, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 2, 1, 2, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 2, 1, 1, 2, 2, 2, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 2, 2, 1, 2, 2, 2, 1, 1, 1, 1, 1, 2, 2, 1, 2, 1, 1, 1, 1, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 1, 2, 2, 2, 1, 2, 2, 2, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 2, 1, 2, 1, 1, 1, 2, 1, 2, 1, 1, 2, 1, 1, 1, 2, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1, 2, 1, 2, 2, 2, 2, 1, 1, 1, 2, 1, 1, 2, 2, 1, 1, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 2, 2, 1, 2, 2, 2, 1, 2, 1, 2, 2, 2, 1, 2, 1, 2, 1, 2, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 1, 1, 1, 1, 2, 1, 1, 2, 2, 2, 1, 1, 2, 2, 2, 1, 2, 1, 2, 2, 2, 2, 1, 2, 1, 2, 1, 2, 2, 1, 2, 2, 1, 2, 1, 2, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 2, 1, 1, 2, 1]] # [[1 if random.randint(0,1) else 2 for r in range(406)]]
h2o_doubled_weights = h2o.H2OFrame(list(zip(*doubled_weights)))
h2o_doubled_weights.set_names(["weights"])
h2o_data_doubled_weights = h2o_cars_data.cbind(h2o_doubled_weights)
doubled_data = h2o.as_list(h2o_cars_data, use_pandas=False)
colnames = doubled_data.pop(0)
for idx, w in enumerate(doubled_weights[0]):
if w == 2: doubled_data.append(doubled_data[idx])
h2o_data_doubled = h2o.H2OFrame(doubled_data)
h2o_data_doubled.set_names(list(colnames))
h2o_data_doubled["economy_20mpg"] = h2o_data_doubled["economy_20mpg"].asfactor()
h2o_data_doubled["cylinders"] = h2o_data_doubled["cylinders"].asfactor()
h2o_data_doubled_weights["economy_20mpg"] = h2o_data_doubled_weights["economy_20mpg"].asfactor()
h2o_data_doubled_weights["cylinders"] = h2o_data_doubled_weights["cylinders"].asfactor()
print("Checking that doubling some weights is equivalent to doubling those observations:")
print()
check_same(h2o_data_doubled, h2o_data_doubled_weights, 1)
# TODO: random weights
# TODO: all zero weights???
# TODO: negative weights???
if __name__ == "__main__":
pyunit_utils.standalone_test(weights_check)
else:
weights_check()
| apache-2.0 |
0x0all/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/fontconfig_pattern.py | 72 | 6429 | """
A module for parsing and generating fontconfig patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
# Author : Michael Droettboom <[email protected]>
# License : matplotlib license (PSF compatible)
# This class is defined here because it must be available in:
# - The old-style config framework (:file:`rcsetup.py`)
# - The traits-based config framework (:file:`mpltraits.py`)
# - The font manager (:file:`font_manager.py`)
# It probably logically belongs in :file:`font_manager.py`, but
# placing it in any of these places would have created cyclical
# dependency problems, or an undesired dependency on traits even
# when the traits-based config framework is not used.
import re
from matplotlib.pyparsing import Literal, ZeroOrMore, \
Optional, Regex, StringEnd, ParseException, Suppress
family_punc = r'\\\-:,'
family_unescape = re.compile(r'\\([%s])' % family_punc).sub
family_escape = re.compile(r'([%s])' % family_punc).sub
value_punc = r'\\=_:,'
value_unescape = re.compile(r'\\([%s])' % value_punc).sub
value_escape = re.compile(r'([%s])' % value_punc).sub
class FontconfigPatternParser:
"""A simple pyparsing-based parser for fontconfig-style patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
_constants = {
'thin' : ('weight', 'light'),
'extralight' : ('weight', 'light'),
'ultralight' : ('weight', 'light'),
'light' : ('weight', 'light'),
'book' : ('weight', 'book'),
'regular' : ('weight', 'regular'),
'normal' : ('weight', 'normal'),
'medium' : ('weight', 'medium'),
'demibold' : ('weight', 'demibold'),
'semibold' : ('weight', 'semibold'),
'bold' : ('weight', 'bold'),
'extrabold' : ('weight', 'extra bold'),
'black' : ('weight', 'black'),
'heavy' : ('weight', 'heavy'),
'roman' : ('slant', 'normal'),
'italic' : ('slant', 'italic'),
'oblique' : ('slant', 'oblique'),
'ultracondensed' : ('width', 'ultra-condensed'),
'extracondensed' : ('width', 'extra-condensed'),
'condensed' : ('width', 'condensed'),
'semicondensed' : ('width', 'semi-condensed'),
'expanded' : ('width', 'expanded'),
'extraexpanded' : ('width', 'extra-expanded'),
'ultraexpanded' : ('width', 'ultra-expanded')
}
def __init__(self):
family = Regex(r'([^%s]|(\\[%s]))*' %
(family_punc, family_punc)) \
.setParseAction(self._family)
size = Regex(r"([0-9]+\.?[0-9]*|\.[0-9]+)") \
.setParseAction(self._size)
name = Regex(r'[a-z]+') \
.setParseAction(self._name)
value = Regex(r'([^%s]|(\\[%s]))*' %
(value_punc, value_punc)) \
.setParseAction(self._value)
families =(family
+ ZeroOrMore(
Literal(',')
+ family)
).setParseAction(self._families)
point_sizes =(size
+ ZeroOrMore(
Literal(',')
+ size)
).setParseAction(self._point_sizes)
property =( (name
+ Suppress(Literal('='))
+ value
+ ZeroOrMore(
Suppress(Literal(','))
+ value)
)
| name
).setParseAction(self._property)
pattern =(Optional(
families)
+ Optional(
Literal('-')
+ point_sizes)
+ ZeroOrMore(
Literal(':')
+ property)
+ StringEnd()
)
self._parser = pattern
self.ParseException = ParseException
def parse(self, pattern):
"""
Parse the given fontconfig *pattern* and return a dictionary
of key/value pairs useful for initializing a
:class:`font_manager.FontProperties` object.
"""
props = self._properties = {}
try:
self._parser.parseString(pattern)
except self.ParseException, e:
raise ValueError("Could not parse font string: '%s'\n%s" % (pattern, e))
self._properties = None
return props
def _family(self, s, loc, tokens):
return [family_unescape(r'\1', str(tokens[0]))]
def _size(self, s, loc, tokens):
return [float(tokens[0])]
def _name(self, s, loc, tokens):
return [str(tokens[0])]
def _value(self, s, loc, tokens):
return [value_unescape(r'\1', str(tokens[0]))]
def _families(self, s, loc, tokens):
self._properties['family'] = [str(x) for x in tokens]
return []
def _point_sizes(self, s, loc, tokens):
self._properties['size'] = [str(x) for x in tokens]
return []
def _property(self, s, loc, tokens):
if len(tokens) == 1:
if tokens[0] in self._constants:
key, val = self._constants[tokens[0]]
self._properties.setdefault(key, []).append(val)
else:
key = tokens[0]
val = tokens[1:]
self._properties.setdefault(key, []).extend(val)
return []
parse_fontconfig_pattern = FontconfigPatternParser().parse
def generate_fontconfig_pattern(d):
"""
Given a dictionary of key/value pairs, generates a fontconfig
pattern string.
"""
props = []
families = ''
size = ''
for key in 'family style variant weight stretch file size'.split():
val = getattr(d, 'get_' + key)()
if val is not None and val != []:
if type(val) == list:
val = [value_escape(r'\\\1', str(x)) for x in val if x is not None]
if val != []:
val = ','.join(val)
props.append(":%s=%s" % (key, val))
return ''.join(props)
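if __name__ == '__main__':
    # --- Hedged usage sketch (not part of the original module) --------------
    # Parsing a simple fontconfig-style pattern; the pattern string below is
    # an assumption for illustration. It should yield 'family', 'size' and
    # 'weight' entries in the returned dictionary.
    props = parse_fontconfig_pattern('serif-12:bold')
    print(props)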
| gpl-3.0 |
josephcslater/scipy | tools/refguide_check.py | 4 | 29462 | #!/usr/bin/env python
"""
refguide_check.py [OPTIONS] [-- ARGS]
Check for a Scipy submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings. This is different from doctesting [we do not aim to have
scipy docstrings doctestable!], this is just to make sure that code in
docstrings is valid python::
$ python refguide_check.py --doctests optimize
"""
from __future__ import print_function
import sys
import os
import re
import copy
import inspect
import warnings
import doctest
import tempfile
import io
import docutils.core
from docutils.parsers.rst import directives
import shutil
import glob
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
from argparse import ArgumentParser
from pkg_resources import parse_version
import sphinx
import numpy as np
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
if parse_version(sphinx.__version__) >= parse_version('1.5'):
# Enable specific Sphinx directives
from sphinx.directives import SeeAlso, Only
directives.register_directive('seealso', SeeAlso)
directives.register_directive('only', Only)
else:
# Remove sphinx directives that don't run without Sphinx environment.
# Sphinx < 1.5 installs all directives on import...
directives._directives.pop('versionadded', None)
directives._directives.pop('versionchanged', None)
directives._directives.pop('moduleauthor', None)
directives._directives.pop('sectionauthor', None)
directives._directives.pop('codeauthor', None)
directives._directives.pop('toctree', None)
BASE_MODULE = "scipy"
PUBLIC_SUBMODULES = [
'cluster',
'cluster.hierarchy',
'cluster.vq',
'constants',
'fftpack',
'fftpack.convolve',
'integrate',
'interpolate',
'io',
'io.arff',
'io.wavfile',
'linalg',
'linalg.blas',
'linalg.lapack',
'linalg.interpolative',
'misc',
'ndimage',
'odr',
'optimize',
'signal',
'sparse',
'sparse.csgraph',
'sparse.linalg',
'spatial',
'spatial.distance',
'special',
'stats',
'stats.mstats',
]
# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {
'fftpack.convolve': 'fftpack',
'io.wavfile': 'io',
'io.arff': 'io',
}
# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([
'scipy.stats.kstwobign', # inaccurate cdf or ppf
'scipy.stats.levy_stable',
'scipy.special.sinc', # comes from numpy
'scipy.misc.who', # comes from numpy
'io.rst', # XXX: need to figure out how to deal w/ mat files
])
# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = [
r'scipy\.sparse\.csgraph',
r'scipy\.sparse\.linalg',
r'scipy\.spatial\.distance',
r'scipy\.linalg\.blas\.[sdczi].*',
r'scipy\.linalg\.lapack\.[sdczi].*',
]
# these names are not required to be in an autosummary:: listing
# despite being in ALL
REFGUIDE_AUTOSUMMARY_SKIPLIST = [
r'scipy\.special\..*_roots', # old aliases for scipy.special.*_roots
r'scipy\.linalg\.solve_lyapunov' # deprecated name
]
HAVE_MATPLOTLIB = False
def short_path(path, cwd=None):
"""
Return relative or absolute path name, whichever is shortest.
"""
if not isinstance(path, str):
return path
if cwd is None:
cwd = os.getcwd()
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, cwd)
if len(abspath) <= len(relpath):
return abspath
return relpath
def find_names(module, names_dict):
# Refguide entries:
#
# - 3 spaces followed by function name, and maybe some spaces, some
# dashes, and an explanation; only function names listed in
# refguide are formatted like this (mostly, there may be some false
# positives)
#
# - special directives, such as data and function
#
# - (scipy.constants only): quoted list
#
patterns = [
r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
]
if module.__name__ == 'scipy.constants':
patterns += ["^``([a-z_0-9A-Z]+)``"]
patterns = [re.compile(pattern) for pattern in patterns]
module_name = module.__name__
for line in module.__doc__.splitlines():
res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
if res:
module_name = res.group(1)
continue
for pattern in patterns:
res = re.match(pattern, line)
if res is not None:
name = res.group(1)
entry = '.'.join([module_name, name])
names_dict.setdefault(module_name, set()).add(name)
break
def get_all_dict(module):
"""Return a copy of the __all__ dict with irrelevant items removed."""
if hasattr(module, "__all__"):
all_dict = copy.deepcopy(module.__all__)
else:
all_dict = copy.deepcopy(dir(module))
all_dict = [name for name in all_dict
if not name.startswith("_")]
for name in ['absolute_import', 'division', 'print_function']:
try:
all_dict.remove(name)
except ValueError:
pass
# Modules are almost always private; real submodules need a separate
# run of refguide_check.
all_dict = [name for name in all_dict
if not inspect.ismodule(getattr(module, name, None))]
deprecated = []
not_deprecated = []
for name in all_dict:
f = getattr(module, name, None)
if callable(f) and is_deprecated(f):
deprecated.append(name)
else:
not_deprecated.append(name)
others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
"""Return sets of objects only in __all__, refguide, or completely missing."""
only_all = set()
for name in all_dict:
if name not in names:
for pat in REFGUIDE_AUTOSUMMARY_SKIPLIST:
if re.match(pat, module_name + '.' + name):
break
else:
only_all.add(name)
only_ref = set()
missing = set()
for name in names:
if name not in all_dict:
for pat in REFGUIDE_ALL_SKIPLIST:
if re.match(pat, module_name + '.' + name):
if name not in others:
missing.add(name)
break
else:
only_ref.add(name)
return only_all, only_ref, missing
def is_deprecated(f):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
try:
f(**{"not a kwarg":None})
except DeprecationWarning:
return True
except:
pass
return False
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
num_all = len(all_dict)
num_ref = len(names)
output = ""
output += "Non-deprecated objects in __all__: %i\n" % num_all
output += "Objects in refguide: %i\n\n" % num_ref
only_all, only_ref, missing = compare(all_dict, others, names, module_name)
dep_in_ref = set(only_ref).intersection(deprecated)
only_ref = set(only_ref).difference(deprecated)
if len(dep_in_ref) > 0:
output += "Deprecated objects in refguide::\n\n"
for name in sorted(deprecated):
output += " " + name + "\n"
if len(only_all) == len(only_ref) == len(missing) == 0:
if dots:
output_dot('.')
return [(None, True, output)]
else:
if len(only_all) > 0:
output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
for name in sorted(only_all):
output += " " + name + "\n"
output += "\nThis issue can be fixed by adding these objects to\n"
output += "the function listing in __init__.py for this module\n"
if len(only_ref) > 0:
output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
for name in sorted(only_ref):
output += " " + name + "\n"
output += "\nThis issue should likely be fixed by removing these objects\n"
output += "from the function listing in __init__.py for this module\n"
output += "or adding them to __all__.\n"
if len(missing) > 0:
output += "ERROR: missing objects::\n\n"
for name in sorted(missing):
output += " " + name + "\n"
if dots:
output_dot('F')
return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
if text is None:
if dots:
output_dot('E')
return False, "ERROR: %s: no documentation" % (name,)
ok_unknown_items = set([
'mod', 'currentmodule', 'autosummary', 'data',
'obj', 'versionadded', 'versionchanged', 'module', 'class',
'ref', 'func', 'toctree', 'moduleauthor',
'sectionauthor', 'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv'
])
# Run through docutils
error_stream = io.StringIO()
def resolve(name, is_label=False):
return ("http://foo", name)
token = '<RST-VALIDATE-SYNTAX-CHECK>'
docutils.core.publish_doctree(
text, token,
settings_overrides = dict(halt_level=5,
traceback=True,
default_reference_context='title-reference',
default_role='emphasis',
link_base='',
resolve_name=resolve,
stylesheet_path='',
raw_enabled=0,
file_insertion_enabled=0,
warning_stream=error_stream))
# Print errors, disregarding unimportant ones
error_msg = error_stream.getvalue()
errors = error_msg.split(token)
success = True
output = ""
for error in errors:
lines = error.splitlines()
if not lines:
continue
m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
if m:
if m.group(1) in ok_unknown_items:
continue
m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
if m:
continue
output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n"
success = False
if not success:
output += " " + "-"*72 + "\n"
for lineno, line in enumerate(text.splitlines()):
output += " %-4d %s\n" % (lineno+1, line)
output += " " + "-"*72 + "\n\n"
if dots:
output_dot('.' if success else 'F')
return success, output
def output_dot(msg='.', stream=sys.stderr):
stream.write(msg)
stream.flush()
def check_rest(module, names, dots=True):
"""
Check reStructuredText formatting of docstrings
Returns: [(name, success_flag, output), ...]
"""
try:
skip_types = (dict, str, unicode, float, int)
except NameError:
# python 3
skip_types = (dict, str, float, int)
results = []
if module.__name__[6:] not in OTHER_MODULE_DOCS:
results += [(module.__name__,) +
validate_rst_syntax(inspect.getdoc(module),
module.__name__, dots=dots)]
for name in names:
full_name = module.__name__ + '.' + name
obj = getattr(module, name, None)
if obj is None:
results.append((full_name, False, "%s has no docstring" % (full_name,)))
continue
elif isinstance(obj, skip_types):
continue
if inspect.ismodule(obj):
text = inspect.getdoc(obj)
else:
try:
text = str(get_doc_object(obj))
except:
import traceback
results.append((full_name, False,
"Error in docstring format!\n" +
traceback.format_exc()))
continue
m = re.search("([\x00-\x09\x0b-\x1f])", text)
if m:
msg = ("Docstring contains a non-printable character %r! "
"Maybe forgot r\"\"\"?" % (m.group(1),))
results.append((full_name, False, msg))
continue
try:
src_file = short_path(inspect.getsourcefile(obj))
except TypeError:
src_file = None
if src_file:
file_full_name = src_file + ':' + full_name
else:
file_full_name = full_name
results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))
return results
### Doctest helpers ####
# the namespace to run examples in
DEFAULT_NAMESPACE = {'np': np}
# the namespace to do checks in
CHECK_NAMESPACE = {
'np': np,
'assert_allclose': np.testing.assert_allclose,
'assert_equal': np.testing.assert_equal,
# recognize numpy repr's
'array': np.array,
'matrix': np.matrix,
'int64': np.int64,
'uint64': np.uint64,
'int8': np.int8,
'int32': np.int32,
'float64': np.float64,
'dtype': np.dtype,
'nan': np.nan,
'NaN': np.nan,
'inf': np.inf,
'Inf': np.inf,}
class DTRunner(doctest.DocTestRunner):
DIVIDER = "\n"
def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
self._item_name = item_name
doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
optionflags=optionflags)
def _report_item_name(self, out, new_line=False):
if self._item_name is not None:
if new_line:
out("\n")
self._item_name = None
def report_start(self, out, test, example):
self._checker._source = example.source
return doctest.DocTestRunner.report_start(self, out, test, example)
def report_success(self, out, test, example, got):
if self._verbose:
self._report_item_name(out, new_line=True)
return doctest.DocTestRunner.report_success(self, out, test, example, got)
def report_unexpected_exception(self, out, test, example, exc_info):
self._report_item_name(out)
return doctest.DocTestRunner.report_unexpected_exception(
self, out, test, example, exc_info)
def report_failure(self, out, test, example, got):
self._report_item_name(out)
return doctest.DocTestRunner.report_failure(self, out, test,
example, got)
class Checker(doctest.OutputChecker):
obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')
int_pattern = re.compile('^[0-9]+L?$')
vanilla = doctest.OutputChecker()
rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary"}
stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
'set_title', 'imshow', 'plt.show', 'ax.axis', 'plt.plot(',
'.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim',
'# reformatted'}
def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
self.parse_namedtuples = parse_namedtuples
self.atol, self.rtol = atol, rtol
if ns is None:
self.ns = dict(CHECK_NAMESPACE)
else:
self.ns = ns
def check_output(self, want, got, optionflags):
# cut it short if they are equal
if want == got:
return True
# skip stopwords in source
if any(word in self._source for word in self.stopwords):
return True
# skip random stuff
if any(word in want for word in self.rndm_markers):
return True
# skip function/object addresses
if self.obj_pattern.search(got):
return True
# ignore comments (e.g. signal.freqresp)
if want.lstrip().startswith("#"):
return True
# python 2 long integers are equal to python 3 integers
if self.int_pattern.match(want) and self.int_pattern.match(got):
if want.rstrip("L\r\n") == got.rstrip("L\r\n"):
return True
# try the standard doctest
try:
if self.vanilla.check_output(want, got, optionflags):
return True
except Exception:
pass
# OK then, convert strings to objects
try:
a_want = eval(want, dict(self.ns))
a_got = eval(got, dict(self.ns))
except:
if not self.parse_namedtuples:
return False
# suppose that "want" is a tuple, and "got" is smth like
# MoodResult(statistic=10, pvalue=0.1).
# Then convert the latter to the tuple (10, 0.1),
# and then compare the tuples.
try:
num = len(a_want)
regex = ('[\w\d_]+\(' +
', '.join(['[\w\d_]+=(.+)']*num) +
'\)')
grp = re.findall(regex, got.replace('\n', ' '))
if len(grp) > 1: # no more than one for now
return False
# fold it back to a tuple
got_again = '(' + ', '.join(grp[0]) + ')'
return self.check_output(want, got_again, optionflags)
except Exception:
return False
# ... and defer to numpy
try:
return self._do_check(a_want, a_got)
except Exception:
# heterog tuple, eg (1, np.array([1., 2.]))
try:
return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
except (TypeError, ValueError):
return False
def _do_check(self, want, got):
# This should be done exactly as written to correctly handle all of
        # numpy-comparable objects, strings, and heterogeneous tuples
try:
if want == got:
return True
except Exception:
pass
return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
def _run_doctests(tests, full_name, verbose, doctest_warnings):
"""Run modified doctests for the set of `tests`.
Returns: list of [(success_flag, output), ...]
"""
flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL
runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
verbose=verbose)
output = []
success = True
def out(msg):
output.append(msg)
class MyStderr(object):
"""Redirect stderr to the current stdout"""
def write(self, msg):
if doctest_warnings:
sys.stdout.write(msg)
else:
out(msg)
# Run tests, trying to restore global state afterward
old_printoptions = np.get_printoptions()
old_errstate = np.seterr()
old_stderr = sys.stderr
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
sys.stderr = MyStderr()
try:
os.chdir(tmpdir)
# try to ensure random seed is NOT reproducible
np.random.seed(None)
for t in tests:
t.filename = short_path(t.filename, cwd)
fails, successes = runner.run(t, out=out)
if fails > 0:
success = False
finally:
sys.stderr = old_stderr
os.chdir(cwd)
shutil.rmtree(tmpdir)
np.set_printoptions(**old_printoptions)
np.seterr(**old_errstate)
return success, output
def check_doctests(module, verbose, ns=None,
dots=True, doctest_warnings=False):
"""Check code in docstrings of the module's public symbols.
Returns: list of [(item_name, success_flag, output), ...]
"""
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
# Loop over non-deprecated items
results = []
for name in get_all_dict(module)[0]:
full_name = module.__name__ + '.' + name
if full_name in DOCTEST_SKIPLIST:
continue
try:
obj = getattr(module, name)
except AttributeError:
import traceback
results.append((full_name, False,
"Missing item!\n" +
traceback.format_exc()))
continue
finder = doctest.DocTestFinder()
try:
tests = finder.find(obj, name, globs=dict(ns))
except:
import traceback
results.append((full_name, False,
"Failed to get doctests!\n" +
traceback.format_exc()))
continue
success, output = _run_doctests(tests, full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, "".join(output)))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def check_doctests_testfile(fname, verbose, ns=None,
dots=True, doctest_warnings=False):
"""Check code in a text file.
Mimic `check_doctests` above, differing mostly in test discovery.
(which is borrowed from stdlib's doctest.testfile here,
https://github.com/python-git/python/blob/master/Lib/doctest.py)
Returns: list of [(item_name, success_flag, output), ...]
Notes
-----
We also try to weed out pseudocode:
* We maintain a list of exceptions which signal pseudocode,
* We split the text file into "blocks" of code separated by empty lines
and/or intervening text.
* If a block contains a marker, the whole block is then assumed to be
pseudocode. It is then not being doctested.
The rationale is that typically, the text looks like this:
blah
<BLANKLINE>
>>> from numpy import some_module # pseudocode!
>>> func = some_module.some_function
>>> func(42) # still pseudocode
146
<BLANKLINE>
blah
<BLANKLINE>
>>> 2 + 3 # real code, doctest it
5
"""
results = []
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
_, short_name = os.path.split(fname)
if short_name in DOCTEST_SKIPLIST:
return results
full_name = fname
text = open(fname).read()
PSEUDOCODE = set(['some_function', 'some_module', 'import example',
'ctypes.CDLL', # likely need compiling, skip it
                      'integrate.nquad(func,'  # ctypes integrate tutorial
])
# split the text into "blocks" and try to detect and omit pseudocode blocks.
parser = doctest.DocTestParser()
good_parts = []
for part in text.split('\n\n'):
tests = parser.get_doctest(part, ns, fname, fname, 0)
if any(word in ex.source for word in PSEUDOCODE
for ex in tests.examples):
# omit it
pass
else:
# `part` looks like a good code, let's doctest it
good_parts += [part]
# Reassemble the good bits and doctest them:
good_text = '\n\n'.join(good_parts)
tests = parser.get_doctest(good_text, ns, fname, fname, 0)
success, output = _run_doctests([tests], full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, "".join(output)))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def init_matplotlib():
global HAVE_MATPLOTLIB
try:
import matplotlib
matplotlib.use('Agg')
HAVE_MATPLOTLIB = True
except ImportError:
HAVE_MATPLOTLIB = False
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("module_names", metavar="SUBMODULES", default=[],
nargs='*', help="Submodules to check (default: all public)")
parser.add_argument("--doctests", action="store_true", help="Run also doctests")
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("--doctest-warnings", action="store_true",
help="Enforce warning checking for doctests")
parser.add_argument("--skip-tutorial", action="store_true",
help="Skip running doctests in the tutorial.")
args = parser.parse_args(argv)
modules = []
names_dict = {}
if args.module_names:
args.skip_tutorial = True
else:
args.module_names = list(PUBLIC_SUBMODULES)
os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
module_names = list(args.module_names)
for name in list(module_names):
if name in OTHER_MODULE_DOCS:
name = OTHER_MODULE_DOCS[name]
if name not in module_names:
module_names.append(name)
for submodule_name in module_names:
module_name = BASE_MODULE + '.' + submodule_name
__import__(module_name)
module = sys.modules[module_name]
if submodule_name not in OTHER_MODULE_DOCS:
find_names(module, names_dict)
if submodule_name in args.module_names:
modules.append(module)
dots = True
success = True
results = []
print("Running checks for %d modules:" % (len(modules),))
if args.doctests or not args.skip_tutorial:
init_matplotlib()
for module in modules:
if dots:
if module is not modules[0]:
sys.stderr.write(' ')
sys.stderr.write(module.__name__ + ' ')
sys.stderr.flush()
all_dict, deprecated, others = get_all_dict(module)
names = names_dict.get(module.__name__, set())
mod_results = []
mod_results += check_items(all_dict, names, deprecated, others, module.__name__)
mod_results += check_rest(module, set(names).difference(deprecated),
dots=dots)
if args.doctests:
mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
for v in mod_results:
assert isinstance(v, tuple), v
results.append((module, mod_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
if not args.skip_tutorial:
base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
tut_path = os.path.join(base_dir, 'doc', 'source', 'tutorial', '*.rst')
print('\nChecking tutorial files at %s:' % os.path.relpath(tut_path, os.getcwd()))
for filename in sorted(glob.glob(tut_path)):
if dots:
sys.stderr.write('\n')
sys.stderr.write(os.path.split(filename)[1] + ' ')
sys.stderr.flush()
tut_results = check_doctests_testfile(filename, (args.verbose >= 2),
dots=dots, doctest_warnings=args.doctest_warnings)
def scratch(): pass # stub out a "module", see below
scratch.__name__ = filename
results.append((scratch, tut_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
# Report results
all_success = True
for module, mod_results in results:
success = all(x[1] for x in mod_results)
all_success = all_success and success
if success and args.verbose == 0:
continue
print("")
print("=" * len(module.__name__))
print(module.__name__)
print("=" * len(module.__name__))
print("")
for name, success, output in mod_results:
if name is None:
if not success or args.verbose >= 1:
print(output.strip())
print("")
elif not success or (args.verbose >= 2 and output.strip()):
print(name)
print("-"*len(name))
print("")
print(output.strip())
print("")
if all_success:
print("\nOK: refguide and doctests checks passed!")
sys.exit(0)
else:
print("\nERROR: refguide or doctests have errors")
sys.exit(1)
if __name__ == '__main__':
main(argv=sys.argv[1:])
| bsd-3-clause |
abimannans/scikit-learn | examples/model_selection/plot_learning_curve.py | 250 | 4171 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, this shape of curve is found very
often in more complex datasets: the training score starts very high and
decreases, while the cross-validation score starts very low and increases. On
the right side we see the learning curve of an SVM with an RBF kernel. We can
see clearly that the training score is still around the maximum and the
validation score could be increased with more training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.learning_curve import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
    Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=100,
test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=10,
test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
| bsd-3-clause |