repo_name | path | copies | size | content | license
---|---|---|---|---|---|
RayMick/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 286 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison of the decision boundaries generated on the iris dataset
by Label Propagation and SVM.
This demonstrates that Label Propagation can learn a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
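# In scikit-learn's semi-supervised API a label of -1 marks a sample as
# unlabeled, so the two masks above randomly discard roughly 30% and 50%
# of the labels respectively.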
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause |
securestate/king-phisher | king_phisher/client/graphs.py | 4 | 36836 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/graphs.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
import logging
import string
from king_phisher import color
from king_phisher import geoip
from king_phisher import its
from king_phisher import ua_parser
from king_phisher import utilities
from king_phisher.client import gui_utilities
from king_phisher.client.widget import extras
from king_phisher.client.widget import managers
from king_phisher.constants import ColorHexCode
from gi.repository import Gtk
from smoke_zephyr.requirements import check_requirements
from smoke_zephyr.utilities import unique
try:
import matplotlib
matplotlib.rcParams['backend'] = 'GTK3Cairo'
from matplotlib import dates
from matplotlib import patches
from matplotlib import pyplot
from matplotlib import ticker
from matplotlib import lines
from matplotlib.backends.backend_gtk3cairo import FigureCanvasGTK3Cairo as FigureCanvas
from matplotlib.backends.backend_gtk3cairo import FigureManagerGTK3Cairo as FigureManager
from matplotlib.backends.backend_gtk3 import NavigationToolbar2GTK3 as NavigationToolbar
except ImportError:
has_matplotlib = False
"""Whether the :py:mod:`matplotlib` module is available."""
else:
has_matplotlib = True
try:
import mpl_toolkits.basemap
except ImportError:
has_matplotlib_basemap = False
"""Whether the :py:mod:`mpl_toolkits.basemap` module is available."""
else:
if not its.frozen and check_requirements(['basemap>=1.0.7']):
has_matplotlib_basemap = False
else:
has_matplotlib_basemap = True
EXPORTED_GRAPHS = {}
MPL_COLOR_NULL = 'darkcyan'
PERCENT_FORMAT = '.3g'
__all__ = ('export_graph_provider', 'get_graph', 'get_graphs', 'CampaignGraph')
def _matrices_add(mat1, mat2):
if not len(mat1) == len(mat2):
raise RuntimeError('len(mat1) != len(mat2)')
return [mat1[i] + mat2[i] for i in range(len(mat1))]
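# element-wise addition of two equal-length sequences,
# e.g. _matrices_add([1, 2], [3, 4]) == [4, 6]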
def export_graph_provider(cls):
"""
Decorator to mark classes as valid graph providers. This decorator also sets
the :py:attr:`~.CampaignGraph.name` attribute.
:param class cls: The class to mark as a graph provider.
:return: The *cls* parameter is returned.
"""
if not issubclass(cls, CampaignGraph):
raise RuntimeError("{0} is not a subclass of CampaignGraph".format(cls.__name__))
if not cls.is_available:
return None
graph_name = cls.__name__[13:]
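# slicing off the first 13 characters strips the 'CampaignGraph' prefix,
# e.g. CampaignGraphOverview is registered as 'Overview'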
cls.name = graph_name
EXPORTED_GRAPHS[graph_name] = cls
return cls
def get_graph(graph_name):
"""
Return the graph providing class for *graph_name*. The class providing the
specified graph must have been previously exported using
:py:func:`.export_graph_provider`.
:param str graph_name: The name of the graph provider.
:return: The graph provider class.
:rtype: :py:class:`.CampaignGraph`
"""
return EXPORTED_GRAPHS.get(graph_name)
def get_graphs():
"""
Get a list of all registered graph providers.
:return: All registered graph providers.
:rtype: list
"""
return sorted(EXPORTED_GRAPHS.keys())
class GraphBase(object):
"""
A basic graph provider for using :py:mod:`matplotlib` to create graph
representations of campaign data. This class is meant to be subclassed
by real providers.
"""
name = 'Unknown'
"""The name of the graph provider."""
name_human = 'Unknown'
"""The human readable name of the graph provider used for UI identification."""
graph_title = 'Unknown'
"""The title that will be given to the graph."""
is_available = True
def __init__(self, application, size_request=None, style_context=None):
"""
:param tuple size_request: The size to set for the canvas.
"""
self.logger = logging.getLogger('KingPhisher.Client.Graph.' + self.__class__.__name__[13:])
self.application = application
self.style_context = style_context
self.config = application.config
"""A reference to the King Phisher client configuration."""
self.figure, _ = pyplot.subplots()
self.figure.set_facecolor(self.get_color('bg', ColorHexCode.WHITE))
self.axes = self.figure.get_axes()
self.canvas = FigureCanvas(self.figure)
self.manager = None
self.minimum_size = (380, 200)
"""An absolute minimum size for the canvas."""
if size_request is not None:
self.resize(*size_request)
self.canvas.mpl_connect('button_press_event', self.mpl_signal_canvas_button_pressed)
self.canvas.show()
self.navigation_toolbar = NavigationToolbar(self.canvas, self.application.get_active_window())
self.popup_menu = managers.MenuManager()
self.popup_menu.append('Export', self.signal_activate_popup_menu_export)
self.popup_menu.append('Refresh', self.signal_activate_popup_refresh)
menu_item = Gtk.CheckMenuItem.new_with_label('Show Toolbar')
menu_item.connect('toggled', self.signal_toggled_popup_menu_show_toolbar)
self._menu_item_show_toolbar = menu_item
self.popup_menu.append_item(menu_item)
self.navigation_toolbar.hide()
self._legend = None
@property
def rpc(self):
return self.application.rpc
@staticmethod
def _ax_hide_ticks(ax):
for tick in ax.yaxis.get_major_ticks():
tick.tick1On = False
tick.tick2On = False
@staticmethod
def _ax_set_spine_color(ax, spine_color):
for pos in ('top', 'right', 'bottom', 'left'):
ax.spines[pos].set_color(spine_color)
def add_legend_patch(self, legend_rows, fontsize=None):
if matplotlib.__version__ == '3.0.2':
self.logger.warning('skipping legend patch with matplotlib v3.0.2 for compatibility')
return
if self._legend is not None:
self._legend.remove()
self._legend = None
fontsize = fontsize or self.fontsize_scale
legend_bbox = self.figure.legend(
tuple(patches.Patch(color=patch_color) for patch_color, _ in legend_rows),
tuple(label for _, label in legend_rows),
borderaxespad=1.25,
fontsize=fontsize,
frameon=True,
handlelength=1.5,
handletextpad=0.75,
labelspacing=0.3,
loc='lower right'
)
legend_bbox.legendPatch.set_linewidth(0)
self._legend = legend_bbox
def get_color(self, color_name, default):
"""
Get a color by its style name such as 'fg' for foreground. If the
specified color does not exist, default will be returned. The underlying
logic for this function is provided by
:py:func:`~.gui_utilities.gtk_style_context_get_color`.
:param str color_name: The style name of the color.
:param default: The default color to return if the specified one was not found.
:return: The desired color if it was found.
:rtype: tuple
"""
color_name = 'theme_color_graph_' + color_name
sc_color = gui_utilities.gtk_style_context_get_color(self.style_context, color_name, default)
return (sc_color.red, sc_color.green, sc_color.blue)
def make_window(self):
"""
Create a window from the figure manager.
:return: The graph in a new, dedicated window.
:rtype: :py:class:`Gtk.Window`
"""
if self.manager is None:
self.manager = FigureManager(self.canvas, 0)
self.navigation_toolbar.destroy()
self.navigation_toolbar = self.manager.toolbar
self._menu_item_show_toolbar.set_active(True)
window = self.manager.window
window.set_transient_for(self.application.get_active_window())
window.set_title(self.graph_title)
return window
@property
def fontsize_scale(self):
scale = self.markersize_scale
if scale < 5:
fontsize = 'xx-small'
elif scale < 7:
fontsize = 'x-small'
elif scale < 9:
fontsize = 'small'
else:
fontsize = 'medium'
return fontsize
@property
def markersize_scale(self):
bbox = self.axes[0].get_window_extent().transformed(self.figure.dpi_scale_trans.inverted())
return bbox.width * self.figure.dpi * 0.01
def mpl_signal_canvas_button_pressed(self, event):
if event.button != 3:
return
self.popup_menu.menu.popup(None, None, None, None, event.button, Gtk.get_current_event_time())
return True
def signal_activate_popup_menu_export(self, action):
dialog = extras.FileChooserDialog('Export Graph', self.application.get_active_window())
file_name = self.config['campaign_name'] + '.png'
response = dialog.run_quick_save(file_name)
dialog.destroy()
if not response:
return
destination_file = response['target_path']
self.figure.savefig(destination_file, dpi=200, facecolor=self.figure.get_facecolor(), format='png')
def signal_activate_popup_refresh(self, event):
self.refresh()
def signal_toggled_popup_menu_show_toolbar(self, widget):
if widget.get_property('active'):
self.navigation_toolbar.show()
else:
self.navigation_toolbar.hide()
def resize(self, width=0, height=0):
"""
Attempt to resize the canvas. Regardless of the parameters the canvas
will never be resized to be smaller than :py:attr:`.minimum_size`.
:param int width: The desired width of the canvas.
:param int height: The desired height of the canvas.
"""
min_width, min_height = self.minimum_size
width = max(width, min_width)
height = max(height, min_height)
self.canvas.set_size_request(width, height)
class CampaignGraph(GraphBase):
"""
Graph format used for the graphs generated in the dashboard and
in the create graphs tab.
"""
def _load_graph(self, info_cache):
raise NotImplementedError()
def load_graph(self):
"""Load the graph information via :py:meth:`.refresh`."""
self.refresh()
def refresh(self, info_cache=None, stop_event=None):
"""
Refresh the graph data by retrieving the information from the
remote server.
:param dict info_cache: An optional cache of data tables.
:param stop_event: An optional object indicating that the operation should stop.
:type stop_event: :py:class:`threading.Event`
:return: A dictionary of cached tables from the server.
:rtype: dict
"""
info_cache = (info_cache or {})
if not self.rpc:
return info_cache
if stop_event and stop_event.is_set():
return info_cache
if not info_cache:
info_cache = self._get_graphql_campaign_cache(self.config['campaign_id'])
for ax in self.axes:
ax.clear()
if self._legend is not None:
self._legend.remove()
self._legend = None
self._load_graph(info_cache)
self.figure.suptitle(
self.graph_title,
color=self.get_color('fg', ColorHexCode.BLACK),
size=14,
weight='bold',
y=0.97
)
self.canvas.draw()
return info_cache
def _get_graphql_campaign_cache(self, campaign_id):
options = {'campaign': campaign_id}
results = self.rpc.graphql("""\
query getCampaignGraphing($campaign: String!) {
db {
campaign(id: $campaign) {
name
description
expiration
messages {
total
edges {
node {
id
targetEmail
firstName
lastName
opened
openerIp
openerUserAgent
sent
trained
companyDepartment {
id
name
}
}
}
}
visits {
total
edges {
node {
id
messageId
campaignId
count
ip
ipGeoloc {
city
continent
coordinates
country
postalCode
timeZone
}
firstSeen
lastSeen
userAgent
}
}
}
credentials {
total
edges {
node {
id
visitId
messageId
campaignId
username
password
submitted
}
}
}
}
}
}""", options)
info_cache = {
'campaign': {
'name': results['db']['campaign']['name'],
'description': results['db']['campaign']['description'],
'expiration': results['db']['campaign']['expiration'],
},
'messages': results['db']['campaign']['messages'],
'visits': results['db']['campaign']['visits'],
'credentials': results['db']['campaign']['credentials']
}
return info_cache
class CampaignBarGraph(CampaignGraph):
subplot_adjustment = {'top': 0.9, 'right': 0.85, 'bottom': 0.05, 'left': 0.225}
yticklabel_config = {
'left': {'size': 10},
'right': {'format': "{0:,}", 'size': 12}
}
def __init__(self, *args, **kwargs):
super(CampaignBarGraph, self).__init__(*args, **kwargs)
self.figure.subplots_adjust(**self.subplot_adjustment)
ax = self.axes[0]
ax.tick_params(
axis='both',
top=False,
right=False,
bottom=False,
left=False,
labelbottom=False
)
ax.invert_yaxis()
self.axes.append(ax.twinx())
def _barh_stacked(self, ax, bars, bar_colors, height):
"""
:param ax: The axis to use for the graph.
:param tuple bars: A two dimensional array of bars, and their respective stack sizes.
:param tuple bar_colors: A one dimensional array of colors for each of the stacks.
:param float height: The height of the bars.
:return: The container holding the bars drawn on the axis.
"""
# define the necessary colors
ax.set_facecolor(self.get_color('bg', ColorHexCode.WHITE))
self.resize(height=60 + 20 * len(bars))
bar_count = len(bars)
columns = []
left_subbars = [0] * bar_count
columns.extend(zip(*bars))
for right_subbars, color, in zip(columns, bar_colors):
bar_container = ax.barh(
range(len(bars)),
right_subbars,
color=color,
height=height,
left=left_subbars,
linewidth=0,
)
left_subbars = _matrices_add(left_subbars, right_subbars)
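# accumulate the widths drawn so far so that the next stack segment
# starts where the previous one ended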
return bar_container
def _load_graph(self, info_cache):
raise NotImplementedError()
def _graph_null_bar(self, title):
return self.graph_bar([0], [''], xlabel=title)
def graph_bar(self, bars, yticklabels, xlabel=None):
"""
Create a horizontal bar graph with better defaults for the standard use
cases.
:param list bars: The values of the bars to graph.
:param list yticklabels: The labels to use on the y-axis.
:param str xlabel: The label to give to the x-axis.
:return: The bars created using :py:mod:`matplotlib`
:rtype: `matplotlib.container.BarContainer`
"""
largest = (max(bars) if len(bars) else 0)
bars = [[cell, largest - cell] for cell in bars]
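# pad each value with its difference to the largest bar so every row is
# drawn as a two-segment stack of equal total length (foreground + background)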
bar_colors = (self.get_color('bar_fg', ColorHexCode.BLACK), self.get_color('bar_bg', ColorHexCode.GRAY))
return self.graph_bar_stacked(bars, bar_colors, yticklabels, xlabel=xlabel)
def graph_bar_stacked(self, bars, bar_colors, yticklabels, xlabel=None):
height = 0.275
color_bg = self.get_color('bg', ColorHexCode.WHITE)
color_fg = self.get_color('fg', ColorHexCode.BLACK)
ax1, ax2 = self.axes # primary axis
bar_container = self._barh_stacked(ax1, bars, bar_colors, height)
yticks = [float(y) + (height / 2) for y in range(len(bars))]
# this makes the top bar shorter than the rest
ax1.set_yticks(yticks)
ax1.set_yticklabels(yticklabels, color=color_fg, size=self.yticklabel_config['left']['size'])
ax2.set_yticks(yticks)
ax2.set_yticklabels(
[self.yticklabel_config['right']['format'].format(*subbar, PERCENT=PERCENT_FORMAT) for subbar in bars],
color=color_fg,
size=self.yticklabel_config['right']['size']
)
ax2.set_ylim(ax1.get_ylim())
# remove the y-axis tick marks
self._ax_hide_ticks(ax1)
self._ax_hide_ticks(ax2)
self._ax_set_spine_color(ax1, color_bg)
self._ax_set_spine_color(ax2, color_bg)
if xlabel:
ax1.set_xlabel(xlabel, color=color_fg, size=12)
return bar_container
class CampaignLineGraph(CampaignGraph):
def __init__(self, *args, **kwargs):
super(CampaignLineGraph, self).__init__(*args, **kwargs)
def _load_graph(self, info_cache):
raise NotImplementedError()
class CampaignPieGraph(CampaignGraph):
def __init__(self, *args, **kwargs):
super(CampaignPieGraph, self).__init__(*args, **kwargs)
self.figure.subplots_adjust(top=0.85, right=0.75, bottom=0.05, left=0.05)
def _load_graph(self, info_cache):
raise NotImplementedError()
def _graph_null_pie(self, title):
ax = self.axes[0]
ax.pie(
(100,),
autopct='%1.0f%%',
colors=(self.get_color('pie_low', ColorHexCode.GRAY),),
labels=(title,),
shadow=True,
startangle=225,
textprops={'color': self.get_color('fg', ColorHexCode.BLACK)}
)
ax.axis('equal')
return
def graph_pie(self, parts, autopct=None, labels=None, legend_labels=None):
colors = color.get_scale(
self.get_color('pie_low', ColorHexCode.BLACK),
self.get_color('pie_high', ColorHexCode.GRAY),
len(parts),
ascending=False
)
ax = self.axes[0]
pie = ax.pie(
parts,
autopct=autopct,
colors=colors,
explode=[0.1] + ([0] * (len(parts) - 1)),
labels=labels or tuple("{0:{PERCENT}}%".format(p, PERCENT=PERCENT_FORMAT) for p in parts),
labeldistance=1.15,
shadow=True,
startangle=45,
textprops={'color': self.get_color('fg', ColorHexCode.BLACK)},
wedgeprops={'linewidth': 0}
)
ax.axis('equal')
if legend_labels is not None:
self.add_legend_patch(tuple(zip(colors, legend_labels)), fontsize='x-small')
return pie
@export_graph_provider
class CampaignGraphDepartmentComparison(CampaignBarGraph):
"""Display a graph which compares the different departments."""
graph_title = 'Department Comparison'
name_human = 'Bar - Department Comparison'
subplot_adjustment = {'top': 0.9, 'right': 0.775, 'bottom': 0.075, 'left': 0.225}
yticklabel_config = {
'left': {'size': 10},
'right': {'format': "{0:{PERCENT}}%, {1:{PERCENT}}%", 'size': 10}
}
def _load_graph(self, info_cache):
messages = info_cache['messages']['edges']
messages = [message['node'] for message in messages if message['node']['companyDepartment'] is not None]
if not messages:
self._graph_null_bar('')
return
messages = dict((message['id'], message) for message in messages)
visits = info_cache['visits']['edges']
visits = [visit['node'] for visit in visits if visit['node']['messageId'] in messages]
visits = unique(visits, key=lambda visit: visit['messageId'])
visits = dict((visit['id'], visit) for visit in visits)
creds = info_cache['credentials']['edges']
creds = [cred['node'] for cred in creds if cred['node']['messageId'] in messages]
creds = unique(creds, key=lambda cred: cred['messageId'])
creds = dict((cred['id'], cred) for cred in creds)
department_messages = collections.Counter()
department_messages.update(message['companyDepartment']['name'] for message in messages.values())
department_visits = collections.Counter()
department_visits.update(messages[visit['messageId']]['companyDepartment']['name'] for visit in visits.values())
department_credentials = collections.Counter()
department_credentials.update(messages[cred['messageId']]['companyDepartment']['name'] for cred in creds.values())
bars = []
department_names = tuple(department_messages.keys())
for department_name in department_names:
dep_messages = float(department_messages[department_name])
dep_creds = float(department_credentials.get(department_name, 0)) / dep_messages * 100
dep_visits = (float(department_visits.get(department_name, 0)) / dep_messages * 100) - dep_creds
bars.append((
dep_creds,
dep_visits,
(100.0 - (dep_creds + dep_visits))
))
bar_colors = (
self.get_color('map_marker1', ColorHexCode.RED),
self.get_color('map_marker2', ColorHexCode.YELLOW),
self.get_color('bar_bg', ColorHexCode.GRAY)
)
self.graph_bar_stacked(
bars,
bar_colors,
department_names
)
self.add_legend_patch(tuple(zip(bar_colors[:2], ('With Credentials', 'Without Credentials'))), fontsize=10)
return
@export_graph_provider
class CampaignGraphOverview(CampaignBarGraph):
"""Display a graph which represents an overview of the campaign."""
graph_title = 'Campaign Overview'
name_human = 'Bar - Campaign Overview'
def _load_graph(self, info_cache):
visits = info_cache['visits']['edges']
creds = info_cache['credentials']['edges']
messages = info_cache['messages']
messages_count = messages['total']
messages_opened = [message['node'] for message in messages['edges'] if message['node']['opened'] is not None]
bars = []
bars.append(messages_count)
bars.append(len(messages_opened))
bars.append(len(visits))
bars.append(len(unique(visits, key=lambda visit: visit['node']['messageId'])))
if len(creds):
bars.append(len(creds))
bars.append(len(unique(creds, key=lambda cred: cred['node']['messageId'])))
yticklabels = ('Messages', 'Opened', 'Visits', 'Unique\nVisits', 'Credentials', 'Unique\nCredentials')
self.graph_bar(bars, yticklabels[:len(bars)])
return
@export_graph_provider
class CampaignGraphVisitorInfo(CampaignBarGraph):
"""Display a graph which shows the different operating systems seen from visitors."""
graph_title = 'Campaign Visitor OS Information'
name_human = 'Bar - Visitor OS Information'
def _load_graph(self, info_cache):
visits = info_cache['visits']['edges']
if not len(visits):
self._graph_null_bar('No Visitor Information')
return
operating_systems = collections.Counter()
for visit in visits:
user_agent = None
if visit['node']['userAgent']:
user_agent = ua_parser.parse_user_agent(visit['node']['userAgent'])
operating_systems.update([user_agent.os_name if user_agent and user_agent.os_name else 'Unknown OS'])
os_names = sorted(operating_systems.keys())
bars = [operating_systems[os_name] for os_name in os_names]
self.graph_bar(bars, os_names)
return
@export_graph_provider
class CampaignGraphVisitorInfoPie(CampaignPieGraph):
"""Display a graph which compares the different operating systems seen from visitors."""
graph_title = 'Campaign Visitor OS Information'
name_human = 'Pie - Visitor OS Information'
def _load_graph(self, info_cache):
visits = info_cache['visits']['edges']
if not len(visits):
self._graph_null_pie('No Visitor Information')
return
operating_systems = collections.Counter()
for visit in visits:
ua = ua_parser.parse_user_agent(visit['node']['userAgent'])
operating_systems.update([ua.os_name or 'Unknown OS' if ua else 'Unknown OS'])
(os_names, count) = tuple(zip(*reversed(sorted(operating_systems.items(), key=lambda item: item[1]))))
self.graph_pie(count, labels=tuple("{0:,}".format(os) for os in count), legend_labels=os_names)
return
@export_graph_provider
class CampaignGraphVisitsTimeline(CampaignLineGraph):
"""Display a graph which represents the visits of a campaign over time."""
graph_title = 'Campaign Visits Timeline'
name_human = 'Line - Visits Timeline'
def _load_graph(self, info_cache):
# define the necessary colors
color_bg = self.get_color('bg', ColorHexCode.WHITE)
color_fg = self.get_color('fg', ColorHexCode.BLACK)
color_line_bg = self.get_color('line_bg', ColorHexCode.WHITE)
color_line_fg = self.get_color('line_fg', ColorHexCode.BLACK)
visits = info_cache['visits']['edges']
first_seen_timestamps = [utilities.datetime_utc_to_local(visit['node']['firstSeen']) for visit in visits]
ax = self.axes[0]
ax.tick_params(
axis='both',
which='both',
colors=color_fg,
top=False,
bottom=False
)
ax.set_facecolor(color_line_bg)
ax.set_ylabel('Number of Visits', color=self.get_color('fg', ColorHexCode.WHITE), size=10)
self._ax_hide_ticks(ax)
self._ax_set_spine_color(ax, color_bg)
if not len(first_seen_timestamps):
ax.set_yticks((0,))
ax.set_xticks((0,))
return
first_seen_timestamps.sort()
ax.plot_date(
first_seen_timestamps,
range(1, len(first_seen_timestamps) + 1),
'-',
color=color_line_fg,
linewidth=6
)
self.figure.autofmt_xdate()
self.figure.subplots_adjust(top=0.85, right=0.95, bottom=0.25, left=0.1)
locator = dates.AutoDateLocator()
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(dates.AutoDateFormatter(locator))
return
@export_graph_provider
class CampaignGraphMessageResults(CampaignPieGraph):
"""Display the percentage of messages which resulted in a visit."""
graph_title = 'Campaign Message Results'
name_human = 'Pie - Message Results'
def _load_graph(self, info_cache):
messages = info_cache['messages']
messages_count = messages['total']
if not messages_count:
self._graph_null_pie('No Messages Sent')
return
visits_count = len(unique(info_cache['visits']['edges'], key=lambda visit: visit['node']['messageId']))
credentials_count = len(unique(info_cache['credentials']['edges'], key=lambda cred: cred['node']['messageId']))
if not credentials_count <= visits_count <= messages_count:
raise ValueError('credential visit and message counts are inconsistent')
labels = ['Without Visit', 'With Visit', 'With Credentials']
sizes = []
sizes.append((float(messages_count - visits_count) / float(messages_count)) * 100)
sizes.append((float(visits_count - credentials_count) / float(messages_count)) * 100)
sizes.append((float(credentials_count) / float(messages_count)) * 100)
if not credentials_count:
labels.pop()
sizes.pop()
if not visits_count:
labels.pop()
sizes.pop()
self.graph_pie(sizes, legend_labels=labels)
return
class CampaignGraphVisitsMap(CampaignGraph):
"""A base class to display a map which shows the locations of visit origins."""
graph_title = 'Campaign Visit Locations'
is_available = has_matplotlib_basemap
draw_states = False
def _load_graph(self, info_cache):
visits = unique(info_cache['visits']['edges'], key=lambda visit: visit['node']['messageId'])
visits = [visit['node'] for visit in visits]
cred_ips = set(cred['node']['messageId'] for cred in info_cache['credentials']['edges'])
cred_ips = set([visit['ip'] for visit in visits if visit['messageId'] in cred_ips])
color_fg = self.get_color('fg', ColorHexCode.BLACK)
color_land = self.get_color('map_land', ColorHexCode.GRAY)
color_water = self.get_color('map_water', ColorHexCode.WHITE)
ax = self.axes[0]
bm = mpl_toolkits.basemap.Basemap(resolution='c', ax=ax, **self.basemap_args)
if self.draw_states:
bm.drawstates()
bm.drawcoastlines()
bm.drawcountries()
bm.fillcontinents(color=color_land, lake_color=color_water)
parallels = bm.drawparallels(
(-60, -30, 0, 30, 60),
labels=(1, 1, 0, 0)
)
self._map_set_line_color(parallels, color_fg)
meridians = bm.drawmeridians(
(0, 90, 180, 270),
labels=(0, 0, 0, 1)
)
self._map_set_line_color(meridians, color_fg)
bm.drawmapboundary(
fill_color=color_water,
linewidth=0
)
if not visits:
return
base_markersize = self.markersize_scale
base_markersize = max(base_markersize, 3.05)
base_markersize = min(base_markersize, 9)
self._plot_visitor_map_points(bm, visits, cred_ips, base_markersize)
self.add_legend_patch(((self.color_with_creds, 'With Credentials'), (self.color_without_creds, 'Without Credentials')))
return
def _plot_visitor_map_points(self, bm, visits, cred_ips, base_markersize):
ctr = collections.Counter()
ctr.update([visit['ip'] for visit in visits])
geo_locations = {}
for visit in visits:
if not visit['ipGeoloc']:
continue
ip_address = visit['ip']
geo_locations[ip_address] = geoip.GeoLocation.from_graphql(ip_address, visit['ipGeoloc'])
o_high = float(max(ctr.values())) if ctr else 0.0
o_low = float(min(ctr.values())) if ctr else 0.0
color_with_creds = self.color_with_creds
color_without_creds = self.color_without_creds
for visitor_ip, geo_location in geo_locations.items():
if not (geo_location.coordinates.longitude and geo_location.coordinates.latitude):
continue
occurrences = ctr[visitor_ip]
pts = bm(geo_location.coordinates.longitude, geo_location.coordinates.latitude)
if o_high == o_low:
markersize = 2.0
else:
markersize = 1.0 + (float(occurrences) - o_low) / (o_high - o_low)
markersize = markersize * base_markersize
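# marker size scales linearly with how often the IP address was seen,
# between the least (o_low) and most (o_high) frequent visitors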
bm.plot(
pts[0],
pts[1],
'o',
markeredgewidth=0,
markerfacecolor=(color_with_creds if visitor_ip in cred_ips else color_without_creds),
markersize=markersize
)
return
def _map_set_line_color(self, map_lines, line_color):
for sub_lines, texts in map_lines.values():
for line in sub_lines:
line.set_color(line_color)
for text in texts:
text.set_color(line_color)
@property
def color_with_creds(self):
return self.get_color('map_marker1', ColorHexCode.RED)
@property
def color_without_creds(self):
return self.get_color('map_marker2', ColorHexCode.YELLOW)
@export_graph_provider
class CampaignGraphVisitsMapUSA(CampaignGraphVisitsMap):
"""Display a map of the USA which shows the locations of visit origins."""
name_human = 'Map - Visit Locations (USA)'
draw_states = True
basemap_args = dict(projection='lcc', lat_1=30, lon_0=-90, llcrnrlon=-122.5, llcrnrlat=12.5, urcrnrlon=-45, urcrnrlat=50)
@export_graph_provider
class CampaignGraphVisitsMapWorld(CampaignGraphVisitsMap):
"""Display a map of the world which shows the locations of visit origins."""
name_human = 'Map - Visit Locations (World)'
basemap_args = dict(projection='kav7', lon_0=0)
@export_graph_provider
class CampaignGraphPasswordComplexityPie(CampaignPieGraph):
"""Display a graph which displays the number of passwords which meet standard complexity requirements."""
graph_title = 'Campaign Password Complexity'
name_human = 'Pie - Password Complexity'
def _load_graph(self, info_cache):
passwords = set(cred['node']['password'] for cred in info_cache['credentials']['edges'])
if not len(passwords):
self._graph_null_pie('No Credential Information')
return
ctr = collections.Counter()
ctr.update(self._check_complexity(password) for password in passwords)
self.graph_pie((ctr[True], ctr[False]), autopct='%1.1f%%', legend_labels=('Complex', 'Not Complex'))
return
def _check_complexity(self, password):
if len(password) < 8:
return False
met = 0
for char_set in (string.ascii_uppercase, string.ascii_lowercase, string.digits, string.punctuation):
for char in password:
if char in char_set:
met += 1
break
return met >= 3
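# Illustrative examples (not part of the original module): a password is
# considered complex when it is at least 8 characters long and mixes at
# least three of the four character classes checked above, e.g.
# 'Tr0ub4dor&3' -> complex, 'password' -> not complex.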
class CampaignGraphComparison(GraphBase):
"""Display selected campaigns data by order of campaign start date."""
graph_title = 'Campaign Comparison Graph'
name_human = 'Graph'
def __init__(self, *args, **kwargs):
super(CampaignGraphComparison, self).__init__(*args, **kwargs)
ax = self.axes[0]
self.axes.append(ax.twinx())
ax2 = self.axes[1]
self._config_axes(ax, ax2)
self._campaigns = []
def _calc(self, stats, key, comp_key='messages'):
return 0 if stats[comp_key] == 0 else (float(stats[key]) / stats[comp_key]) * 100
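# expresses stats[key] as a percentage of stats[comp_key] (by default the
# number of messages), guarding against division by zero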
def _config_axes(self, ax, ax2):
# define the necessary colors
color_bg = self.get_color('bg', ColorHexCode.WHITE)
color_fg = self.get_color('fg', ColorHexCode.BLACK)
color_line_bg = self.get_color('line_bg', ColorHexCode.WHITE)
ax.tick_params(
axis='both',
which='both',
colors=color_fg,
top=False,
bottom=False
)
ax2.tick_params(
axis='both',
which='both',
colors=color_fg,
top=False,
bottom=False
)
ax.set_facecolor(color_line_bg)
ax2.set_facecolor(color_line_bg)
title = pyplot.title('Campaign Comparison', color=color_fg, size=self.markersize_scale * 1.75, loc='left')
title.set_position([0.075, 1.05])
ax.set_ylabel('Percent Visits/Credentials', color=color_fg, size=self.markersize_scale * 1.5)
ax.set_xlabel('Campaign Name', color=color_fg, size=self.markersize_scale * 1.5)
self._ax_hide_ticks(ax)
self._ax_hide_ticks(ax2)
ax2.set_ylabel('Messages', color=color_fg, size=self.markersize_scale * 1.25, rotation=270, labelpad=20)
self._ax_set_spine_color(ax, color_bg)
self._ax_set_spine_color(ax2, color_bg)
ax2.get_yaxis().set_major_locator(ticker.MaxNLocator(integer=True))
ax.tick_params(axis='x', labelsize=10, pad=5)
def load_graph(self, campaigns):
"""
Load the information to compare the specified campaigns and paint it to the
canvas. Campaigns are graphed on the X-axis in the order that they are
provided. No sorting of campaigns is done by this method.
:param tuple campaigns: A tuple containing campaign IDs to compare.
"""
ax = self.axes[0]
ax2 = self.axes[1]
ax.clear()
ax2.clear()
self._config_axes(ax, ax2)
rpc = self.rpc
ellipsize = lambda text: (text if len(text) < 20 else text[:17] + '...')
visits_line_color = self.get_color('line_fg', ColorHexCode.RED)
creds_line_color = self.get_color('map_marker1', ColorHexCode.BLACK)
messages_color = '#046D8B'
trained_color = '#77c67f'
ax.grid(True)
ax.set_xticks(range(len(campaigns)))
ax.set_xticklabels([ellipsize(self._get_graphql_campaign_name(cid)) for cid in campaigns])
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(self.markersize_scale * 1.25)
labels = ax.get_xticklabels()
pyplot.setp(labels, rotation=15)
self._campaigns = campaigns
campaigns = [rpc('/campaign/stats', cid) for cid in campaigns]
ax2.plot([stats['messages'] for stats in campaigns], label='Messages', color=messages_color, lw=3)
if sum(stats['messages-trained'] for stats in campaigns):
ax.plot([self._calc(stats, 'messages-trained', 'visits-unique') for stats in campaigns], label='Trained (Visited)', color=trained_color, lw=3)
ax.plot([self._calc(stats, 'messages-trained') for stats in campaigns], label='Trained (All)', color=trained_color, lw=3, ls='dashed')
ax.plot([self._calc(stats, 'visits') for stats in campaigns], label='Visits', color=visits_line_color, lw=3)
ax.plot([self._calc(stats, 'visits-unique') for stats in campaigns], label='Unique Visits', color=visits_line_color, lw=3, ls='dashed')
if sum(stats['credentials'] for stats in campaigns):
ax.plot([self._calc(stats, 'credentials') for stats in campaigns], label='Credentials', color=creds_line_color, lw=3)
ax.plot([self._calc(stats, 'credentials-unique') for stats in campaigns], label='Unique Credentials', color=creds_line_color, lw=3, ls='dashed')
ax.set_ylim((0, 100))
ax2.set_ylim(bottom=0)
self.canvas.set_size_request(500 + 50 * (len(campaigns) - 1), 500)
legend_patch = [
(visits_line_color, 'solid', 'Visits'),
(visits_line_color, 'dotted', 'Unique Visits')
]
if sum(stats['credentials'] for stats in campaigns):
legend_patch.extend([
(creds_line_color, 'solid', 'Credentials'),
(creds_line_color, 'dotted', 'Unique Credentials')
])
if sum(stats['messages-trained'] for stats in campaigns):
legend_patch.extend([
(trained_color, 'solid', 'Trained (Visited)'),
(trained_color, 'dotted', 'Trained (All)')
])
legend_patch.append(
(messages_color, 'solid', 'Messages')
)
self.add_legend_patch(legend_patch)
pyplot.tight_layout()
def _get_graphql_campaign_name(self, campaign_id=None):
results = self.rpc.graphql("""\
query getCampaignName($id: String!) {
db {
campaign(id: $id) {
name
}
}
}""", {'id': campaign_id or self.config['campaign_id']})
return results['db']['campaign']['name']
def add_legend_patch(self, legend_rows, fontsize=None):
if matplotlib.__version__ == '3.0.2':
self.logger.warning('skipping legend patch with matplotlib v3.0.2 for compatibility')
return
if self._legend is not None:
self._legend.remove()
self._legend = None
legend_bbox = self.figure.legend(
tuple(lines.Line2D([], [], color=patch_color, lw=3, ls=style) for patch_color, style, _ in legend_rows),
tuple(label for _, _, label in legend_rows),
borderaxespad=1,
columnspacing=1.5,
fontsize=self.fontsize_scale,
ncol=3,
frameon=True,
handlelength=2,
handletextpad=0.5,
labelspacing=0.5,
loc='upper right'
)
legend_bbox.get_frame().set_facecolor(self.get_color('line_bg', ColorHexCode.GRAY))
for text in legend_bbox.get_texts():
text.set_color('white')
legend_bbox.legendPatch.set_linewidth(0)
self._legend = legend_bbox
def refresh(self):
self.load_graph(self._campaigns)
| bsd-3-clause |
alexeyum/scikit-learn | sklearn/feature_selection/variance_threshold.py | 123 | 2572 | # Author: Lars Buitinck
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
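# Illustrative usage sketch (not part of the original module), mirroring the
# docstring example above:
#
#   selector = VarianceThreshold(threshold=0.0)
#   X_reduced = selector.fit_transform(X)  # drops the zero-variance columns
#   selector.variances_                    # per-feature training variances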
| bsd-3-clause |
krafczyk/spack | var/spack/repos/builtin/packages/py-dask/package.py | 4 | 3626 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyDask(PythonPackage):
"""Dask is a flexible parallel computing library for analytics."""
homepage = "https://github.com/dask/dask/"
url = "https://pypi.io/packages/source/d/dask/dask-0.17.4.tar.gz"
version('0.17.4', '4a7b9c5d7ddf52639b1c6b9e8a68d146')
version('0.8.1', '5dd8e3a3823b3bc62c9a6d192e2cb5b4')
variant('array', default=True, description='Install requirements for dask.array')
variant('bag', default=True, description='Install requirements for dask.bag')
variant('dataframe', default=True, description='Install requirements for dask.dataframe')
variant('delayed', default=True, description='Install requirements for dask.delayed')
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type='test')
depends_on('py-requests', type='test')
# Requirements for dask.array
depends_on('[email protected]:', type=('build', 'run'), when='+array')
depends_on('[email protected]:', type=('build', 'run'), when='+array')
# Requirements for dask.bag
depends_on('[email protected]:', type=('build', 'run'), when='+bag')
depends_on('[email protected]:', type=('build', 'run'), when='+bag')
depends_on('[email protected]:', type=('build', 'run'), when='+bag')
# Requirements for dask.dataframe
depends_on('[email protected]:', type=('build', 'run'), when='+dataframe')
depends_on('[email protected]:', type=('build', 'run'), when='+dataframe')
depends_on('[email protected]:', type=('build', 'run'), when='+dataframe')
depends_on('[email protected]:', type=('build', 'run'), when='+dataframe')
depends_on('[email protected]:', type=('build', 'run'), when='+dataframe')
# Requirements for dask.delayed
depends_on('[email protected]:', type=('build', 'run'), when='+delayed')
@property
def import_modules(self):
modules = [
'dask', 'dask.bytes', 'dask.diagnostics', 'dask.store'
]
if '+array' in self.spec:
modules.append('dask.array')
if '+bag' in self.spec:
modules.append('dask.bag')
if '+dataframe' in self.spec:
modules.extend([
'dask.dataframe', 'dask.dataframe.io', 'dask.dataframe.tseries'
])
return modules
| lgpl-2.1 |
dogwood008/DeepFX | deep_fx.py | 1 | 6007 |
# coding: utf-8
# In[ ]:
import numpy as np
import pandas as pd
from logging import getLogger, StreamHandler, DEBUG, INFO
import time
import os
import warnings
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, InputLayer
from keras.optimizers import Adam
from keras.initializers import TruncatedNormal
from rl.agents.dqn import DQNAgent
from rl.policy import EpsGreedyQPolicy
from rl.memory import SequentialMemory
from debug_tools import DebugTools
from hist_data import HistData
from episode_logger import EpisodeLogger
from model_saver import ModelSaver
from my_tensor_board import MyTensorBoard
from fx_trade import FXTrade
# In[ ]:
class DeepFX:
def __init__(self, env, steps=50000,
log_directory='./logs', model_directory='./models',
model_filename='Keras-RL_DQN_FX_model_meanq{mean_q:e}_episode{episode:05d}',
prepared_model_filename=None,
weights_filename='Keras-RL_DQN_FX_weights.h5',
logger=None):
self._log_directory = log_directory
self._model_directory = model_directory
self._model_filename = model_filename
self._prepared_model_filename = prepared_model_filename
self._weights_filename = weights_filename
self._load_model_path = self._relative_path(model_directory, prepared_model_filename)
self._save_model_path = self._relative_path(model_directory, model_filename)
self._env = env
self.steps = steps
self._logger = logger
def setup(self):
self._agent, self._model, self._memory, self._policy = self._initialize_agent()
self._agent.compile('adam')
self._logger.info(self._model.summary())
def train(self, is_for_time_measurement=False, wipe_instance_variables_after=True):
self.setup()
self._callbacks = self._get_callbacks()
self._fit(self._agent, is_for_time_measurement, self._env, self._callbacks)
if wipe_instance_variables_after:
self._wipe_instance_variables()
def test(self, callbacks=[], wipe_instance_variables_after=True):
self.setup()
self._agent.test(self._env, visualize=False, callbacks=callbacks)
#%matplotlib inline
#import matplotlib.pyplot as plt
#
#for obs in callbacks[0].rewards.values():
# plt.plot([o for o in obs])
#plt.xlabel("step")
#plt.ylabel("reward")
#if wipe_instance_variables_after:
# self._wipe_instance_variables()
def _wipe_instance_variables(self):
self._callbacks, self._agent, self._model, self._memory, self._policy, self.env = [None] * 6
def _relative_path(self, directory, filename):
if directory is None or filename is None:
return None
return os.path.join(directory, filename)
def _get_model(self, load_model_path, observation_space_shape, nb_actions):
if load_model_path is None:
# DQN network definition
# ref: https://github.com/googledatalab/notebooks/blob/master/samples/TensorFlow/Machine%20Learning%20with%20Financial%20Data.ipynb
model = Sequential()
model.add(Flatten(input_shape=(1,) + observation_space_shape))
#model.add(InputLayer(input_shape=(1,) + observation_space_shape))
model.add(Dense(50, activation='relu', kernel_initializer=TruncatedNormal(stddev=0.0001), bias_initializer='ones'))
model.add(Dense(25, activation='relu', kernel_initializer=TruncatedNormal(stddev=0.0001), bias_initializer='ones'))
model.add(Dense(nb_actions, activation='linear'))
else:
model = keras.models.load_model(load_model_path)
return model
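# When built from scratch, the network above is a standard DQN value head:
# the flattened observation feeds two ReLU hidden layers (50 and 25 units)
# and a linear output layer with one unit per action, i.e. one estimated
# Q-value per action.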
def _initialize_agent(self):
nb_actions = self._env.action_space.n
observation_space_shape = self._env.observation_space.shape
model = self._get_model(self._load_model_path, observation_space_shape, nb_actions)
# memory for experience replay
memory = SequentialMemory(limit=500000, window_length=1)
# The action policy is the orthodox epsilon-greedy; alternatively, BoltzmannQPolicy, which sets action probabilities from each action's Q-value, is available
policy = EpsGreedyQPolicy(eps=0.1)
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=100,
policy=policy)
#target_model_update=1e-2, policy=policy)
#dqn.compile(Adam(lr=1e-3))
return (dqn, model, memory, policy)
def _get_callbacks(self):
tensor_board_callback = MyTensorBoard(log_dir=self._log_directory, histogram_freq=1, embeddings_layer_names=True, write_graph=True)
model_saver_callback = ModelSaver(self._save_model_path, monitor='mean_q', mode='max', logger=self._logger)
episode_logger_callback = EpisodeLogger(logger=self._logger)
callbacks = [tensor_board_callback, model_saver_callback, episode_logger_callback]
return callbacks
def _fit(self, agent, is_for_time_measurement, env, callbacks=[]):
if is_for_time_measurement:
start = time.time()
self._logger.info(DebugTools.now_str())
history = agent.fit(env, nb_steps=self.steps, visualize=False, verbose=2, nb_max_episode_steps=None, callbacks=callbacks)
elapsed_time = time.time() - start
self._logger.warn(("elapsed_time:{0}".format(elapsed_time)) + "[sec]")
self._logger.info(DebugTools.now_str())
else:
history = agent.fit(env, nb_steps=50000, visualize=True, verbose=2, nb_max_episode_steps=None)
# To visualize the training process, implement _render() in the Env and set visualize=True
def _render(self, mode='human', close=False):
import pdb; pdb.set_trace()
| mit |
JosmanPS/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior as Gaussian
naive Bayes: the calibration curve has a sigmoid curve, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
bthirion/scikit-learn | sklearn/linear_model/tests/test_omp.py | 76 | 7752 | # Author: Vlad Niculae
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
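# Illustrative usage sketch (not part of the upstream test suite): a minimal,
# hedged example of calling orthogonal_mp directly on the module-level
# synthetic data defined above. The helper name is an assumption made for
# demonstration only; it is deliberately not named test_* so it is not
# collected as a test.
def demo_orthogonal_mp_usage():
    coef = orthogonal_mp(X, y[:, 0], n_nonzero_coefs=n_nonzero_coefs)
    # the recovered coefficient vector is sparse by construction
    assert np.count_nonzero(coef) <= n_nonzero_coefs
    # the Gram-matrix variant selects the same support on this data
    coef_gram = orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=n_nonzero_coefs)
    assert_array_equal(np.flatnonzero(coef), np.flatnonzero(coef_gram))
    return coef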
| bsd-3-clause |
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/core/util/hashing.py | 3 | 9194 | """
data hash pandas / numpy objects
"""
import itertools
import numpy as np
from pandas._libs import hashing
from pandas._libs.lib import is_bool_array
from pandas.core.dtypes.generic import (
ABCMultiIndex,
ABCIndexClass,
ABCSeries,
ABCDataFrame)
from pandas.core.dtypes.common import (
is_categorical_dtype, is_numeric_dtype,
is_datetime64_dtype, is_timedelta64_dtype,
is_list_like)
# 16 byte long hashing key
_default_hash_key = '0123456789123456'
def _combine_hash_arrays(arrays, num_items):
"""
Parameters
----------
arrays : generator
num_items : int
    The hash-combining scheme mirrors CPython's tuple hash in tupleobject.c.
"""
try:
first = next(arrays)
except StopIteration:
return np.array([], dtype=np.uint64)
arrays = itertools.chain([first], arrays)
mult = np.uint64(1000003)
out = np.zeros_like(first) + np.uint64(0x345678)
for i, a in enumerate(arrays):
inverse_i = num_items - i
out ^= a
out *= mult
mult += np.uint64(82520 + inverse_i + inverse_i)
assert i + 1 == num_items, 'Fed in wrong num_items'
out += np.uint64(97531)
return out
def hash_pandas_object(obj, index=True, encoding='utf8', hash_key=None,
categorize=True):
"""
Return a data hash of the Index/Series/DataFrame
.. versionadded:: 0.19.2
Parameters
----------
index : boolean, default True
include the index in the hash (if Series/DataFrame)
encoding : string, default 'utf8'
encoding for data & key when strings
hash_key : string key to encode, default to _default_hash_key
categorize : bool, default True
Whether to first categorize object arrays before hashing. This is more
efficient when the array contains duplicate values.
.. versionadded:: 0.20.0
Returns
-------
Series of uint64, same length as the object
"""
from pandas import Series
if hash_key is None:
hash_key = _default_hash_key
if isinstance(obj, ABCMultiIndex):
return Series(hash_tuples(obj, encoding, hash_key),
dtype='uint64', copy=False)
if isinstance(obj, ABCIndexClass):
h = hash_array(obj.values, encoding, hash_key,
categorize).astype('uint64', copy=False)
h = Series(h, index=obj, dtype='uint64', copy=False)
elif isinstance(obj, ABCSeries):
h = hash_array(obj.values, encoding, hash_key,
categorize).astype('uint64', copy=False)
if index:
index_iter = (hash_pandas_object(obj.index,
index=False,
encoding=encoding,
hash_key=hash_key,
categorize=categorize).values
for _ in [None])
arrays = itertools.chain([h], index_iter)
h = _combine_hash_arrays(arrays, 2)
h = Series(h, index=obj.index, dtype='uint64', copy=False)
elif isinstance(obj, ABCDataFrame):
hashes = (hash_array(series.values) for _, series in obj.iteritems())
num_items = len(obj.columns)
if index:
index_hash_generator = (hash_pandas_object(obj.index,
index=False,
encoding=encoding,
hash_key=hash_key,
categorize=categorize).values # noqa
for _ in [None])
num_items += 1
hashes = itertools.chain(hashes, index_hash_generator)
h = _combine_hash_arrays(hashes, num_items)
h = Series(h, index=obj.index, dtype='uint64', copy=False)
else:
raise TypeError("Unexpected type for hashing %s" % type(obj))
return h
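def _example_hash_pandas_object():
    """
    Illustrative sketch only (not part of the pandas API): shows the intended
    call pattern for hash_pandas_object. The sample Series below is an
    assumption made purely for demonstration.
    """
    from pandas import Series
    obj = Series(['a', 'b', 'c'])
    hashed = hash_pandas_object(obj)
    # the result is a uint64 Series aligned with the input's index
    assert hashed.dtype == 'uint64'
    # hashing is deterministic for a fixed hash_key and encoding
    assert (hashed == hash_pandas_object(obj)).all()
    return hashed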
def hash_tuples(vals, encoding='utf8', hash_key=None):
"""
Hash an MultiIndex / list-of-tuples efficiently
.. versionadded:: 0.20.0
Parameters
----------
vals : MultiIndex, list-of-tuples, or single tuple
encoding : string, default 'utf8'
hash_key : string key to encode, default to _default_hash_key
Returns
-------
ndarray of hashed values array
"""
is_tuple = False
if isinstance(vals, tuple):
vals = [vals]
is_tuple = True
elif not is_list_like(vals):
raise TypeError("must be convertible to a list-of-tuples")
from pandas import Categorical, MultiIndex
if not isinstance(vals, ABCMultiIndex):
vals = MultiIndex.from_tuples(vals)
# create a list-of-Categoricals
vals = [Categorical(vals.labels[level],
vals.levels[level],
ordered=False,
fastpath=True)
for level in range(vals.nlevels)]
# hash the list-of-ndarrays
hashes = (_hash_categorical(cat,
encoding=encoding,
hash_key=hash_key)
for cat in vals)
h = _combine_hash_arrays(hashes, len(vals))
if is_tuple:
h = h[0]
return h
def _hash_categorical(c, encoding, hash_key):
"""
Hash a Categorical by hashing its categories, and then mapping the codes
to the hashes
Parameters
----------
c : Categorical
encoding : string, default 'utf8'
hash_key : string key to encode, default to _default_hash_key
Returns
-------
ndarray of hashed values array, same size as len(c)
"""
hashed = hash_array(c.categories.values, encoding, hash_key,
categorize=False)
# we have uint64, as we don't directly support missing values
# we don't want to use take_nd which will coerce to float
    # instead, directly construct the result with a
# max(np.uint64) as the missing value indicator
#
# TODO: GH 15362
mask = c.isnull()
if len(hashed):
result = hashed.take(c.codes)
else:
result = np.zeros(len(mask), dtype='uint64')
if mask.any():
result[mask] = np.iinfo(np.uint64).max
return result
def hash_array(vals, encoding='utf8', hash_key=None, categorize=True):
"""
Given a 1d array, return an array of deterministic integers.
.. versionadded:: 0.19.2
Parameters
----------
vals : ndarray, Categorical
encoding : string, default 'utf8'
encoding for data & key when strings
hash_key : string key to encode, default to _default_hash_key
categorize : bool, default True
Whether to first categorize object arrays before hashing. This is more
efficient when the array contains duplicate values.
.. versionadded:: 0.20.0
Returns
-------
1d uint64 numpy array of hash values, same length as the vals
"""
if not hasattr(vals, 'dtype'):
raise TypeError("must pass a ndarray-like")
if hash_key is None:
hash_key = _default_hash_key
# For categoricals, we hash the categories, then remap the codes to the
# hash values. (This check is above the complex check so that we don't ask
    # numpy if categorical is a subdtype of complex, as it will choke.)
if is_categorical_dtype(vals.dtype):
return _hash_categorical(vals, encoding, hash_key)
# we'll be working with everything as 64-bit values, so handle this
# 128-bit value early
if np.issubdtype(vals.dtype, np.complex128):
return hash_array(vals.real) + 23 * hash_array(vals.imag)
# First, turn whatever array this is into unsigned 64-bit ints, if we can
# manage it.
if is_bool_array(vals):
vals = vals.astype('u8')
elif (is_datetime64_dtype(vals) or
is_timedelta64_dtype(vals)):
vals = vals.view('i8').astype('u8', copy=False)
elif (is_numeric_dtype(vals) and vals.dtype.itemsize <= 8):
vals = vals.view('u{}'.format(vals.dtype.itemsize)).astype('u8')
else:
# With repeated values, its MUCH faster to categorize object dtypes,
# then hash and rename categories. We allow skipping the categorization
# when the values are known/likely to be unique.
if categorize:
from pandas import factorize, Categorical, Index
codes, categories = factorize(vals, sort=False)
cat = Categorical(codes, Index(categories),
ordered=False, fastpath=True)
return _hash_categorical(cat, encoding, hash_key)
try:
vals = hashing.hash_object_array(vals, hash_key, encoding)
except TypeError:
# we have mixed types
vals = hashing.hash_object_array(vals.astype(str).astype(object),
hash_key, encoding)
# Then, redistribute these 64-bit ints within the space of 64-bit ints
vals ^= vals >> 30
vals *= np.uint64(0xbf58476d1ce4e5b9)
vals ^= vals >> 27
vals *= np.uint64(0x94d049bb133111eb)
vals ^= vals >> 31
return vals
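def _example_hash_array():
    """
    Illustrative sketch only: hash_array maps a 1d array to deterministic
    uint64 values of the same length. The values below are demonstration
    assumptions, not pandas test data.
    """
    vals = np.array(['apple', 'orange', 'apple'], dtype=object)
    hashed = hash_array(vals)
    assert hashed.shape == vals.shape
    # equal inputs hash to equal outputs
    assert hashed[0] == hashed[2]
    return hashed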
| mit |
villalonreina/dipy | dipy/reconst/sfm.py | 6 | 20391 | """
The Sparse Fascicle Model.
This is an implementation of the sparse fascicle model described in
[Rokem2015]_. The multi b-value version of this model is described in
[Rokem2014]_.
.. [Rokem2015] Ariel Rokem, Jason D. Yeatman, Franco Pestilli, Kendrick
N. Kay, Aviv Mezer, Stefan van der Walt, Brian A. Wandell
(2015). Evaluating the accuracy of diffusion MRI models in white
matter. PLoS ONE 10(4): e0123272. doi:10.1371/journal.pone.0123272
.. [Rokem2014] Ariel Rokem, Kimberly L. Chan, Jason D. Yeatman, Franco
Pestilli, Brian A. Wandell (2014). Evaluating the accuracy of diffusion
models at multiple b-values with cross-validation. ISMRM 2014.
"""
import warnings
import numpy as np
try:
from numpy import nanmean
except ImportError:
from scipy.stats import nanmean
from dipy.utils.optpkg import optional_package
import dipy.core.geometry as geo
import dipy.core.gradients as grad
import dipy.core.optimize as opt
import dipy.sims.voxel as sims
import dipy.reconst.dti as dti
import dipy.data as dpd
from dipy.reconst.base import ReconstModel, ReconstFit
from dipy.reconst.cache import Cache
from dipy.core.onetime import auto_attr
lm, has_sklearn, _ = optional_package('sklearn.linear_model')
# If sklearn is unavailable, we can fall back on nnls (but we also warn the
# user that we are about to do that):
if not has_sklearn:
w = "sklearn is not available, you can use 'nnls' method to fit"
w += " the SparseFascicleModel"
warnings.warn(w)
# Isotropic signal models: these are models of the part of the signal that
# changes with b-value, but does not change with direction. This collection is
# extensible, by inheriting from IsotropicModel/IsotropicFit below:
# First, a helper function to derive the fit signal for these models:
def _to_fit_iso(data, gtab):
data_no_b0 = data[..., ~gtab.b0s_mask]
nzb0 = data_no_b0 > 0
nzb0_idx = np.where(nzb0)
zb0_idx = np.where(~nzb0)
if np.sum(gtab.b0s_mask) > 0:
s0 = np.mean(data[..., gtab.b0s_mask], -1)
to_fit = np.empty(data_no_b0.shape)
to_fit[nzb0_idx] = data_no_b0[nzb0_idx] / s0[nzb0_idx[0]]
to_fit[zb0_idx] = 0
else:
to_fit = data_no_b0
return to_fit
class IsotropicModel(ReconstModel):
"""
A base-class for the representation of isotropic signals.
The default behavior, suitable for single b-value data is to calculate the
mean in each voxel as an estimate of the signal that does not depend on
direction.
"""
def __init__(self, gtab):
"""
Initialize an IsotropicModel.
Parameters
----------
gtab : a GradientTable class instance
"""
ReconstModel.__init__(self, gtab)
def fit(self, data):
"""
Fit an IsotropicModel.
This boils down to finding the mean diffusion-weighted signal in each
voxel
Parameters
----------
data : ndarray
Returns
-------
IsotropicFit class instance.
"""
to_fit = _to_fit_iso(data, self.gtab)
params = np.mean(np.reshape(to_fit, (-1, to_fit.shape[-1])), -1)
return IsotropicFit(self, params)
class IsotropicFit(ReconstFit):
"""
A fit object for representing the isotropic signal as the mean of the
diffusion-weighted signal.
"""
def __init__(self, model, params):
"""
Initialize an IsotropicFit object.
Parameters
----------
model : IsotropicModel class instance
params : ndarray
The mean isotropic model parameters (the mean diffusion-weighted
signal in each voxel).
"""
self.model = model
self.params = params
def predict(self, gtab=None):
"""
Predict the isotropic signal.
Based on a gradient table. In this case, the (naive!) prediction will
be the mean of the diffusion-weighted signal in the voxels.
Parameters
----------
gtab : a GradientTable class instance (optional)
Defaults to use the gtab from the IsotropicModel from which this
fit was derived.
"""
if gtab is None:
gtab = self.model.gtab
return self.params[..., np.newaxis] + np.zeros((self.params.shape[0],
np.sum(~gtab.b0s_mask))
)
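def _example_isotropic_model():
    """
    Illustrative sketch only (not part of the dipy API): the IsotropicModel
    fit/predict round trip. It reuses the ``dpd.dsi_voxels()`` sample data
    already shown in the sfm_design_matrix docstring below; everything else is
    a demonstration assumption.
    """
    data, gtab = dpd.dsi_voxels()
    iso_model = IsotropicModel(gtab)
    iso_fit = iso_model.fit(data)
    # one direction-independent prediction per voxel and per
    # diffusion-weighted measurement
    return iso_fit.predict()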
class ExponentialIsotropicModel(IsotropicModel):
"""
Representing the isotropic signal as a fit to an exponential decay function
with b-values
"""
def fit(self, data):
"""
Parameters
----------
data : ndarray
Returns
-------
ExponentialIsotropicFit class instance.
"""
to_fit = _to_fit_iso(data, self.gtab)
# Fitting to the log-transformed relative data is much faster:
nz_idx = to_fit > 0
to_fit[nz_idx] = np.log(to_fit[nz_idx])
to_fit[~nz_idx] = -np.inf
p = nanmean(to_fit / self.gtab.bvals[~self.gtab.b0s_mask], -1)
params = -p
return ExponentialIsotropicFit(self, params)
class ExponentialIsotropicFit(IsotropicFit):
"""
A fit to the ExponentialIsotropicModel object, based on data.
"""
def predict(self, gtab=None):
"""
Predict the isotropic signal, based on a gradient table. In this case,
the prediction will be for an exponential decay with the mean
diffusivity derived from the data that was fit.
Parameters
----------
gtab : a GradientTable class instance (optional)
Defaults to use the gtab from the IsotropicModel from which this
fit was derived.
"""
if gtab is None:
gtab = self.model.gtab
return np.exp(-gtab.bvals[~gtab.b0s_mask] *
(np.zeros((self.params.shape[0],
np.sum(~gtab.b0s_mask))) +
self.params[..., np.newaxis]))
def sfm_design_matrix(gtab, sphere, response, mode='signal'):
"""
Construct the SFM design matrix
Parameters
----------
gtab : GradientTable or Sphere
Sets the rows of the matrix, if the mode is 'signal', this should be a
GradientTable. If mode is 'odf' this should be a Sphere
sphere : Sphere
Sets the columns of the matrix
response : list of 3 elements
The eigenvalues of a tensor which will serve as a kernel
function.
mode : str {'signal' | 'odf'}, optional
Choose the (default) 'signal' for a design matrix containing predicted
signal in the measurements defined by the gradient table for putative
fascicles oriented along the vertices of the sphere. Otherwise, choose
'odf' for an odf convolution matrix, with values of the odf calculated
from a tensor with the provided response eigenvalues, evaluated at the
b-vectors in the gradient table, for the tensors with prinicipal
diffusion directions along the vertices of the sphere.
Returns
-------
mat : ndarray
A design matrix that can be used for one of the following operations:
when the 'signal' mode is used, each column contains the putative
signal in each of the bvectors of the `gtab` if a fascicle is oriented
in the direction encoded by the sphere vertex corresponding to this
column. This is used for deconvolution with a measured DWI signal. If
the 'odf' mode is chosen, each column instead contains the values of
the tensor ODF for a tensor with a principal diffusion direction
corresponding to this vertex. This is used to generate odfs from the
fits of the SFM for the purpose of tracking.
Examples
--------
>>> import dipy.data as dpd
>>> data, gtab = dpd.dsi_voxels()
>>> sphere = dpd.get_sphere()
>>> from dipy.reconst.sfm import sfm_design_matrix
A canonical tensor approximating corpus-callosum voxels [Rokem2014]_:
>>> tensor_matrix = sfm_design_matrix(gtab, sphere,
... [0.0015, 0.0005, 0.0005])
A 'stick' function ([Behrens2007]_):
>>> stick_matrix = sfm_design_matrix(gtab, sphere, [0.001, 0, 0])
Notes
-----
.. [Rokem2015] Ariel Rokem, Jason D. Yeatman, Franco Pestilli, Kendrick
N. Kay, Aviv Mezer, Stefan van der Walt, Brian A. Wandell
(2015). Evaluating the accuracy of diffusion MRI models in white
matter. PLoS ONE 10(4): e0123272. doi:10.1371/journal.pone.0123272
.. [Rokem2014] Ariel Rokem, Kimberly L. Chan, Jason D. Yeatman, Franco
Pestilli, Brian A. Wandell (2014). Evaluating the accuracy of diffusion
models at multiple b-values with cross-validation. ISMRM 2014.
.. [Behrens2007] Behrens TEJ, Berg HJ, Jbabdi S, Rushworth MFS, Woolrich MW
(2007): Probabilistic diffusion tractography with multiple fibre
orientations: What can we gain? Neuroimage 34:144-55.
"""
if mode == 'signal':
mat_gtab = grad.gradient_table(gtab.bvals[~gtab.b0s_mask],
gtab.bvecs[~gtab.b0s_mask])
# Preallocate:
mat = np.empty((np.sum(~gtab.b0s_mask),
sphere.vertices.shape[0]))
elif mode == 'odf':
mat = np.empty((gtab.x.shape[0], sphere.vertices.shape[0]))
# Calculate column-wise:
for ii, this_dir in enumerate(sphere.vertices):
# Rotate the canonical tensor towards this vertex and calculate the
# signal you would have gotten in the direction
evecs = sims.all_tensor_evecs(this_dir)
if mode == 'signal':
sig = sims.single_tensor(mat_gtab, evals=response, evecs=evecs)
# For regressors based on the single tensor, remove $e^{-bD}$
iso_sig = np.exp(-mat_gtab.bvals * np.mean(response))
mat[:, ii] = sig - iso_sig
elif mode == 'odf':
# Stick function
if response[1] == 0 or response[2] == 0:
jj = sphere.find_closest(evecs[0])
mat[jj, ii] = 1
else:
odf = sims.single_tensor_odf(gtab.vertices,
evals=response, evecs=evecs)
mat[:, ii] = odf
return mat
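def _example_sfm_design_matrix():
    """
    Illustrative sketch only: builds a 'signal'-mode design matrix with the
    canonical corpus-callosum response used throughout this module. The data
    source mirrors the docstring example above and is an assumption for
    demonstration purposes.
    """
    data, gtab = dpd.dsi_voxels()
    sphere = dpd.get_sphere()
    mat = sfm_design_matrix(gtab, sphere, [0.0015, 0.0005, 0.0005])
    # one row per non-b0 measurement, one column per sphere vertex
    assert mat.shape == (np.sum(~gtab.b0s_mask), sphere.vertices.shape[0])
    return mat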
class SparseFascicleModel(ReconstModel, Cache):
def __init__(self, gtab, sphere=None, response=[0.0015, 0.0005, 0.0005],
solver='ElasticNet', l1_ratio=0.5, alpha=0.001, isotropic=None):
"""
Initialize a Sparse Fascicle Model
Parameters
----------
gtab : GradientTable class instance
sphere : Sphere class instance, optional
A sphere on which coefficients will be estimated. Default:
symmetric sphere with 362 points (from :mod:`dipy.data`).
response : (3,) array-like, optional
The eigenvalues of a canonical tensor to be used as the response
function of single-fascicle signals.
Default:[0.0015, 0.0005, 0.0005]
solver : string, dipy.core.optimize.SKLearnLinearSolver object, or sklearn.linear_model.base.LinearModel object, optional.
This will determine the algorithm used to solve the set of linear
equations underlying this model. If it is a string it needs to be
one of the following: {'ElasticNet', 'NNLS'}. Otherwise, it can be
an object that inherits from `dipy.optimize.SKLearnLinearSolver`.
Default: 'ElasticNet'.
l1_ratio : float, optional
            Sets the balance between L1 and L2 regularization in ElasticNet
[Zou2005]_. Default: 0.5
alpha : float, optional
Sets the balance between least-squares error and L1/L2
regularization in ElasticNet [Zou2005]_. Default: 0.001
isotropic : IsotropicModel class instance
This is a class that implements the function that calculates the
value of the isotropic signal. This is a value of the signal that is
independent of direction, and therefore removed from both sides of
the SFM equation. The default is an instance of IsotropicModel, but
other functions can be inherited from IsotropicModel to implement
other fits to the aspects of the data that depend on b-value, but
not on direction.
Notes
-----
This is an implementation of the SFM, described in [Rokem2015]_.
        .. [Rokem2015] Ariel Rokem, Jason D. Yeatman, Franco Pestilli, Kendrick
N. Kay, Aviv Mezer, Stefan van der Walt, Brian A. Wandell
           (2015). Evaluating the accuracy of diffusion MRI models in white
matter. PLoS ONE 10(4): e0123272. doi:10.1371/journal.pone.0123272
.. [Zou2005] Zou H, Hastie T (2005). Regularization and variable
selection via the elastic net. J R Stat Soc B:301-320
"""
ReconstModel.__init__(self, gtab)
if sphere is None:
sphere = dpd.get_sphere()
self.sphere = sphere
self.response = np.asarray(response)
if isotropic is None:
isotropic = IsotropicModel
self.isotropic = isotropic
if solver == 'ElasticNet':
self.solver = lm.ElasticNet(l1_ratio=l1_ratio, alpha=alpha,
positive=True, warm_start=True)
elif solver == 'NNLS' or solver == 'nnls':
self.solver = opt.NonNegativeLeastSquares()
elif (isinstance(solver, opt.SKLearnLinearSolver) or
has_sklearn and isinstance(solver, lm.base.LinearModel)):
self.solver = solver
else:
e_s = "The `solver` key-word argument needs to be: "
e_s += "'ElasticNet', 'NNLS', or a "
e_s += "`dipy.optimize.SKLearnLinearSolver` object"
raise ValueError(e_s)
@auto_attr
def design_matrix(self):
"""
The design matrix for a SFM.
Returns
-------
ndarray
The design matrix, where each column is a rotated version of the
response function.
"""
return sfm_design_matrix(self.gtab, self.sphere, self.response,
'signal')
def fit(self, data, mask=None):
"""
Fit the SparseFascicleModel object to data.
Parameters
----------
data : array
The measured signal.
mask : array, optional
A boolean array used to mark the coordinates in the data that
should be analyzed. Has the shape `data.shape[:-1]`. Default: None,
which implies that all points should be analyzed.
Returns
-------
SparseFascicleFit object
"""
if mask is None:
# Flatten it to 2D either way:
data_in_mask = np.reshape(data, (-1, data.shape[-1]))
else:
# Check for valid shape of the mask
if mask.shape != data.shape[:-1]:
raise ValueError("Mask is not the same shape as data.")
mask = np.array(mask, dtype=bool, copy=False)
data_in_mask = np.reshape(data[mask], (-1, data.shape[-1]))
# Fitting is done on the relative signal (S/S0):
flat_S0 = np.mean(data_in_mask[..., self.gtab.b0s_mask], -1)
flat_S = (data_in_mask[..., ~self.gtab.b0s_mask] /
flat_S0[..., None])
isotropic = self.isotropic(self.gtab).fit(data_in_mask)
flat_params = np.zeros((data_in_mask.shape[0],
self.design_matrix.shape[-1]))
isopredict = isotropic.predict()
for vox, vox_data in enumerate(flat_S):
# In voxels in which S0 is 0, we just want to keep the
# parameters at all-zeros, and avoid nasty sklearn errors:
if not (np.any(~np.isfinite(vox_data)) or np.all(vox_data == 0)):
fit_it = vox_data - isopredict[vox]
flat_params[vox] = self.solver.fit(self.design_matrix,
fit_it).coef_
if mask is None:
out_shape = data.shape[:-1] + (-1, )
beta = flat_params.reshape(out_shape)
S0 = flat_S0.reshape(data.shape[:-1])
else:
beta = np.zeros(data.shape[:-1] +
(self.design_matrix.shape[-1],))
beta[mask, :] = flat_params
S0 = np.zeros(data.shape[:-1])
S0[mask] = flat_S0
return SparseFascicleFit(self, beta, S0, isotropic)
class SparseFascicleFit(ReconstFit):
def __init__(self, model, beta, S0, iso):
"""
        Initialize a SparseFascicleFit class instance
Parameters
----------
model : a SparseFascicleModel object.
beta : ndarray
The parameters of fit to data.
S0 : ndarray
The mean non-diffusion-weighted signal.
iso : IsotropicFit class instance
A representation of the isotropic signal, together with parameters
of the isotropic signal in each voxel, that is capable of
deriving/predicting an isotropic signal, based on a gradient-table.
"""
self.model = model
self.beta = beta
self.S0 = S0
self.iso = iso
def odf(self, sphere):
"""
The orientation distribution function of the SFM
Parameters
----------
sphere : Sphere
The points in which the ODF is evaluated
Returns
-------
odf : ndarray of shape (x, y, z, sphere.vertices.shape[0])
"""
odf_matrix = self.model.cache_get('odf_matrix', key=sphere)
if odf_matrix is None:
odf_matrix = sfm_design_matrix(sphere, self.model.sphere,
self.model.response, mode='odf')
self.model.cache_set('odf_matrix', key=sphere, value=odf_matrix)
flat_beta = self.beta.reshape(-1, self.beta.shape[-1])
flat_odf = np.dot(odf_matrix, flat_beta.T)
return flat_odf.T.reshape(self.beta.shape[:-1] +
(odf_matrix.shape[0], ))
def predict(self, gtab=None, response=None, S0=None):
"""
Predict the signal based on the SFM parameters
Parameters
----------
gtab : GradientTable, optional
The bvecs/bvals to predict the signal on. Default: the gtab from
the model object.
response : list of 3 elements, optional
The eigenvalues of a tensor which will serve as a kernel
            function. Default: use `model.response` from the model object.
S0 : float or array, optional
The non-diffusion-weighted signal. Default: use the S0 of the data
Returns
-------
pred_sig : ndarray
The signal predicted in each voxel/direction
"""
if response is None:
response = self.model.response
if gtab is None:
_matrix = self.model.design_matrix
gtab = self.model.gtab
# The only thing we can't change at this point is the sphere we use
# (which sets the width of our design matrix):
else:
_matrix = sfm_design_matrix(gtab, self.model.sphere, response)
# Get them all at once:
beta_all = self.beta.reshape(-1, self.beta.shape[-1])
pred_weighted = np.dot(_matrix, beta_all.T).T
pred_weighted = pred_weighted.reshape(self.beta.shape[:-1] +
(_matrix.shape[0],))
if S0 is None:
S0 = self.S0
if isinstance(S0, np.ndarray):
S0 = S0[..., None]
iso_signal = self.iso.predict(gtab)
pre_pred_sig = S0 * (pred_weighted +
iso_signal.reshape(pred_weighted.shape))
pred_sig = np.zeros(pre_pred_sig.shape[:-1] + (gtab.bvals.shape[0],))
pred_sig[..., ~gtab.b0s_mask] = pre_pred_sig
pred_sig[..., gtab.b0s_mask] = S0
return pred_sig.squeeze()
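def _example_sparse_fascicle_model():
    """
    Illustrative sketch only (not part of the dipy API): an end-to-end
    fit/predict on a single voxel of the ``dpd.dsi_voxels()`` sample data.
    The solver falls back to 'NNLS' when sklearn is missing, as warned at
    import time; all names here are demonstration assumptions.
    """
    data, gtab = dpd.dsi_voxels()
    solver = 'ElasticNet' if has_sklearn else 'NNLS'
    sf_model = SparseFascicleModel(gtab, solver=solver)
    sf_fit = sf_model.fit(data[:1, :1, :1])
    # the prediction spans every entry of the gradient table
    return sf_fit.predict()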
| bsd-3-clause |
rkmaddox/mne-python | examples/preprocessing/eeg_csd.py | 13 | 3365 | """
=====================================================
Transform EEG data using current source density (CSD)
=====================================================
This script shows an example of how to use CSD
:footcite:`PerrinEtAl1987,PerrinEtAl1989,Cohen2014,KayserTenke2015`.
CSD takes the spatial Laplacian of the sensor signal (derivative in both
x and y). It does what a planar gradiometer does in MEG. Computing these
spatial derivatives reduces point spread. CSD transformed data have a sharper
or more distinct topography, reducing the negative impact of volume conduction.
"""
# Authors: Alex Rockhill <[email protected]>
#
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = 6
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Load sample subject data
raw = mne.io.read_raw_fif(data_path + '/MEG/sample/sample_audvis_raw.fif')
raw = raw.pick_types(meg=False, eeg=True, eog=True, ecg=True, stim=True,
exclude=raw.info['bads']).load_data()
events = mne.find_events(raw)
raw.set_eeg_reference(projection=True).apply_proj()
###############################################################################
# Plot the raw data and CSD-transformed raw data:
raw_csd = mne.preprocessing.compute_current_source_density(raw)
raw.plot()
raw_csd.plot()
###############################################################################
# Also look at the power spectral densities:
raw.plot_psd()
raw_csd.plot_psd()
###############################################################################
# CSD can also be computed on Evoked (averaged) data.
# Here we epoch and average the data so we can demonstrate that.
event_id = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
'visual/right': 4, 'smiley': 5, 'button': 32}
epochs = mne.Epochs(raw, events, event_id=event_id, tmin=-0.2, tmax=.5,
preload=True)
evoked = epochs['auditory'].average()
###############################################################################
# First let's look at how CSD affects scalp topography:
times = np.array([-0.1, 0., 0.05, 0.1, 0.15])
evoked_csd = mne.preprocessing.compute_current_source_density(evoked)
evoked.plot_joint(title='Average Reference', show=False)
evoked_csd.plot_joint(title='Current Source Density')
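###############################################################################
# The same transform applies to ``Epochs`` objects. This extra snippet is an
# illustrative addition to the original example and assumes that
# :func:`mne.preprocessing.compute_current_source_density` accepts Raw,
# Epochs, or Evoked instances alike.
epochs_csd = mne.preprocessing.compute_current_source_density(epochs.copy())
epochs_csd.average().plot_joint(title='CSD computed on Epochs')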
###############################################################################
# CSD has parameters ``stiffness`` and ``lambda2`` affecting smoothing and
# spline flexibility, respectively. Let's see how they affect the solution:
fig, ax = plt.subplots(4, 4)
fig.subplots_adjust(hspace=0.5)
fig.set_size_inches(10, 10)
for i, lambda2 in enumerate([0, 1e-7, 1e-5, 1e-3]):
for j, m in enumerate([5, 4, 3, 2]):
this_evoked_csd = mne.preprocessing.compute_current_source_density(
evoked, stiffness=m, lambda2=lambda2)
this_evoked_csd.plot_topomap(
0.1, axes=ax[i, j], outlines='skirt', contours=4, time_unit='s',
colorbar=False, show=False)
ax[i, j].set_title('stiffness=%i\nλ²=%s' % (m, lambda2))
###############################################################################
# References
# ----------
# .. footbibliography::
| bsd-3-clause |
ltiao/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
A regressor boosted 299 times (300 decision trees) is compared with a single
decision tree regressor. As the number of boosts increases, the regressor can
fit more detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
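# Additional illustrative check (not part of the original example): quantify
# the visual comparison above with the coefficient of determination on the
# training data, using the standard ``score`` API of both estimators.
print("Decision tree R^2: %.3f" % regr_1.score(X, y))
print("AdaBoost.R2 R^2: %.3f" % regr_2.score(X, y))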
| bsd-3-clause |
ZiqianXY/MLEN | src/p0_titanic_survival_exploration/titanic_visualizations.py | 24 | 5425 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def filter_data(data, condition):
"""
Remove elements that do not match the condition provided.
    Takes a pandas DataFrame as input and returns a filtered DataFrame.
Conditions should be a list of strings of the following format:
'<field> <op> <value>'
where the following operations are valid: >, <, >=, <=, ==, !=
Example: ["Sex == 'male'", 'Age < 18']
"""
field, op, value = condition.split(" ")
# convert value into number or strip excess quotes if string
try:
value = float(value)
except:
value = value.strip("\'\"")
# get booleans for filtering
if op == ">":
matches = data[field] > value
elif op == "<":
matches = data[field] < value
elif op == ">=":
matches = data[field] >= value
elif op == "<=":
matches = data[field] <= value
elif op == "==":
matches = data[field] == value
elif op == "!=":
matches = data[field] != value
else: # catch invalid operation codes
raise Exception("Invalid comparison operator. Only >, <, >=, <=, ==, != allowed.")
# filter data and outcomes
data = data[matches].reset_index(drop = True)
return data
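def _example_filter_data():
    """
    Illustrative sketch only (not part of the original project code): shows
    the expected condition format for filter_data. The toy DataFrame below is
    an assumption made purely for demonstration.
    """
    toy = pd.DataFrame({'Sex': ['male', 'female', 'male'], 'Age': [22, 38, 4]})
    adults = filter_data(toy, 'Age >= 18')
    adult_males = filter_data(adults, "Sex == 'male'")
    return adult_males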
def survival_stats(data, outcomes, key, filters = []):
"""
Print out selected statistics regarding survival, given a feature of
interest and any number of filters (including no filters)
"""
# Check that the key exists
if key not in data.columns.values :
print "'{}' is not a feature of the Titanic data. Did you spell something wrong?".format(key)
return False
# Return the function before visualizing if 'Cabin' or 'Ticket'
# is selected: too many unique categories to display
if(key == 'Cabin' or key == 'PassengerId' or key == 'Ticket'):
print "'{}' has too many unique categories to display! Try a different feature.".format(key)
return False
# Merge data and outcomes into single dataframe
all_data = pd.concat([data, outcomes], axis = 1)
# Apply filters to data
for condition in filters:
all_data = filter_data(all_data, condition)
# Create outcomes DataFrame
all_data = all_data[[key, 'Survived']]
# Create plotting figure
plt.figure(figsize=(8,6))
# 'Numerical' features
if(key == 'Age' or key == 'Fare'):
# Remove NaN values from Age data
all_data = all_data[~np.isnan(all_data[key])]
# Divide the range of data into bins and count survival rates
min_value = all_data[key].min()
max_value = all_data[key].max()
value_range = max_value - min_value
# 'Fares' has larger range of values than 'Age' so create more bins
if(key == 'Fare'):
bins = np.arange(0, all_data['Fare'].max() + 20, 20)
if(key == 'Age'):
bins = np.arange(0, all_data['Age'].max() + 10, 10)
# Overlay each bin's survival rates
nonsurv_vals = all_data[all_data['Survived'] == 0][key].reset_index(drop = True)
surv_vals = all_data[all_data['Survived'] == 1][key].reset_index(drop = True)
plt.hist(nonsurv_vals, bins = bins, alpha = 0.6,
color = 'red', label = 'Did not survive')
plt.hist(surv_vals, bins = bins, alpha = 0.6,
color = 'green', label = 'Survived')
# Add legend to plot
plt.xlim(0, bins.max())
plt.legend(framealpha = 0.8)
# 'Categorical' features
else:
# Set the various categories
if(key == 'Pclass'):
values = np.arange(1,4)
if(key == 'Parch' or key == 'SibSp'):
values = np.arange(0,np.max(data[key]) + 1)
if(key == 'Embarked'):
values = ['C', 'Q', 'S']
if(key == 'Sex'):
values = ['male', 'female']
# Create DataFrame containing categories and count of each
frame = pd.DataFrame(index = np.arange(len(values)), columns=(key,'Survived','NSurvived'))
for i, value in enumerate(values):
frame.loc[i] = [value, \
len(all_data[(all_data['Survived'] == 1) & (all_data[key] == value)]), \
len(all_data[(all_data['Survived'] == 0) & (all_data[key] == value)])]
# Set the width of each bar
bar_width = 0.4
# Display each category's survival rates
for i in np.arange(len(frame)):
nonsurv_bar = plt.bar(i-bar_width, frame.loc[i]['NSurvived'], width = bar_width, color = 'r')
surv_bar = plt.bar(i, frame.loc[i]['Survived'], width = bar_width, color = 'g')
plt.xticks(np.arange(len(frame)), values)
plt.legend((nonsurv_bar[0], surv_bar[0]),('Did not survive', 'Survived'), framealpha = 0.8)
# Common attributes for plot formatting
plt.xlabel(key)
plt.ylabel('Number of Passengers')
plt.title('Passenger Survival Statistics With \'%s\' Feature'%(key))
plt.show()
# Report number of passengers with missing values
if sum(pd.isnull(all_data[key])):
nan_outcomes = all_data[pd.isnull(all_data[key])]['Survived']
print "Passengers with missing '{}' values: {} ({} survived, {} did not survive)".format( \
key, len(nan_outcomes), sum(nan_outcomes == 1), sum(nan_outcomes == 0))
| mit |
DTOcean/dtocean-core | dtocean_core/extensions.py | 1 | 21050 |
# Copyright (C) 2016-2018 Mathew Topper
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Set up logging
import logging
module_logger = logging.getLogger(__name__)
import os
import pickle
from collections import OrderedDict
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from aneris.utilities.plugins import Plugin
from . import strategies, tools
from .menu import ModuleMenu
class ExtensionManager(Plugin):
"""Extension framework discovery"""
def __init__(self, module, cls_name):
super(ExtensionManager, self).__init__()
self._plugin_classes = None
self._plugin_names = None
self._plugin_classes = self._discover_classes(module, cls_name)
self._plugin_names = self._discover_names()
return
def get_available(self):
plugin_names = self._plugin_names.keys()
return plugin_names
def _get_plugin(self, plugin_name):
if plugin_name not in self.get_available():
errStr = ("Name {} is not a recognised "
"plugin").format(plugin_name)
raise KeyError(errStr)
cls_name = self._plugin_names[plugin_name]
ExtensionCls = self._plugin_classes[cls_name]
ext_obj = ExtensionCls()
return ext_obj
def _discover_classes(self, module, cls_name):
'''Retrieve all of the available plugin classes'''
log_msg = 'Searching for {} classes'.format(cls_name)
module_logger.debug(log_msg)
cls_map = self._discover_plugins(module, cls_name, warn_import=True)
return cls_map
def _discover_names(self):
plugin_names = {}
# Work through the interfaces
for cls_name, cls_attr in self._plugin_classes.iteritems():
name = cls_attr.get_name()
plugin_names[name] = cls_name
return plugin_names
class StrategyManager(ExtensionManager):
"""Strategy discovery"""
def __init__(self, module=strategies, cls_name="Strategy"):
super(StrategyManager, self).__init__(module, cls_name)
self._module_menu = ModuleMenu()
return
def get_strategy(self, strategy_name):
if strategy_name not in self.get_available():
errStr = ("Name {} is not a recognised "
"strategy").format(strategy_name)
raise KeyError(errStr)
strategy_obj = self._get_plugin(strategy_name)
return strategy_obj
def get_level_values(self, core,
project,
var_id,
strategy=None,
sim_titles=None,
scope="global"):
if scope not in ["global", "local"]:
errStr = ("Argument 'scope' must have value 'global' or 'local', "
"not {}").format(scope)
raise ValueError(errStr)
chosen_modules = self._module_menu.get_active(core, project)
output_levels = ["{} {} output".format(x, scope).lower()
for x in chosen_modules]
# Mask all the global and local output levels before getting the
# required output level
force_masks = ["local", "global"]
if strategy is not None:
sim_indexes = strategy.get_simulation_record()
elif sim_titles is not None:
sim_indexes = project.get_simulation_indexes(sim_titles)
else:
sim_indexes = range(len(project))
if sim_titles is None:
sim_titles = [project.get_simulation_title(index=x)
for x in sim_indexes]
sim_levels = OrderedDict()
for sim_title, sim_index in zip(sim_titles, sim_indexes):
if sim_title is None: sim_title = sim_index
level_values = core.get_level_values(project,
var_id,
output_levels,
force_masks,
sim_index=sim_index)
sim_levels[sim_title] = level_values
return sim_levels
def get_level_values_df(self, core,
project,
var_id,
strategy=None,
sim_titles=None,
scope="global"):
sim_levels = self.get_level_values(core,
project,
var_id,
strategy,
sim_titles,
scope)
done_levels = self._module_menu.get_completed(core, project)
# Check the number of levels in each simulation
sim_lengths = []
for level_values in sim_levels.itervalues():
sim_lengths.append(len(level_values.values()))
level_set = set(sim_lengths)
if len(level_set) != 1:
errStr = "The number of levels in each simulation is not equal"
raise ValueError(errStr)
sim_names = []
level_lists = {k: [] for k in done_levels}
for sim_key, level_values in sim_levels.iteritems():
sim_names.append(sim_key)
for name in done_levels:
find_levels = [level for level in level_values
if name.lower() in level]
if len(find_levels) > 1:
errStr = ("More than one level matches module name "
"{}").format(name)
raise RuntimeError(errStr)
if len(find_levels) == 0:
value = np.nan
else:
found_level = find_levels[0]
value = level_values[found_level]
level_lists[name].append(value)
raw_dict = {"Simulation Name": sim_names}
raw_dict.update(level_lists)
raw_cols = ["Simulation Name"]
raw_cols.extend(done_levels)
df = pd.DataFrame(raw_dict)
df = df[raw_cols]
return df
def get_level_values_plot(self, core,
project,
var_id,
strategy=None,
sim_titles=None,
scope="global",
legend_loc='upper left',
max_lines=10):
sim_levels = self.get_level_values(core,
project,
var_id,
strategy,
sim_titles,
scope)
done_levels = self._module_menu.get_completed(core, project)
# Check the number of levels in each simulation
sim_lengths = []
for level_values in sim_levels.itervalues():
sim_lengths.append(len(level_values.values()))
level_set = set(sim_lengths)
if len(level_set) != 1:
errStr = "The number of levels in each simulation is not equal"
raise ValueError(errStr)
fig = plt.figure()
ax = fig.gca()
        if len(sim_levels) < 10:
            num_plots = len(sim_levels)
        else:
            num_plots = 10
colormap = plt.cm.Set1
colors = [colormap(i) for i in np.linspace(0, 1, num_plots)]
ax.set_prop_cycle(color=colors)
x = range(len(done_levels))
metadata = core.get_metadata(var_id)
for i, (sim_key, level_values) in enumerate(sim_levels.iteritems()):
# Do not exceed the maximum number of lines
if i == max_lines: break
sane_values = []
for name in done_levels:
find_levels = [level for level in level_values
if name.lower() in level]
if len(find_levels) > 1:
errStr = ("More than one level matches module name "
"{}").format(name)
raise RuntimeError(errStr)
if len(find_levels) == 0:
value = np.nan
else:
found_level = find_levels[0]
value = level_values[found_level]
sane_values.append(value)
plt.plot(x, sane_values, '-o', label=sim_key)
plt.ylabel(metadata.title)
plt.xticks(x, done_levels, rotation='vertical')
y_label = metadata.title
if metadata.units is not None:
y_label = "{} ({})".format(y_label, metadata.units[0])
plt.ylabel(y_label)
plt.legend(loc=legend_loc)
plt.title("Module Comparison ({} Scope)".format(scope.capitalize()))
plt.tight_layout()
return plt.gcf()
def get_comparison_values(self, core,
project,
var_one_id,
var_two_id,
module=None,
strategy=None,
scope="global",
sort=True):
if scope not in ["global", "local"]:
errStr = ("Argument 'scope' must have value 'global' or 'local', "
"not {}").format(scope)
raise ValueError(errStr)
all_modules = self._module_menu.get_active(core, project)
# Determine at which module to carry out the comparison
if module is None:
module = all_modules[-1]
elif module not in all_modules:
errStr = ("Module '{}' is not in the list of active "
"modules").format(module)
raise ValueError(errStr)
output_level = "{} {} output".format(module, scope).lower()
# If a strategy is given then just use its simulation indexes
if strategy is None:
sim_indexes = None
else:
sim_indexes = strategy.get_simulation_record()
var_one_values = core.get_project_values(project,
var_one_id,
output_level,
force_indexes=sim_indexes,
allow_none=True)
var_two_values = core.get_project_values(project,
var_two_id,
output_level,
force_indexes=sim_indexes,
allow_none=True)
if var_one_values is None or var_two_values is None:
x = []
y = []
else:
x = [v for (n,v) in var_one_values]
y = [v for (n,v) in var_two_values]
if not sort: return x, y
# Sort by the x value
points = zip(x, y)
sorted_points = sorted(points)
new_x = [point[0] for point in sorted_points]
new_y = [point[1] for point in sorted_points]
return new_x, new_y
def get_comparison_values_df(self, core,
project,
var_one_id,
var_two_id,
module=None,
strategy=None,
scope="global"):
# Get the comparison values
x, y = self.get_comparison_values(core,
project,
var_one_id,
var_two_id,
module=module,
strategy=strategy,
scope=scope)
# Redetermine the module used
if module is None:
all_modules = self._module_menu.get_active(core, project)
module = all_modules[-1]
var_one_meta = core.get_metadata(var_one_id)
var_two_meta = core.get_metadata(var_two_id)
var_one_str = var_one_meta.title
if var_one_meta.units is not None:
var_one_str = "{} ({})".format(var_one_str,
var_one_meta.units[0])
var_two_str = var_two_meta.title
if var_two_meta.units is not None:
var_two_str = "{} ({})".format(var_two_str,
var_two_meta.units[0])
raw_dict = {var_one_str: x,
var_two_str: y}
raw_cols = [var_one_str, var_two_str]
df = pd.DataFrame(raw_dict)
df = df[raw_cols]
return df
def get_comparison_values_plot(self, core,
project,
var_one_id,
var_two_id,
module=None,
strategy=None,
scope="global"):
# Get the comparison values
x, y = self.get_comparison_values(core,
project,
var_one_id,
var_two_id,
module=module,
strategy=strategy,
scope=scope)
# Convert any string x-values to a numerical range and add ticks later
x_ticks = None
if any(isinstance(i, basestring) for i in x):
x_ticks = x
x = range(len(x))
plt.figure()
# Redetermine the module used
if module is None:
all_modules = self._module_menu.get_active(core, project)
module = all_modules[-1]
var_one_meta = core.get_metadata(var_one_id)
var_two_meta = core.get_metadata(var_two_id)
var_one_str = var_one_meta.title
if var_one_meta.units is not None:
var_one_str = "{} ({})".format(var_one_str,
var_one_meta.units[0])
var_two_str = var_two_meta.title
if var_two_meta.units is not None:
var_two_str = "{} ({})".format(var_two_str,
var_two_meta.units[0])
if x_ticks is None:
if len(x) == 1:
plt.plot(x, y, marker='o', ls='')
else:
plt.plot(x,y)
else:
plt.bar(x, y, align="center")
plt.xticks(x, x_ticks, rotation='vertical')
plt.xlabel(var_one_str)
plt.ylabel(var_two_str)
plt.title("Simulation Comparison at Module: {} "
"({} Scope)".format(module, scope.capitalize()),
y=1.08)
plt.tight_layout()
return plt.gcf()
def dump_strategy(self, strategy, dump_path):
if os.path.splitext(dump_path)[1] != ".pkl":
errStr = "Argument dump_path must be a file with .pkl extension"
raise ValueError(errStr)
stg_dict = self._get_dump_dict(strategy)
with open(dump_path, 'wb') as fstream:
pickle.dump(stg_dict, fstream, -1)
return
def load_strategy(self, load_path):
# OK need to consider if we have a pkl file
        if not os.path.isfile(load_path) or ".pkl" not in load_path:
errStr = ("Argument load_path must be a file with .pkl extension")
raise ValueError(errStr)
# Load the strategy file
with open(load_path, 'rb') as fstream:
stg_dict = pickle.load(fstream)
new_strategy = self._set_load_dict(stg_dict)
return new_strategy
def _get_dump_dict(self, strategy):
# Now store the strategy information
stg_name_str = strategy.get_name()
stg_dict = {"name": stg_name_str,
"sim_record": strategy._sim_record,
"config": strategy._config,
"sim_details": strategy.sim_details}
return stg_dict
def _set_load_dict(self, stg_dict):
# Now build the strategy
new_strategy = self.get_strategy(stg_dict["name"])
# Now deserialise the data
new_strategy._sim_record = stg_dict["sim_record"]
new_strategy._config = stg_dict["config"]
new_strategy.sim_details = stg_dict["sim_details"]
return new_strategy
class ToolManager(ExtensionManager):
"""Tool discovery and execution"""
def __init__(self, module=tools, cls_name="Tool"):
super(ToolManager, self).__init__(module, cls_name)
return
def get_tool(self, tool_name):
if tool_name not in self.get_available():
errStr = ("Name {} is not a recognised "
"tool").format(tool_name)
raise KeyError(errStr)
tool_obj = self._get_plugin(tool_name)
return tool_obj
def can_execute_tool(self, core, project, tool):
if project is None: return False
result = False
if core.can_load_interface(project, tool):
result = True
return result
def execute_tool(self, core, project, tool):
if not core.can_load_interface(project, tool):
errStr = ("The inputs to tool {} are not "
"satisfied.").format(tool.get_name())
raise ValueError(errStr)
interface = core.load_interface(project, tool)
core.connect_interface(project, interface)
return
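def _example_tool_manager():
    """
    Illustrative sketch only (not part of the dtocean-core API): the intended
    discovery and execution sequence for ToolManager. The core and project
    objects required by execute_tool are created elsewhere, so the final step
    is shown as a comment rather than executed.
    """
    tool_manager = ToolManager()
    available = tool_manager.get_available()
    if not available: return None
    tool = tool_manager.get_tool(list(available)[0])
    # With a live Core and Project the tool would then be run as:
    # if tool_manager.can_execute_tool(core, project, tool):
    #     tool_manager.execute_tool(core, project, tool)
    return tool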
| gpl-3.0 |
anhaidgroup/py_entitymatching | py_entitymatching/tests/test_sampler_single_table.py | 1 | 2404 | # coding=utf-8
import os
from nose.tools import *
import unittest
import pandas as pd
import six
from py_entitymatching.utils.generic_helper import get_install_path
import py_entitymatching.catalog.catalog_manager as cm
from py_entitymatching.io.parsers import read_csv_metadata
from py_entitymatching.sampler.single_table import sample_table
datasets_path = os.sep.join([get_install_path(), 'tests', 'test_datasets'])
path_a = os.sep.join([datasets_path, 'A.csv'])
path_b = os.sep.join([datasets_path, 'B.csv'])
path_c = os.sep.join([datasets_path, 'C.csv'])
class SamplerSingleTableTestCases(unittest.TestCase):
def test_sample_table_valid_1(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
D = sample_table(C, 10, False)
self.assertEqual(cm.get_all_properties(C), cm.get_all_properties(D))
self.assertEqual(len(D), 10)
def test_sample_table_valid_2(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
D = sample_table(C, 10, True)
self.assertEqual(id(cm.get_ltable(D)), id(cm.get_ltable(C)))
self.assertEqual(id(cm.get_rtable(D)), id(cm.get_rtable(C)))
self.assertEqual(cm.get_fk_ltable(D), cm.get_fk_ltable(C))
self.assertEqual(cm.get_fk_rtable(D), cm.get_fk_rtable(C))
self.assertEqual(len(D), 10)
@raises(AssertionError)
def test_sample_table_invalid_df(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
D = sample_table(None, 10, True)
# self.assertEqual(cm.get_all_properties(C), cm.get_all_properties(D))
# self.assertEqual(len(D), 10)
@raises(AssertionError)
def test_sample_table_invalid_size(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
D = sample_table(C, len(C)+1, True)
@raises(AssertionError)
def test_sample_table_invalid_df_sz0(self):
# A = read_csv_metadata(path_a)
# B = read_csv_metadata(path_b, key='ID')
# C = read_csv_metadata(path_c, ltable=A, rtable=B)
D = sample_table(pd.DataFrame(), 1, True)
| bsd-3-clause |
gpetretto/pymatgen | pymatgen/io/abinit/tests/test_abiinspect.py | 4 | 4222 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
import os
import tempfile
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.abinit.abiinspect import *
_test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files', "abinit")
try:
import matplotlib
matplotlib.use("pdf") # Use non-graphical display backend during test.
have_matplotlib = "DISPLAY" in os.environ
except ImportError:
have_matplotlib = False
def ref_file(filename):
return os.path.join(_test_dir, filename)
def ref_files(*filenames):
return list(map(ref_file, filenames))
class YamlTokenizerTest(PymatgenTest):
"""Test YamlTokenizer."""
def test_base(self):
string = \
"""---
none: [~, null]
bool: [true, false, on, off]
int: 42
float: 3.14159
list: [LITE, RES_ACID, SUS_DEXT]
dict: {hp: 13, sp: 5}
...
this is not a YAML document!
and the tokenizer will ignore it
--- !Monster
name: Cave spider
hp: [2,6] # 2d6
ac: 16
attacks: [BITE, HURT]
...
This is not a proper document since it does not start with ---
the end tag below is ignored
...
--- !Monster
name: Dragon
hp: [2,6] # 2d6
ac: 32
attacks: [BITE, HURT]
...
"""
#for i, line in enumerate(string.splitlines()): print(i, line)
fd, filename = tempfile.mkstemp(text=True)
with open(filename, "w") as fh:
fh.write(string)
doc_tags = [None, "!Monster", "!Monster"]
doc_linenos = [1, 13, 23]
with YamlTokenizer(filename) as r:
# Iterate the docs
n = 0
for i, doc in enumerate(r):
n += 1
print("doc", doc)
self.assertTrue(doc.tag == doc_tags[i])
self.assertTrue(doc.lineno == doc_linenos[i])
self.assertTrue(n == len(doc_tags))
# Read all docs present in the file.
r.seek(0)
all_docs = r.all_yaml_docs()
#print(all_docs)
self.assertTrue(len(all_docs) == 3)
            # We should be at the beginning of the file.
self.assertTrue(all_docs == r.all_yaml_docs())
# Find documents by tag.
r.seek(0)
monster = r.next_doc_with_tag("!Monster")
#print("monster",monster)
self.assertTrue(monster == all_docs[1])
monster = r.next_doc_with_tag("!Monster")
self.assertTrue(monster == all_docs[2])
# this should raise StopIteration
with self.assertRaises(StopIteration):
monster = r.next_doc_with_tag("!Monster")
# os.remove(filename)
class AbinitInpectTest(PymatgenTest):
def test_scfcycle(self):
"""Testing ScfCycle."""
cycle = GroundStateScfCycle.from_file(ref_file("mgb2_scf.abo"))
str(cycle)
cycle.to_string(verbose=2)
assert cycle.num_iterations == 6
last = cycle.last_iteration
assert last["Etot(hartree)"] == -7.1476241568657 and last["vres2"] == 3.879E-08
assert list(cycle["vres2"]) == [1.769E+02, 7.920E-01, 1.570E-01, 4.259E-03, 4.150E-05, 3.879E-08]
# TODO: Reactivate
#if have_matplotlib:
# assert cycle.plot(show=False)
# Testing CyclesPlotter.
p = CyclesPlotter()
p.add_label_cycle("mgb2 SCF", cycle)
p.add_label_cycle("same SCF", cycle)
# TODO: Reactivate
#if have_matplotlib:
# assert p.combiplot(show=False)
# p.slideshow()
def test_relaxation(self):
"""Testing Relaxation object."""
relaxation = Relaxation.from_file(ref_file("sic_relax.abo"))
print(relaxation)
assert len(relaxation) == 4
assert relaxation[0]["Etot(hartree)"][-1] == -8.8077409200473
assert relaxation[-1]["Etot(hartree)"][-1] == -8.8234906607147
for scf_step in relaxation:
print(scf_step.num_iterations)
# TODO: Reactivate
#if have_matplotlib:
# relaxation.plot(show=False)
# relaxation.slideshow(show=False)
| mit |
bgroveben/python3_machine_learning_projects | oreilly_GANs_for_beginners/oreilly_GANs_for_beginners/introduction_to_ml_with_python/mglearn/mglearn/plot_kmeans.py | 5 | 5791 | import numpy as np
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances
import matplotlib.pyplot as plt
import matplotlib as mpl
from cycler import cycler
from .tools import discrete_scatter
from .plot_2d_separator import plot_2d_classification
from .plot_helpers import cm3
def plot_kmeans_algorithm():
X, y = make_blobs(random_state=1)
# we don't want cyan in there
with mpl.rc_context(rc={'axes.prop_cycle': cycler('color', ['#0000aa',
'#ff2020',
'#50ff50'])}):
fig, axes = plt.subplots(3, 3, figsize=(10, 8), subplot_kw={'xticks': (), 'yticks': ()})
axes = axes.ravel()
axes[0].set_title("Input data")
discrete_scatter(X[:, 0], X[:, 1], ax=axes[0], markers=['o'], c='w')
axes[1].set_title("Initialization")
init = X[:3, :]
discrete_scatter(X[:, 0], X[:, 1], ax=axes[1], markers=['o'], c='w')
discrete_scatter(init[:, 0], init[:, 1], [0, 1, 2], ax=axes[1],
markers=['^'], markeredgewidth=2)
axes[2].set_title("Assign Points (1)")
km = KMeans(n_clusters=3, init=init, max_iter=1, n_init=1).fit(X)
centers = km.cluster_centers_
# need to compute labels by hand. scikit-learn does two e-steps for max_iter=1
# (and it's totally my fault)
labels = np.argmin(pairwise_distances(init, X), axis=0)
discrete_scatter(X[:, 0], X[:, 1], labels, markers=['o'],
ax=axes[2])
discrete_scatter(init[:, 0], init[:, 1], [0, 1, 2],
ax=axes[2], markers=['^'], markeredgewidth=2)
axes[3].set_title("Recompute Centers (1)")
discrete_scatter(X[:, 0], X[:, 1], labels, markers=['o'],
ax=axes[3])
discrete_scatter(centers[:, 0], centers[:, 1], [0, 1, 2],
ax=axes[3], markers=['^'], markeredgewidth=2)
axes[4].set_title("Reassign Points (2)")
km = KMeans(n_clusters=3, init=init, max_iter=1, n_init=1).fit(X)
labels = km.labels_
discrete_scatter(X[:, 0], X[:, 1], labels, markers=['o'],
ax=axes[4])
discrete_scatter(centers[:, 0], centers[:, 1], [0, 1, 2],
ax=axes[4], markers=['^'], markeredgewidth=2)
km = KMeans(n_clusters=3, init=init, max_iter=2, n_init=1).fit(X)
axes[5].set_title("Recompute Centers (2)")
centers = km.cluster_centers_
discrete_scatter(X[:, 0], X[:, 1], labels, markers=['o'],
ax=axes[5])
discrete_scatter(centers[:, 0], centers[:, 1], [0, 1, 2],
ax=axes[5], markers=['^'], markeredgewidth=2)
axes[6].set_title("Reassign Points (3)")
labels = km.labels_
discrete_scatter(X[:, 0], X[:, 1], labels, markers=['o'],
ax=axes[6])
markers = discrete_scatter(centers[:, 0], centers[:, 1], [0, 1, 2],
ax=axes[6], markers=['^'],
markeredgewidth=2)
axes[7].set_title("Recompute Centers (3)")
km = KMeans(n_clusters=3, init=init, max_iter=3, n_init=1).fit(X)
centers = km.cluster_centers_
discrete_scatter(X[:, 0], X[:, 1], labels, markers=['o'],
ax=axes[7])
discrete_scatter(centers[:, 0], centers[:, 1], [0, 1, 2],
ax=axes[7], markers=['^'], markeredgewidth=2)
axes[8].set_axis_off()
axes[8].legend(markers, ["Cluster 0", "Cluster 1", "Cluster 2"], loc='best')
def plot_kmeans_boundaries():
X, y = make_blobs(random_state=1)
init = X[:3, :]
km = KMeans(n_clusters=3, init=init, max_iter=2, n_init=1).fit(X)
discrete_scatter(X[:, 0], X[:, 1], km.labels_, markers=['o'])
discrete_scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1],
[0, 1, 2], markers=['^'], markeredgewidth=2)
plot_2d_classification(km, X, cm=cm3, alpha=.4)
def plot_kmeans_faces(km, pca, X_pca, X_people, y_people, target_names):
n_clusters = 10
image_shape = (87, 65)
fig, axes = plt.subplots(n_clusters, 11, subplot_kw={'xticks': (), 'yticks': ()},
figsize=(10, 15), gridspec_kw={"hspace": .3})
for cluster in range(n_clusters):
center = km.cluster_centers_[cluster]
mask = km.labels_ == cluster
dists = np.sum((X_pca - center) ** 2, axis=1)
dists[~mask] = np.inf
inds = np.argsort(dists)[:5]
dists[~mask] = -np.inf
inds = np.r_[inds, np.argsort(dists)[-5:]]
axes[cluster, 0].imshow(pca.inverse_transform(center).reshape(image_shape), vmin=0, vmax=1)
for image, label, asdf, ax in zip(X_people[inds], y_people[inds],
km.labels_[inds], axes[cluster, 1:]):
ax.imshow(image.reshape(image_shape), vmin=0, vmax=1)
ax.set_title("%s" % (target_names[label].split()[-1]), fontdict={'fontsize': 9})
# add some boxes to illustrate which are similar and which dissimilar
rec = plt.Rectangle([-5, -30], 73, 1295, fill=False, lw=2)
rec = axes[0, 0].add_patch(rec)
rec.set_clip_on(False)
axes[0, 0].text(0, -40, "Center")
rec = plt.Rectangle([-5, -30], 385, 1295, fill=False, lw=2)
rec = axes[0, 1].add_patch(rec)
rec.set_clip_on(False)
axes[0, 1].text(0, -40, "Close to center")
rec = plt.Rectangle([-5, -30], 385, 1295, fill=False, lw=2)
rec = axes[0, 6].add_patch(rec)
rec.set_clip_on(False)
axes[0, 6].text(0, -40, "Far from center")
| mit |
roxyboy/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
roxyboy/scikit-learn | examples/cluster/plot_mini_batch_kmeans.py | 265 | 4081 | """
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
    different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
quoccuongngo/speech-recognition | deep_learning_course/notmnist.py | 2 | 8640 | from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
from IPython.display import display, Image
from scipy import ndimage
from sklearn.linear_model import LogisticRegression
from urllib.request import urlretrieve
from six.moves import cPickle as pickle
import time
start_time = time.clock()
url = 'https://commondatastorage.googleapis.com/books1000/'
last_percent_reported = None
data_root = '/home/stack/PycharmProjects/speech-recognition/deep-learning-course/dataset' # noqa
def download_progress_hook(count, block_size, total_size):
"""A hook to report the progress of a download. This is mostly intended for
users with slow internet connections. Reports every 5% change in download
progress.
"""
global last_percent_reported
percent = int(count * block_size * 100 / total_size)
if last_percent_reported != percent:
if percent % 5 == 0:
sys.stdout.write("%s%%" % percent)
sys.stdout.flush()
else:
sys.stdout.write(".")
sys.stdout.flush()
def maybe_download(filename, expected_bytes, force=False):
"""Download a file if not present, and make sure it's the right size."""
dest_filename = os.path.join(data_root, filename)
if force or not os.path.exists(dest_filename):
print('Attempting to download:', filename)
filename, _ = urlretrieve(url=url + filename, filename=dest_filename,
reporthook=download_progress_hook)
print('\nDownload complete')
statinfo = os.stat(dest_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', dest_filename)
else:
raise Exception('Failed to verify ' + dest_filename
+ '. Can you get to it with a browser?')
return dest_filename
train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
num_classes = 10
np.random.seed(133)
def maybe_extract(filename, force=False):
root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz
if os.path.isdir(root) and not force:
print('%s already present - skipping extraction of %s.'
% (root, filename))
else:
print('Extracting data for %s. This may take a while. Please wait.'
% root)
tar = tarfile.open(filename)
sys.stdout.flush()
tar.extractall(data_root)
tar.close()
data_folders = [
os.path.join(root, d) for d in sorted(os.listdir(root))
if os.path.isdir(os.path.join(root, d))
]
if len(data_folders) != num_classes:
raise Exception('Expected %d folders, one per class. Found %d instead.'
% (num_classes, len(data_folders)))
print(data_folders)
return data_folders
train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)
image_size = 28 # px
pixel_depth = 255.0
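# Note: load_letter below rescales each pixel as
# (value - pixel_depth / 2) / pixel_depth, so raw 0..255 intensities are
# mapped to roughly [-0.5, 0.5] and the data is zero-centered for training.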
def load_letter(folder, min_num_images):
"""Load the data for a single letter label."""
image_files = os.listdir(folder)
# dataset - 3D array: image_count * (image_size * image_size)
dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
dtype=np.float32)
print(folder)
num_images = 0
for image in image_files:
image_file = os.path.join(folder, image)
try:
image_data = (ndimage.imread(image_file).astype(float) -
pixel_depth / 2) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s'
% str(image_data.shape))
# Store `image_data` into `dataset` at `num_images`
dataset[num_images, :, :] = image_data
num_images = num_images + 1
except IOError as e:
print('Could not read:', image_file, ':', e,
'- it\'s OK, skipping.')
# Skip empty elements
dataset = dataset[0:num_images, :, :]
if num_images < min_num_images:
raise Exception('Many fewer images than expected: %d < %d'
% (num_images, min_num_images))
print('Full dataset tensor:', dataset.shape)
print('Mean:', np.mean(dataset))
print('Standard deviation:', np.std(dataset))
return dataset
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
dataset_names = []
for folder in data_folders:
set_filename = folder + '.pickle'
dataset_names.append(set_filename)
if os.path.exists(set_filename) and not force:
print('%s already present - Skipping pickling.' % set_filename)
else:
print('Pickling %s.' % set_filename)
dataset = load_letter(folder, min_num_images_per_class)
try:
with open(set_filename, 'wb') as f:
pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', set_filename, ':', e)
return dataset_names
train_datasets = maybe_pickle(train_folders, 45000)
test_datasets = maybe_pickle(test_folders, 1800)
def make_arrays(nb_rows, img_size):
if nb_rows:
dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)
labels = np.ndarray(nb_rows, dtype=np.int32)
else:
dataset, labels = None, None
return dataset, labels
def merge_datasets(pickle_files, train_size, valid_size=0):
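    # Build class-balanced splits: each per-class pickle is shuffled, its first
    # vsize_per_class images go to the validation set and the following
    # tsize_per_class images go to the training set.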
num_classes = len(pickle_files)
valid_dataset, valid_labels = make_arrays(valid_size, image_size)
train_dataset, train_labels = make_arrays(train_size, image_size)
vsize_per_class = valid_size // num_classes
tsize_per_class = train_size // num_classes
start_v, start_t = 0, 0
end_v, end_t = vsize_per_class, tsize_per_class
end_l = vsize_per_class + tsize_per_class
for label, pickle_file in enumerate(pickle_files):
try:
with open(pickle_file, 'rb') as f:
letter_set = pickle.load(f)
# let's shuffle the letters to have random validation and
# training set
np.random.shuffle(letter_set)
if valid_dataset is not None:
valid_letter = letter_set[:vsize_per_class, :, :]
valid_dataset[start_v:end_v, :, :] = valid_letter
valid_labels[start_v:end_v] = label
start_v += vsize_per_class
end_v += vsize_per_class
train_letter = letter_set[vsize_per_class:end_l, :, :]
train_dataset[start_t:end_t, :, :] = train_letter
train_labels[start_t:end_t] = label
start_t += tsize_per_class
end_t += tsize_per_class
except Exception as e:
print('Unable to process data from', pickle_file, ':', e)
raise
return valid_dataset, valid_labels, train_dataset, train_labels
train_size = 200000
valid_size = 10000
test_size = 10000
valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(
train_datasets, train_size, valid_size)
_, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)
print('Training:', train_dataset.shape, train_labels.shape)
print('Validation:', valid_dataset.shape, valid_labels.shape)
print('Testing:', test_dataset.shape, test_labels.shape)
def randomize(dataset, labels):
permutation = np.random.permutation(labels.shape[0])
shuffled_dataset = dataset[permutation, :, :]
shuffled_labels = labels[permutation]
return shuffled_dataset, shuffled_labels
train_dataset, train_labels = randomize(train_dataset, train_labels)
test_dataset, test_labels = randomize(test_dataset, test_labels)
valid_dataset, valid_labels = randomize(valid_dataset, valid_labels)
pickle_file = os.path.join(data_root, 'notMNIST.pickle')
try:
f = open(pickle_file, 'wb')
save = {
'train_dataset': train_dataset,
'train_labels': train_labels,
'valid_dataset': valid_dataset,
'valid_labels': valid_labels,
'test_dataset': test_dataset,
'test_labels': test_labels
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
stat_info = os.stat(pickle_file)
print('Compressed pickle size:', stat_info.st_size)
print(time.clock() - start_time, "seconds")
| gpl-3.0 |
xiaoxiamii/scikit-learn | sklearn/ensemble/__init__.py | 217 | 1307 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
JudoWill/glue | glue/core/fitters.py | 1 | 11654 | """
Glue's fitting classes are designed to be easily subclassed for performing
custom model fitting in Glue.
See the guide on :ref:`writing custom fit plugins <fit_plugins>` for
help with using custom fitting utilities in Glue.
"""
import numpy as np
from .simpleforms import IntOption, Option
__all__ = ['BaseFitter1D',
'PolynomialFitter',
'AstropyFitter1D',
'SimpleAstropyGaussianFitter',
'BasicGaussianFitter']
class BaseFitter1D(object):
"""
Base class for 1D fitters.
    This abstract class must be subclassed.
"""
label = "Fitter"
"""A short label for the fit, used by the GUI"""
param_names = []
"""list of parameter names that support restrictions"""
def __init__(self, **params):
self._constraints = {}
for k, v in params.items():
if k in self.param_names:
self.set_constraint(k, value=v)
else:
setattr(self, k, v)
def plot(self, fit_result, axes, x):
"""
Plot the result of a fit.
:param fit_result: The output from fit
:param axes: The Matplotlib axes to add the fit to
:param x: The values of X at which to visualize the model
:returns: A list of matplotlib artists. **This is important:**
plots will not be properly cleared if this isn't provided
"""
y = self.predict(fit_result, x)
result = axes.plot(x, y, '#4daf4a',
lw=3, alpha=0.8,
scalex=False, scaley=False)
return result
def _sigma_to_weights(self, dy):
if dy is not None:
return 1. / np.asarray(dy) ** 2
@property
def options(self):
"""
A dictionary of the current setting of each model hyperparameter.
Hyperparameters are defined in subclasses by creating class-level
:mod:`Option <glue.core.simpleforms>` attributes. This attribute
dict maps ``{hyperparameter_name: current_value}``
"""
result = []
for typ in type(self).mro():
result.extend(k for k, v in typ.__dict__.items()
if isinstance(v, Option))
return dict((o, getattr(self, o)) for o in result)
def summarize(self, fit_result, x, y, dy=None):
"""
Return a textual summary of the fit.
:param fit_result: The return value from :meth:`fit`
:param x: The x values passed to :meth:`fit`
:returns: A description of the fit result
:rtype: str
"""
return str(fit_result)
@property
def constraints(self):
"""
A dict of the constraints on each parameter in :attr:`param_names`.
Each value is itself a dict with 3 items:
:key value: The default value
:key fixed: True / False, indicating whether the parameter is fixed
        :key limits: [min, max] or None, indicating lower/upper limits
"""
result = {}
for p in self.param_names:
result[p] = dict(value=None, fixed=False, limits=None)
result[p].update(self._constraints.get(p, {}))
return result
def set_constraint(self, parameter_name, value=None,
fixed=None, limits=None):
"""
Update a constraint.
:param parameter_name: name of the parameter to update
:type parameter_name: str
:param value: Set the default value (optional)
        :param limits: Set the limits to [min, max] (optional)
:param fixed: Set whether the parameter is fixed (optional)
"""
c = self._constraints.setdefault(parameter_name, {})
if value is not None:
c['value'] = value
if fixed is not None:
c['fixed'] = fixed
if limits is not None:
c['limits'] = limits
def build_and_fit(self, x, y, dy=None):
"""
Method which builds the arguments to fit, and calls that method
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if dy is not None:
dy = np.asarray(dy).ravel()
return self.fit(x, y, dy=dy,
constraints=self.constraints,
**self.options)
def fit(self, x, y, dy, constraints, **options):
"""
Fit the model to data.
*This must be overriden by a subclass.*
:param x: The x values of the data
:type x: :class:`numpy.ndarray`
:param y: The y values of the data
:type y: :class:`numpy.ndarray`
:param dy: 1 sigma uncertainties on each datum (optional)
:type dy: :class:`numpy.ndarray`
:param constraints: The current value of :attr:`constraints`
:param options: kwargs for model hyperparameters.
:returns: An object representing the fit result.
"""
raise NotImplementedError()
def predict(self, fit_result, x):
"""
        Evaluate the model at a set of locations.
**This must be overridden in a subclass.**
:param fit_result: The result from the fit method
:param x: Locations to evaluate model at
:type x: :class:`numpy.ndarray`
:returns: model(x)
:rtype: :class:`numpy.ndarray`
"""
raise NotImplementedError()
class AstropyFitter1D(BaseFitter1D):
"""
A base class for wrapping :mod:`astropy.modeling`.
    Subclasses must override :attr:`model_cls` and :attr:`fitting_cls`
to point to the desired Astropy :mod:`model <astropy.modeling>`
and :mod:`fitter <astropy.modeling.fitting>` classes.
In addition, they should override :attr:`label` with a better label,
and :meth:`parameter_guesses` to generate initial guesses
"""
model_cls = None
"""class describing the model"""
fitting_cls = None
"""class to fit the model"""
label = "Base Astropy Fitter"
"""UI Label"""
@property
def param_names(self):
return self.model_cls.param_names
def predict(self, fit_result, x):
model, _ = fit_result
return model(x)
def summarize(self, fit_result, x, y, dy=None):
model, fitter = fit_result
result = [_report_fitter(fitter), ""]
pnames = list(sorted(model.param_names))
maxlen = max(map(len, pnames))
result.extend("%s = %e" % (p.ljust(maxlen), getattr(model, p).value)
for p in pnames)
return "\n".join(result)
def fit(self, x, y, dy, constraints):
m, f = self._get_model_fitter(x, y, dy, constraints)
dy = self._sigma_to_weights(dy)
return f(m, x, y, weights=dy), f
def _get_model_fitter(self, x, y, dy, constraints):
if self.model_cls is None or self.fitting_cls is None:
raise NotImplementedError("Model or fitting class is unspecified.")
params = dict((k, v['value']) for k, v in constraints.items())
# update unset parameters with guesses from data
for k, v in self.parameter_guesses(x, y, dy).items():
if params[k] is not None or constraints[k]['fixed']:
continue
params[k] = v
m = self.model_cls(**params)
f = self.fitting_cls()
for param_name, constraint in constraints.items():
param = getattr(m, param_name)
if constraint['fixed']:
param.fixed = True
if constraint['limits']:
param.min, param.max = constraint['limits']
return m, f
def parameter_guesses(self, x, y, dy):
"""
Provide initial guesses for each model parameter.
**The base implementation does nothing, and should be overridden**
:param x: X - values of the data
:type x: :class:`numpy.ndarray`
:param y: Y - values of the data
:type y: :class:`numpy.ndarray`
        :param dy: uncertainties on Y (assumed to be 1 sigma)
:type dy: :class:`numpy.ndarray`
        :returns: A dict mapping ``{parameter_name: value guess}`` for each
parameter
"""
return {}
def _gaussian_parameter_estimates(x, y, dy):
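    # Moment-based initial guesses: the amplitude is the 95th percentile of y,
    # while the mean and stddev are the first moment and the square root of the
    # second central moment of x, weighted by the clipped, normalized y values.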
amplitude = np.percentile(y, 95)
y = np.maximum(y / y.sum(), 0)
mean = (x * y).sum()
stddev = np.sqrt((y * (x - mean) ** 2).sum())
return dict(mean=mean, stddev=stddev, amplitude=amplitude)
class BasicGaussianFitter(BaseFitter1D):
"""
Fallback Gaussian fitter, for astropy < 0.3.
If :mod:`astropy.modeling` is installed, this class is replaced by
:class:`SimpleAstropyGaussianFitter`
"""
label = "Gaussian"
def _errorfunc(self, params, x, y, dy):
yp = self.eval(x, *params)
result = (yp - y)
if dy is not None:
result /= dy
return result
@staticmethod
def eval(x, amplitude, mean, stddev):
return np.exp(-(x - mean) ** 2 / (2 * stddev ** 2)) * amplitude
@staticmethod
def fit_deriv(x, amplitude, mean, stddev):
"""
Gaussian1D model function derivatives.
"""
d_amplitude = np.exp(-0.5 / stddev ** 2 * (x - mean) ** 2)
d_mean = amplitude * d_amplitude * (x - mean) / stddev ** 2
d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev ** 3
return [d_amplitude, d_mean, d_stddev]
def fit(self, x, y, dy, constraints):
from scipy import optimize
init_values = _gaussian_parameter_estimates(x, y, dy)
init_values = [init_values[p] for p in ['amplitude', 'mean', 'stddev']]
farg = (x, y, dy)
dfunc = None
fitparams, status, dinfo, mess, ierr = optimize.leastsq(
self._errorfunc, init_values, args=farg, Dfun=dfunc,
full_output=True)
return fitparams
def predict(self, fit_result, x):
return self.eval(x, *fit_result)
def summarize(self, fit_result, x, y, dy=None):
return ("amplitude = %e\n"
"mean = %e\n"
"stddev = %e" % tuple(fit_result))
GaussianFitter = BasicGaussianFitter
try:
from astropy.modeling import models, fitting
class SimpleAstropyGaussianFitter(AstropyFitter1D):
"""
        Gaussian fitter using astropy.modeling.
"""
model_cls = models.Gaussian1D
try:
fitting_cls = fitting.LevMarLSQFitter
except AttributeError: # astropy v0.3
fitting_cls = fitting.NonLinearLSQFitter
label = "Gaussian"
parameter_guesses = staticmethod(_gaussian_parameter_estimates)
GaussianFitter = SimpleAstropyGaussianFitter
except ImportError:
pass
class PolynomialFitter(BaseFitter1D):
"""
A polynomial model.
The degree of the polynomial is specified by :attr:`degree`
"""
label = "Polynomial"
degree = IntOption(min=0, max=5, default=3, label="Polynomial Degree")
def fit(self, x, y, dy, constraints, degree=2):
"""
Fit a ``degree``-th order polynomial to the data.
"""
w = self._sigma_to_weights(dy)
return np.polyfit(x, y, degree, w=w)
def predict(self, fit_result, x):
return np.polyval(fit_result, x)
def summarize(self, fit_result, x, y, dy=None):
return "Coefficients:\n" + "\n".join("%e" % coeff
for coeff in fit_result.tolist())
def _report_fitter(fitter):
if "nfev" in fitter.fit_info:
return "Converged in %i iterations" % fitter.fit_info['nfev']
return 'Converged'
__FITTERS__ = [PolynomialFitter, GaussianFitter]
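# Illustrative sketch (not part of glue itself): as the module docstring notes,
# custom fitters are written by subclassing BaseFitter1D. The minimal example
# below, which assumes nothing beyond numpy, models the data as a single
# weighted-mean constant; see the guide referenced in the module docstring for
# how such a fitter is registered as a fit plugin.
class ConstantFitter(BaseFitter1D):
    """Example fitter that models the data as a single constant value."""
    label = "Constant (example)"
    def fit(self, x, y, dy, constraints):
        # Weighted mean of the data; the weights default to None when dy is
        # not provided, giving an unweighted mean.
        weights = self._sigma_to_weights(dy)
        return np.average(y, weights=weights)
    def predict(self, fit_result, x):
        return np.full_like(np.asarray(x, dtype=float), fit_result)
    def summarize(self, fit_result, x, y, dy=None):
        return "constant = %e" % fit_result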
| bsd-3-clause |
YoshikawaMasashi/magenta | magenta/tools/pip/setup.py | 2 | 3683 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A setuptools based setup module for magenta."""
from setuptools import find_packages
from setuptools import setup
# Bit of a hack to parse the version string stored in version.py without
# executing __init__.py, which will end up requiring a bunch of dependencies to
# execute (e.g., tensorflow, pretty_midi, etc.).
# Makes the __version__ variable available.
execfile('magenta/version.py')
REQUIRED_PACKAGES = [
'IPython',
'Pillow >= 3.4.2',
'intervaltree >= 2.1.0',
'matplotlib >= 1.5.3',
'mido >= 1.1.17',
'pretty_midi >= 0.2.6',
'scipy >= 0.18.1',
'tensorflow >= 1.0.0',
'wheel',
]
CONSOLE_SCRIPTS = [
'magenta.interfaces.midi.magenta_midi',
'magenta.models.drums_rnn.drums_rnn_create_dataset',
'magenta.models.drums_rnn.drums_rnn_generate',
'magenta.models.drums_rnn.drums_rnn_train',
'magenta.models.image_stylization.image_stylization_create_dataset',
'magenta.models.image_stylization.image_stylization_evaluate',
'magenta.models.image_stylization.image_stylization_finetune',
'magenta.models.image_stylization.image_stylization_train',
'magenta.models.image_stylization.image_stylization_transform',
'magenta.models.improv_rnn.improv_rnn_create_dataset',
'magenta.models.improv_rnn.improv_rnn_generate',
'magenta.models.improv_rnn.improv_rnn_train',
'magenta.models.melody_rnn.melody_rnn_create_dataset',
'magenta.models.melody_rnn.melody_rnn_generate',
'magenta.models.melody_rnn.melody_rnn_train',
'magenta.models.polyphony_rnn.polyphony_rnn_create_dataset',
'magenta.models.polyphony_rnn.polyphony_rnn_generate',
'magenta.models.polyphony_rnn.polyphony_rnn_train',
'magenta.models.rl_tuner.rl_tuner_train',
'magenta.scripts.convert_dir_to_note_sequences',
]
setup(
name='magenta',
version=__version__, # pylint: disable=undefined-variable
description='Use machine learning to create art and music',
long_description='',
url='https://magenta.tensorflow.org/',
author='Google Inc.',
author_email='[email protected]',
license='Apache 2',
# PyPI package information.
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries',
],
keywords='tensorflow machine learning magenta music art',
packages=find_packages(),
install_requires=REQUIRED_PACKAGES,
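    # Each module path in CONSOLE_SCRIPTS is expanded to a console_scripts
    # entry of the form '<last_component> = <full.module.path>:console_entry_point',
    # e.g. 'magenta_midi = magenta.interfaces.midi.magenta_midi:console_entry_point'.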
entry_points={
'console_scripts': ['%s = %s:console_entry_point' % (n, p) for n, p in
((s.split('.')[-1], s) for s in CONSOLE_SCRIPTS)],
},
include_package_data=True,
package_data={
'magenta': ['models/image_stylization/evaluation_images/*.jpg'],
},
)
| apache-2.0 |
rgommers/statsmodels | statsmodels/tsa/base/tests/test_datetools.py | 28 | 5620 | from datetime import datetime
import numpy.testing as npt
from statsmodels.tsa.base.datetools import (_date_from_idx,
_idx_from_dates, date_parser, date_range_str, dates_from_str,
dates_from_range, _infer_freq, _freq_to_pandas)
from pandas import DatetimeIndex, PeriodIndex
def test_date_from_idx():
d1 = datetime(2008, 12, 31)
idx = 15
npt.assert_equal(_date_from_idx(d1, idx, 'Q'), datetime(2012, 9, 30))
npt.assert_equal(_date_from_idx(d1, idx, 'A'), datetime(2023, 12, 31))
npt.assert_equal(_date_from_idx(d1, idx, 'B'), datetime(2009, 1, 21))
npt.assert_equal(_date_from_idx(d1, idx, 'D'), datetime(2009, 1, 15))
npt.assert_equal(_date_from_idx(d1, idx, 'W'), datetime(2009, 4, 12))
npt.assert_equal(_date_from_idx(d1, idx, 'M'), datetime(2010, 3, 31))
def test_idx_from_date():
d1 = datetime(2008, 12, 31)
idx = 15
npt.assert_equal(_idx_from_dates(d1, datetime(2012, 9, 30), 'Q'), idx)
npt.assert_equal(_idx_from_dates(d1, datetime(2023, 12, 31), 'A'), idx)
npt.assert_equal(_idx_from_dates(d1, datetime(2009, 1, 21), 'B'), idx)
npt.assert_equal(_idx_from_dates(d1, datetime(2009, 1, 15), 'D'), idx)
# move d1 and d2 forward to end of week
npt.assert_equal(_idx_from_dates(datetime(2009, 1, 4),
datetime(2009, 4, 17), 'W'), idx-1)
npt.assert_equal(_idx_from_dates(d1, datetime(2010, 3, 31), 'M'), idx)
def test_regex_matching_month():
t1 = "1999m4"
t2 = "1999:m4"
t3 = "1999:mIV"
t4 = "1999mIV"
result = datetime(1999, 4, 30)
npt.assert_equal(date_parser(t1), result)
npt.assert_equal(date_parser(t2), result)
npt.assert_equal(date_parser(t3), result)
npt.assert_equal(date_parser(t4), result)
def test_regex_matching_quarter():
t1 = "1999q4"
t2 = "1999:q4"
t3 = "1999:qIV"
t4 = "1999qIV"
result = datetime(1999, 12, 31)
npt.assert_equal(date_parser(t1), result)
npt.assert_equal(date_parser(t2), result)
npt.assert_equal(date_parser(t3), result)
npt.assert_equal(date_parser(t4), result)
def test_dates_from_range():
results = [datetime(1959, 3, 31, 0, 0),
datetime(1959, 6, 30, 0, 0),
datetime(1959, 9, 30, 0, 0),
datetime(1959, 12, 31, 0, 0),
datetime(1960, 3, 31, 0, 0),
datetime(1960, 6, 30, 0, 0),
datetime(1960, 9, 30, 0, 0),
datetime(1960, 12, 31, 0, 0),
datetime(1961, 3, 31, 0, 0),
datetime(1961, 6, 30, 0, 0),
datetime(1961, 9, 30, 0, 0),
datetime(1961, 12, 31, 0, 0),
datetime(1962, 3, 31, 0, 0),
datetime(1962, 6, 30, 0, 0)]
dt_range = dates_from_range('1959q1', '1962q2')
npt.assert_(results == dt_range)
# test with starting period not the first with length
results = results[2:]
dt_range = dates_from_range('1959q3', length=len(results))
npt.assert_(results == dt_range)
# check month
results = [datetime(1959, 3, 31, 0, 0),
datetime(1959, 4, 30, 0, 0),
datetime(1959, 5, 31, 0, 0),
datetime(1959, 6, 30, 0, 0),
datetime(1959, 7, 31, 0, 0),
datetime(1959, 8, 31, 0, 0),
datetime(1959, 9, 30, 0, 0),
datetime(1959, 10, 31, 0, 0),
datetime(1959, 11, 30, 0, 0),
datetime(1959, 12, 31, 0, 0),
datetime(1960, 1, 31, 0, 0),
datetime(1960, 2, 28, 0, 0),
datetime(1960, 3, 31, 0, 0),
datetime(1960, 4, 30, 0, 0),
datetime(1960, 5, 31, 0, 0),
datetime(1960, 6, 30, 0, 0),
datetime(1960, 7, 31, 0, 0),
datetime(1960, 8, 31, 0, 0),
datetime(1960, 9, 30, 0, 0),
datetime(1960, 10, 31, 0, 0),
datetime(1960, 12, 31, 0, 0),
datetime(1961, 1, 31, 0, 0),
datetime(1961, 2, 28, 0, 0),
datetime(1961, 3, 31, 0, 0),
datetime(1961, 4, 30, 0, 0),
datetime(1961, 5, 31, 0, 0),
datetime(1961, 6, 30, 0, 0),
datetime(1961, 7, 31, 0, 0),
datetime(1961, 8, 31, 0, 0),
datetime(1961, 9, 30, 0, 0),
datetime(1961, 10, 31, 0, 0)]
dt_range = dates_from_range("1959m3", length=len(results))
def test_infer_freq():
d1 = datetime(2008, 12, 31)
d2 = datetime(2012, 9, 30)
b = DatetimeIndex(start=d1, end=d2, freq=_freq_to_pandas['B']).values
d = DatetimeIndex(start=d1, end=d2, freq=_freq_to_pandas['D']).values
w = DatetimeIndex(start=d1, end=d2, freq=_freq_to_pandas['W']).values
m = DatetimeIndex(start=d1, end=d2, freq=_freq_to_pandas['M']).values
a = DatetimeIndex(start=d1, end=d2, freq=_freq_to_pandas['A']).values
q = DatetimeIndex(start=d1, end=d2, freq=_freq_to_pandas['Q']).values
assert _infer_freq(w) == 'W-SUN'
assert _infer_freq(a) == 'A-DEC'
assert _infer_freq(q) == 'Q-DEC'
assert _infer_freq(w[:3]) == 'W-SUN'
assert _infer_freq(a[:3]) == 'A-DEC'
assert _infer_freq(q[:3]) == 'Q-DEC'
assert _infer_freq(b[2:5]) == 'B'
assert _infer_freq(b[:3]) == 'D'
assert _infer_freq(b) == 'B'
assert _infer_freq(d) == 'D'
assert _infer_freq(m) == 'M'
assert _infer_freq(d[:3]) == 'D'
assert _infer_freq(m[:3]) == 'M'
def test_period_index():
dates = PeriodIndex(start="1/1/1990", periods=20, freq="M")
npt.assert_(_infer_freq(dates) == "M")
| bsd-3-clause |
stscieisenhamer/ginga | mkapp/setup.py | 4 | 2838 | # -*- coding: iso-8859-1 -*-
"""
Build a standalone application for Mac OS X and MS Windows platforms
Usage (Mac OS X):
python setup.py py2app
Usage (Windows):
python setup.py py2exe
"""
import sys
from setuptools import setup
info_plist_template = u"""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleName</key>
<string>Ginga</string>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleGetInfoString</key>
<string>Copyright © 2010-2016, Eric Jeschke ([email protected])</string>
<key>CFBundleIconFile</key>
<string>Ginga.icns</string>
<!-- Version number - appears in About box -->
<key>CFBundleShortVersionString</key>
<string>%(version)s</string>
<!-- Build number - appears in About box -->
<key>CFBundleVersion</key>
<string>%(build)s</string>
<!-- Copyright notice - apears in About box -->
<key>NSHumanReadableCopyright</key>
<string>Copyright © 2010-2016, Eric Jeschke ([email protected])</string>
<!-- Globally unique identifier -->
<key>CFBundleIdentifier</key>
<string>org.naoj.Ginga</string>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleExecutable</key>
<string>Ginga</string>
<key>CFBundleDisplayName</key>
<string>Ginga</string>
</dict>
</plist>
"""
from ginga import __version__
import ginga.util.six as six
d = dict(version=__version__, build=__version__.replace('.', ''))
plist = info_plist_template % d
with open('Info.plist', 'w') as out_f:
if six.PY2:
out_f.write(plist.encode('utf-8'))
else:
out_f.write(plist)
APP = ['Ginga.py']
DATA_FILES = []
OPTIONS = {'argv_emulation': True,
'compressed': True,
#'packages': 'ginga,scipy,numpy,kapteyn,astropy,PIL,matplotlib',
'packages': 'ginga,scipy,numpy,astropy,PIL,matplotlib',
'includes': ['sip', 'PyQt4._qt',],
# currently creating some problems with the app build on mac os x
# so exclude
'excludes': ['cv2',],
'matplotlib_backends': 'Qt4Agg',
}
if sys.platform == 'darwin':
# mac-specific options
OPTIONS['plist'] = 'Info.plist'
OPTIONS['iconfile'] = 'Ginga.icns'
extra_options = dict(
setup_requires=['py2app'],
options={'py2app': OPTIONS},
)
elif sys.platform == 'win32':
extra_options = dict(
setup_requires=['py2exe'],
options={'py2exe': OPTIONS},
)
else:
extra_options = dict(
# Normally unix-like platforms will use "setup.py install"
# and install the main script as such
scripts=["ginga"],
)
setup(
name="Ginga",
app=APP,
data_files=DATA_FILES,
**extra_options
)
| bsd-3-clause |
yl565/statsmodels | statsmodels/datasets/tests/test_utils.py | 1 | 1659 | from statsmodels.compat.python import PY3
import os
from statsmodels.datasets import get_rdataset, webuse, check_internet
from numpy.testing import assert_, assert_array_equal, dec
cur_dir = os.path.dirname(os.path.abspath(__file__))
@dec.skipif(PY3, 'Not testable on Python 3.x')
def test_get_rdataset():
# smoke test
if not PY3:
#NOTE: there's no way to test both since the cached files were
#created with Python 2.x, they're strings, but Python 3 expects
#bytes and the index file path is hard-coded so both can't live
#side by side
duncan = get_rdataset("Duncan", "car", cache=cur_dir)
assert_(duncan.from_cache)
#internet_available = check_internet()
#@dec.skipif(not internet_available)
def t_est_webuse():
# test copied and adjusted from iolib/tests/test_foreign
from statsmodels.iolib.tests.results.macrodata import macrodata_result as res2
#base_gh = "http://github.com/statsmodels/statsmodels/raw/master/statsmodels/datasets/macrodata/"
base_gh = "http://www.statsmodels.org/devel/_static/"
res1 = webuse('macrodata', baseurl=base_gh, as_df=False)
assert_array_equal(res1 == res2, True)
#@dec.skipif(not internet_available)
def t_est_webuse_pandas():
# test copied and adjusted from iolib/tests/test_foreign
from pandas.util.testing import assert_frame_equal
from statsmodels.datasets import macrodata
dta = macrodata.load_pandas().data
base_gh = "http://github.com/statsmodels/statsmodels/raw/master/statsmodels/datasets/macrodata/"
res1 = webuse('macrodata', baseurl=base_gh)
res1 = res1.astype(float)
assert_frame_equal(res1, dta)
| bsd-3-clause |
abramhindle/dbn-bitmap-to-pcm | osborne-combined-stft-both-fft2/stft-videosonify.py | 1 | 7371 | import sys
import cv2
import numpy as np
from numpy import *
import random
import time
import pickle
import os.path
import scipy.io
import scipy.io.wavfile
from scikits.audiolab import play
import theanets
import pickle
import numpy as np
import scikits.audiolab
#import sounddevice as sd
import random
#sd.default.samplerate = 44100
#sd.default.channels = 1
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot, show, imshow
if len(sys.argv) < 2:
print "Opening vtest.avi"
cap = cv2.VideoCapture("vtest.avi")
else:
print "Opening %s" % sys.argv[1]
cap = cv2.VideoCapture(sys.argv[1])
running = True
frames = []
# load brain
# cv2.namedWindow("frame", 1)
brain = theanets.feedforward.Regressor.load("stft-theanet.py.net.pkl")#brain-1438666035")
#brain = theanets.feedforward.Regressor.load("brain-1438666035")
brain._graphs = {}
brain._functions = {}
outwav = scikits.audiolab.Sndfile("out.wav",mode='w',format=scikits.audiolab.Format(),channels=1,samplerate=30720)
ret, frame = cap.read()
#class BufferPlayer:
# def __init__(self):
# self.base = 4096
# self.size = 2*self.base
# self.buffer = bp.zeros(self.base)
# self.oldbuffs = []
#
# def add(self, arr):
# self.oldbuffs.append(arr)
#
# def play(self):
# ''' play the next thing '''
#
# sd.play(out[0], 22050)
def gaussian_noise(inarr,mean=0.0,scale=1.0):
noise = np.random.normal(mean,scale,inarr.shape)
return inarr + noise.reshape(inarr.shape)
outs = []
window_size = 2048
#windowed = scipy.hanning(window_size)
windowed = scipy.hamming(window_size)
swin_size = window_size / 2 + 1
alen = 1024 # audio length
window = np.hanning(alen)
frames = 0
overlapsize = window_size - alen
overlap = np.zeros(overlapsize)
# crazy phase stuff
wav = scipy.io.wavfile.read("steal-phase.wav")
wavdata = wav[1].astype(np.float32)
norm = (wavdata)/(2.0**15)
# pad norm with zeros
samples = alen
nsamples = int(math.ceil(len(norm)/float(samples)))
norm.resize(samples*nsamples)
# the +1 is because there's no good relationship between samples and
# window_size it'll just add a buncha zeros anyways
norm.resize((window_size+1)*math.ceil(len(norm)/float(window_size)))
phases = np.array([np.angle(scipy.fft(norm[i*samples:i*samples+window_size])) for i in range(0,nsamples)])
phases = phases[0:phases.shape[0]-1]
#
# if we have 1/4 overlap
# __ __
# / \__ / \__
# / \__ / \__
# / \ / \
#0.5111111111111111111
#
# if we have 1/2 overlap
#
# /\ /\
# /\/\/\
# /\ /\
#0.51111110.5
flat_window = np.ones(window_size)
olaps = int(math.ceil((window_size - alen))) # half
flat_window[0:olaps] = np.arange(0,olaps)
flat_window[olaps:window_size] = np.arange(0,olaps)[::-1]
flat_window /= float(olaps-1)
# debug
# outwav.write_frames(windowed)
last_phase = np.zeros(window_size)
invwindow = 1.0/scipy.hamming(window_size)
amax=7e-3
# Exp001: init all phase @ pi/2 + static phase of np.pi/100.0 windowed
# [X] 30hz :( [ ] aesthetic [X] Robot
# Exp002: init all phase @ pi/2 + static phase of np.pi/10.0 windowed
# [X] 30hz :( [ ] aesthetic [X] Robot
# Exp003: init all phase @ pi/2 + static phase of var np.pi/2.0 normally distributed windows
# [X] 30hz :( [X] aesthetic [ ] Robot [X] Pulsing
# Exp004: init all phase @ pi/2 + static phase of var np.pi/10.0 normally distributed windows
# [X] 30hz :( [ ] aesthetic [X] Robot [ ] Pulsing
# Exp005: init all phase @ pi/2 + static phase of var np.pi normally distributed windows
# [X] 30hz :( [ ] aesthetic [X] Robot [ ] Pulsing [X] White Noisey
# Exp006: init all phase @ 0 + static phase of var np.pi/2.0 normally distributed windows
# [X] 30hz :( [ ] aesthetic [ ] Robot [X] Pulsing
# Exp007: init all phase @ pi/2 + static phase of var np.pi/2.0 uniformly distributed windows
# [X] 30hz :( [ ] aesthetic [ ] Robot [X] Pulsing
# more noisey
# Exp008: init all phase @ pi/2 + static phase of 0 to pi/2
# [X] 30hz :( [ ] aesthetic [ ] Robot [ ] Pulsing
# Exp009: init all phase @ pi/2 + static phase of pi/2 to 0
# [X] 30hz :( [ ] aesthetic [ ] Robot [ ] Pulsing
# Exp010: init normals -pi/2 to pi/2 + static phase of pi/2
# [X] 30hz :( [ ] aesthetic [X] Robot [ ] Pulsing
# Exp011: init normals -pi/2 to pi/2 + random_normals pi/10 recursive
# [ ] 30hz :( [ ] aesthetic [X] Robot [ ] Pulsing
# Exp012: get phase from another sound file
# [ ] 30hz :( [X] aesthetic [X] Robot [ ] Pulsing
cones = np.zeros(swin_size-1).astype(complex) + complex(0,1)
oldout = np.zeros(swin_size)
phase = np.zeros(window_size) #np.random.normal(0,np.pi/2, window_size)
# phase = np.random.normal(np.pi/2,np.pi,window_size)
# staticphase = np.random.normal(0,np.pi/2.0,window_size)
staticphase = np.ones(window_size).astype(float32)*np.pi/2.0
#phase = np.zeros(window_size)
dooverlaps = True
dowindowing = False
phasei=0
while(running):
ret, frame = cap.read()
if (not ret):
running = False
continue
grey = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
#cv2.imshow('frame',frame)
scaled = cv2.resize(grey, (64,64))
scaled = scaled.astype(np.float32)
scaled /= 255.0
scaled = scaled.flatten()
out = brain.predict([scaled])[0]
# out is the guts of a fourier transform
# inverse fft won't work well
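    # out is split below into swin_size magnitude-like values (taken through
    # np.abs and scaled by 500) followed by swin_size phase values
    # (multiplied by pi) for a single STFT frame.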
buf = np.zeros(window_size).astype(complex)
buf[0:swin_size] += 500.0*np.abs(out[0:swin_size])
phase[0:swin_size] = out[swin_size:swin_size+swin_size]*np.pi
# mirror around
# buf[swin_size:window_size] += -1*buf[1:swin_size-1][::-1]
# make phase
# phase = phases[phasei % phases.shape[0]]
# phasei += 1
# phase += np.random.normal(0,np.pi/10,window_size)
myfft = buf * exp(complex(0,1) * phase)
audio = scipy.real(scipy.ifft(myfft))
if (dowindowing):
audio *= windowed
last_phase = np.angle(scipy.fft(audio))
amax = max(audio.max(), amax)
if (dooverlaps):
audio[0:olaps] += overlap[0:olaps]
## should be a copy but whatever
overlap[0:olaps] *= 0
overlap[0:olaps] += audio[window_size-olaps:window_size]
outwav.write_frames(audio[0:olaps]/amax)
else:
outwav.write_frames(audio/amax)
#outwav.write_frames(windowed)
#k = cv2.waitKey(1) & 0xff
#if k == 27:
# continue
frames += 1
if frames % 30 == 0:
print (frames, amax)
outwav.write_frames(overlap)
#idea
# for each frame generate interpolate spectra
# for each frame run 1024 sinusoids and sum them manually but maintain phase?
# invent a phase vector that we add to each time to ensure that the next window has appropriate phase?
outwav.sync()
# outwav = scikits.audiolab.Sndfile("wout.wav",mode='w',format=scikits.audiolab.Format(),channels=1,samplerate=22050)
# output = np.zeros(735*(2+len(outs)))
# for i in range(0,len(outs)):
# #audio = outs[i]*window
# start = (i + 1)*alen
# end = start + alen
# rstart = start + alen/2 + (random.random() - 0.5) * (alen/10) #int(start - (alen/2) + alen*random.random())
# rend = rstart + alen
# output[start:end] += outs[i][0]
# output[rstart:rend] += outs[i][1]
# output[(rstart-alen):(rend-alen)] += outs[i][1]
#
# outwav.write_frames(output)
# outwav.sync()
#
cv2.destroyAllWindows()
| gpl-3.0 |
AlbertoBonfiglio/Robotics537 | Homework3/Homework3/classes/qlearner.py | 1 | 4743 | #!/usr/bin/env python
import numpy as np
from scipy import stats
import random
import math
import matplotlib.pyplot as plt
import uuid
UP = 1
RIGHT = 2
DOWN = 3
LEFT = 4
NOWHERE = 0
EXIT = 100
UNEXPLORED = -1
EXPLORED = 0
class Position(object):
def __init__(self):
self.x = 0
self.y = 0
def __eq__(self, other):
return self.__dict__ == other.__dict__
def getAvailableActions(self):
retval = [0,1,2,3,4] # 0 stay in place, 1 = up then right down left
if (self.x == 0):
retval.remove(LEFT)
if (self.x == 9):
retval.remove(RIGHT)
if (self.y == 0):
retval.remove(UP)
if (self.y == 4):
retval.remove(DOWN)
return retval
def move(self, action):
newposition = Position()
newposition.x = self.x
newposition.y = self.y
if action == UP:
newposition.y = self.y -1
if action == DOWN:
newposition.y = self.y +1
if action == RIGHT:
newposition.x = self.x +1
if action == LEFT:
newposition.x = self.x -1
return newposition
def randomize(self):
self.x = np.random.randint(0, 8)
self.y = np.random.randint(0, 4)
class Explorer(object):
def __init__(self):
self.environment_matrix = np.empty((5, 10), dtype=np.object)
for row in range(5):
for column in range(10):
pos = Position()
pos.y = row
pos.x = column
self.environment_matrix[row][column] = pos
self.current_state = Position()
self.exit_state = Position()
self.exit_state.y = 2
self.exit_state.x = 9
self.environment_matrix[2, 9] = self.exit_state
def getRandomStart(self):
return self.environment_matrix[np.random.randint(4)][np.random.randint(9)]
def findPath(self, explorations=500, epsilon=0, alpha=0.2, gamma=0.9):
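        # Run a fixed number of exploration steps from a random start cell.
        # Each move earns a reward of -1, reaching the exit cell earns +100,
        # and the Q-value of every visited (state, action) pair is recorded
        # in the returned list.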
retval= []
self.current_state = self.getRandomStart()
self.agent = qLearner(self.current_state, self.exit_state, epsilon, alpha, gamma)
for i in range(explorations):
action = self.agent.chooseAction(self.current_state)
last_state = self.current_state
move_state = last_state.move(action)
new_state = self.environment_matrix[move_state.y][move_state.x]
reward = -1
if new_state == self.exit_state:
reward = 100
self.agent.learn(last_state, action, reward, new_state)
self.current_state = new_state
self.agent.updateActions(new_state, action)
retval.append(self.agent.getQ(last_state, action))
return retval
class qLearner(object):
def __init__(self, state, exit, epsilon=0.0, alpha=0.2, gamma=0.9):
self.q = {}
self.epsilon = epsilon #e-greedy variable
self.alpha = alpha #learning rate
self.gamma = gamma #discount factor
self.actions = state.getAvailableActions()
self.exit = exit
def updateActions(self, state, lastAction):
self.actions = state.getAvailableActions()
#if lastAction in self.actions:
# self.actions.remove(lastAction)
def getQ(self, state, action):
return self.q.get((state, action), 0.0)
def learn(self, state1, action1, reward, state2):
maxQNew = max([self.getQ(state2, a) for a in self.actions]) #max of all possible actions
self.learnQ(state1, action1, reward, reward + self.gamma * maxQNew)
def learnQ(self, state, action, reward, value):
oldValue = self.q.get((state, action), None)
if oldValue is None:
self.q[(state, action)] = reward
else:
#if state == self.exit:
# self.q[(state, action)] = self.q[(state, action)]
#else:
self.q[(state, action)] += self.alpha * (value - oldValue)
def chooseAction(self, state):
if random.random() < self.epsilon:
action = random.choice(self.actions)
else:
q = [self.getQ(state, a) for a in self.actions]
maxQ = max(q)
count = q.count(maxQ)
if count > 1:
best = [i for i in range(len(self.actions)) if q[i] == maxQ]
i = random.choice(best)
else:
i = q.index(maxQ)
action = self.actions[i]
return action
| gpl-2.0 |
macks22/scikit-learn | examples/ensemble/plot_bias_variance.py | 357 | 7324 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due the variability in the data.
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves int the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
x = x.ravel()
return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
X = np.random.rand(n_samples) * 10 - 5
X = np.sort(X)
if n_repeat == 1:
y = f(X) + np.random.normal(0.0, noise, n_samples)
else:
y = np.zeros((n_samples, n_repeat))
for i in range(n_repeat):
y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)
X = X.reshape((n_samples, 1))
return X, y
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
y_error = np.zeros(n_test)
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
plt.plot(X_test, np.mean(y_predict, axis=1), "c",
label="$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
plt.plot(X_test, y_var, "g", label="$variance(x)$"),
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.show()
| bsd-3-clause |
bthirion/nistats | examples/04_low_level_functions/write_events_file.py | 1 | 2980 | """Example of a events.tsv file generation: the neurospin/localizer events.
=============================================================================
The protocol described is the so-called "archi standard" localizer
event sequence. See Pinel et al., BMC neuroscience 2007 for reference
"""
print(__doc__)
#########################################################################
# Define the onset times in seconds. Those are typically extracted
# from the stimulation software used.
import numpy as np
onset = np.array([
0., 2.4, 8.7, 11.4, 15., 18., 20.7, 23.7, 26.7, 29.7, 33., 35.4, 39.,
41.7, 44.7, 48., 56.4, 59.7, 62.4, 69., 71.4, 75., 83.4, 87., 89.7,
96., 108., 116.7, 119.4, 122.7, 125.4, 131.4, 135., 137.7, 140.4,
143.4, 146.7, 149.4, 153., 156., 159., 162., 164.4, 167.7, 170.4,
173.7, 176.7, 188.4, 191.7, 195., 198., 201., 203.7, 207., 210.,
212.7, 215.7, 218.7, 221.4, 224.7, 227.7, 230.7, 234., 236.7, 246.,
248.4, 251.7, 254.7, 257.4, 260.4, 264., 266.7, 269.7, 275.4, 278.4,
284.4, 288., 291., 293.4, 296.7])
#########################################################################
# Associated trial types: these are numbered between 0 and 9, hence
# correspond to 10 different conditions
trial_idx = np.array(
[7, 7, 0, 2, 9, 4, 9, 3, 5, 9, 1, 6, 8, 8, 6, 6, 8, 0, 3, 4, 5, 8, 6,
2, 9, 1, 6, 5, 9, 1, 7, 8, 6, 6, 1, 2, 9, 0, 7, 1, 8, 2, 7, 8, 3, 6,
0, 0, 6, 8, 7, 7, 1, 1, 1, 5, 5, 0, 7, 0, 4, 2, 7, 9, 8, 0, 6, 3, 3,
7, 1, 0, 0, 4, 1, 9, 8, 4, 9, 9])
#########################################################################
# We may want to map these indices to explicit condition names.
# For that, we define a list of 10 strings.
condition_ids = ['horizontal checkerboard',
'vertical checkerboard',
'right button press, auditory instructions',
'left button press, auditory instructions',
'right button press, visual instructions',
'left button press, visual instructions',
'mental computation, auditory instructions',
'mental computation, visual instructions',
'visual sentence',
'auditory sentence']
trial_type = np.array([condition_ids[i] for i in trial_idx])
#########################################################################
# We also define a duration (required by BIDS conventions)
duration = np.ones_like(onset)
#########################################################################
# Form an events DataFrame from this information
import pandas as pd
events = pd.DataFrame({'trial_type': trial_type,
'onset': onset,
'duration': duration})
#########################################################################
# Export them to a tsv file
tsvfile = 'localizer_events.tsv'
events.to_csv(tsvfile, sep='\t', index=False)
print("Created the events file in %s " % tsvfile)
| bsd-3-clause |
mwort/modelmanager | tests/test_grass.py | 1 | 3067 | from __future__ import print_function, absolute_import
import unittest
import sys
import os
import subprocess
import cProfile, pstats
import shutil
import pandas as pd
from test_project import create_project
from modelmanager.plugins.grass import GrassSession, GrassAttributeTable
TEST_SETTINGS = """
grass_db = "grassproject/testgrassdb"
grass_location = "testlocation"
grass_mapset = "testmapset"
"""
class testgrasstbl(GrassAttributeTable):
vector = 'testvector@PERMANENT'
key = 'cat'
obs = pd.DataFrame({1: [12, 2, 2, 4]})
def skip_if_py3(f):
"""Unittest skip test if PY3 decorator."""
PY2 = sys.version_info < (3, 0)
return f if PY2 else lambda self: print('not run in PY3.')
class TestGrass(unittest.TestCase):
projectdir = 'grassproject'
@classmethod
def setUpClass(self):
self.project = create_project(self.projectdir, TEST_SETTINGS)
        # create test grass db
locp = os.path.join(self.project.grass_db, self.project.grass_location)
subprocess.call(('grass -e -c EPSG:4632 '+locp).split())
# create test vector
vectorascii = os.path.join(self.project.projectdir, 'testvector.ascii')
with open(vectorascii, 'w') as f:
f.write("0|1|s1 \n 1|0|s2")
subprocess.call(['grass', locp+'/PERMANENT', '--exec', 'v.in.ascii',
'in='+vectorascii, 'out=testvector', '--q'])
def test_session(self):
with GrassSession(self.project, mapset='PERMANENT') as grass:
vects = grass.list_strings('vect')
self.assertIn('testvector@PERMANENT', vects)
return
def test_attribute_table(self):
self.project.settings(testgrasstbl)
self.assertTrue(hasattr(self.project, 'testgrasstbl'))
self.assertIsInstance(self.project.testgrasstbl.obs[1], pd.Series)
self.project.testgrasstbl['new'] = 1000
self.project.testgrasstbl.write()
self.project.testgrasstbl.read()
self.assertEqual(self.project.testgrasstbl['new'].mean(), 1000)
def test_subset_attribute_table(self):
class testgrasssubsettbl(testgrasstbl):
subset_columns = ['cat', 'int_2', 'str_1']
add_attributes = None
# read
self.project.settings(testgrasssubsettbl)
self.assertTrue(hasattr(self.project, 'testgrasssubsettbl'))
ptgt = self.project.testgrasssubsettbl
cols = [ptgt.index.name]+list(ptgt.columns)
self.assertEqual(cols, testgrasssubsettbl.subset_columns)
# write
self.project.testgrasssubsettbl['int_2'] = [9, 9]
self.project.testgrasssubsettbl.write()
self.project.testgrasssubsettbl.read()
self.assertEqual(sum(self.project.testgrasssubsettbl['int_2']), 18)
@classmethod
def tearDownClass(self):
shutil.rmtree(self.projectdir)
return
if __name__ == '__main__':
cProfile.run('unittest.main()', 'pstats')
# print profile stats ordered by time
pstats.Stats('pstats').strip_dirs().sort_stats('time').print_stats(5)
| bsd-3-clause |
diogo149/treeano | examples/spatial_transformer/helpers.py | 3 | 6571 | from __future__ import division, absolute_import
from __future__ import print_function, unicode_literals
import itertools
import subprocess
import numpy as np
import sklearn.datasets
import sklearn.cross_validation
import sklearn.metrics
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
import canopy
from treeano.sandbox.nodes import spatial_transformer as st
from treeano.sandbox.nodes import batch_normalization as bn
fX = theano.config.floatX
BATCH_SIZE = 500
# use the one from lasagne:
# https://github.com/Lasagne/Recipes/blob/master/examples/spatial_transformer_network.ipynb
CLUTTERED_MNIST_PATH = ("https://s3.amazonaws.com/lasagne/recipes/datasets/"
"mnist_cluttered_60x60_6distortions.npz")
def load_data():
# download data
subprocess.call(["wget", "-N", CLUTTERED_MNIST_PATH])
data = np.load("mnist_cluttered_60x60_6distortions.npz")
X_train, X_valid, X_test = [data[n].reshape((-1, 1, 60, 60))
for n in ["x_train", "x_valid", "x_test"]]
y_train, y_valid, y_test = [np.argmax(data[n], axis=-1).astype('int32')
for n in ["y_train", "y_valid", "y_test"]]
in_train = {"x": X_train, "y": y_train}
in_valid = {"x": X_valid, "y": y_valid}
in_test = {"x": X_test, "y": y_test}
print("Train samples:", X_train.shape)
print("Validation samples:", X_valid.shape)
print("Test samples:", X_test.shape)
return in_train, in_valid, in_test
def load_network(update_scale_factor):
localization_network = tn.HyperparameterNode(
"loc",
tn.SequentialNode(
"loc_seq",
[tn.DnnMaxPoolNode("loc_pool1"),
tn.DnnConv2DWithBiasNode("loc_conv1"),
tn.DnnMaxPoolNode("loc_pool2"),
bn.NoScaleBatchNormalizationNode("loc_bn1"),
tn.ReLUNode("loc_relu1"),
tn.DnnConv2DWithBiasNode("loc_conv2"),
bn.NoScaleBatchNormalizationNode("loc_bn2"),
tn.ReLUNode("loc_relu2"),
tn.DenseNode("loc_fc1", num_units=50),
bn.NoScaleBatchNormalizationNode("loc_bn3"),
tn.ReLUNode("loc_relu3"),
tn.DenseNode("loc_fc2",
num_units=6,
inits=[treeano.inits.NormalWeightInit(std=0.001)])]),
num_filters=20,
filter_size=(5, 5),
pool_size=(2, 2),
)
st_node = st.AffineSpatialTransformerNode(
"st",
localization_network,
output_shape=(20, 20))
model = tn.HyperparameterNode(
"model",
tn.SequentialNode(
"seq",
[tn.InputNode("x", shape=(None, 1, 60, 60)),
# scaling the updates of the spatial transformer
             # seems to be very helpful, to allow the classification
# net to learn what to look for, before prematurely
# looking
tn.UpdateScaleNode(
"st_update_scale",
st_node,
update_scale_factor=update_scale_factor),
tn.Conv2DWithBiasNode("conv1"),
tn.MaxPool2DNode("mp1"),
bn.NoScaleBatchNormalizationNode("bn1"),
tn.ReLUNode("relu1"),
tn.Conv2DWithBiasNode("conv2"),
tn.MaxPool2DNode("mp2"),
bn.NoScaleBatchNormalizationNode("bn2"),
tn.ReLUNode("relu2"),
tn.GaussianDropoutNode("do1"),
tn.DenseNode("fc1"),
bn.NoScaleBatchNormalizationNode("bn3"),
tn.ReLUNode("relu3"),
tn.DenseNode("fc2", num_units=10),
tn.SoftmaxNode("pred"),
]),
num_filters=32,
filter_size=(3, 3),
pool_size=(2, 2),
num_units=256,
dropout_probability=0.5,
inits=[treeano.inits.HeUniformInit()],
bn_update_moving_stats=True,
)
with_updates = tn.HyperparameterNode(
"with_updates",
tn.AdamNode(
"adam",
{"subtree": model,
"cost": tn.TotalCostNode("cost", {
"pred": tn.ReferenceNode("pred_ref", reference="model"),
"target": tn.InputNode("y", shape=(None,), dtype="int32")},
)}),
cost_function=treeano.utils.categorical_crossentropy_i32,
learning_rate=2e-3,
)
network = with_updates.network()
network.build() # build eagerly to share weights
return network
def train_network(network, in_train, in_valid, max_iters):
valid_fn = canopy.handled_fn(
network,
[canopy.handlers.time_call(key="valid_time"),
canopy.handlers.override_hyperparameters(deterministic=True),
canopy.handlers.chunk_variables(batch_size=BATCH_SIZE,
variables=["x", "y"])],
{"x": "x", "y": "y"},
{"valid_cost": "cost", "pred": "pred"})
def validate(in_dict, result_dict):
valid_out = valid_fn(in_valid)
probabilities = valid_out.pop("pred")
predicted_classes = np.argmax(probabilities, axis=1)
result_dict["valid_accuracy"] = sklearn.metrics.accuracy_score(
in_valid["y"], predicted_classes)
result_dict.update(valid_out)
train_fn = canopy.handled_fn(
network,
[canopy.handlers.time_call(key="total_time"),
canopy.handlers.call_after_every(1, validate),
canopy.handlers.time_call(key="train_time"),
canopy.handlers.chunk_variables(batch_size=BATCH_SIZE,
variables=["x", "y"])],
{"x": "x", "y": "y"},
{"train_cost": "cost"},
include_updates=True)
def callback(results_dict):
print("{_iter:3d}: "
"train_cost: {train_cost:0.3f} "
"valid_cost: {valid_cost:0.3f} "
"valid_accuracy: {valid_accuracy:0.3f}".format(**results_dict))
print("Starting training...")
canopy.evaluate_until(fn=train_fn,
gen=itertools.repeat(in_train),
max_iters=max_iters,
callback=callback)
def test_fn(network):
return canopy.handled_fn(
network,
[canopy.handlers.override_hyperparameters(deterministic=True),
canopy.handlers.batch_pad(batch_size=BATCH_SIZE, keys=["x"]),
canopy.handlers.chunk_variables(batch_size=BATCH_SIZE,
variables=["x"])],
{"x": "x"},
{"transformed": "st"})
| apache-2.0 |
oxtopus/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/mathtext.py | 69 | 101723 | r"""
:mod:`~matplotlib.mathtext` is a module for parsing a subset of the
TeX math syntax and drawing them to a matplotlib backend.
For a tutorial of its usage see :ref:`mathtext-tutorial`. This
document is primarily concerned with implementation details.
The module uses pyparsing_ to parse the TeX expression.
.. _pyparsing: http://pyparsing.wikispaces.com/
The Bakoma distribution of the TeX Computer Modern fonts, and STIX
fonts are supported. There is experimental support for using
arbitrary fonts, but results may vary without proper tweaking and
metrics for those fonts.
If you find TeX expressions that don't parse or render properly,
please email [email protected], but please check KNOWN ISSUES below first.
"""
from __future__ import division
import os
from cStringIO import StringIO
from math import ceil
try:
set
except NameError:
from sets import Set as set
import unicodedata
from warnings import warn
from numpy import inf, isinf
import numpy as np
from matplotlib.pyparsing import Combine, Group, Optional, Forward, \
Literal, OneOrMore, ZeroOrMore, ParseException, Empty, \
ParseResults, Suppress, oneOf, StringEnd, ParseFatalException, \
FollowedBy, Regex, ParserElement
# Enable packrat parsing
ParserElement.enablePackrat()
from matplotlib.afm import AFM
from matplotlib.cbook import Bunch, get_realpath_and_stat, \
is_string_like, maxdict
from matplotlib.ft2font import FT2Font, FT2Image, KERNING_DEFAULT, LOAD_FORCE_AUTOHINT, LOAD_NO_HINTING
from matplotlib.font_manager import findfont, FontProperties
from matplotlib._mathtext_data import latex_to_bakoma, \
latex_to_standard, tex2uni, latex_to_cmex, stix_virtual_fonts
from matplotlib import get_data_path, rcParams
import matplotlib.colors as mcolors
import matplotlib._png as _png
####################
##############################################################################
# FONTS
def get_unicode_index(symbol):
"""get_unicode_index(symbol) -> integer
Return the integer index (from the Unicode table) of symbol. *symbol*
can be a single unicode character, a TeX command (i.e. r'\pi'), or a
Type1 symbol name (i.e. 'phi').
"""
# From UTF #25: U+2212 minus sign is the preferred
# representation of the unary and binary minus sign rather than
# the ASCII-derived U+002D hyphen-minus, because minus sign is
# unambiguous and because it is rendered with a more desirable
# length, usually longer than a hyphen.
if symbol == '-':
return 0x2212
try:# This will succeed if symbol is a single unicode char
return ord(symbol)
except TypeError:
pass
try:# Is symbol a TeX symbol (i.e. \alpha)
return tex2uni[symbol.strip("\\")]
except KeyError:
message = """'%(symbol)s' is not a valid Unicode character or
TeX/Type1 symbol"""%locals()
raise ValueError, message
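# Illustrative usage of get_unicode_index (comments only; the code points
# come from the tex2uni table and standard Unicode assignments):
#
#   get_unicode_index('-')        ->  0x2212  (see the special case above)
#   get_unicode_index(u'\u03b1')  ->  0x03b1  (a single unicode character)
#   get_unicode_index(r'\pi')     ->  0x03c0  (a TeX command)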
class MathtextBackend(object):
"""
The base class for the mathtext backend-specific code. The
purpose of :class:`MathtextBackend` subclasses is to interface
between mathtext and a specific matplotlib graphics backend.
Subclasses need to override the following:
- :meth:`render_glyph`
      - :meth:`render_rect_filled`
- :meth:`get_results`
And optionally, if you need to use a Freetype hinting style:
- :meth:`get_hinting_type`
"""
def __init__(self):
self.fonts_object = None
def set_canvas_size(self, w, h, d):
'Dimension the drawing canvas'
self.width = w
self.height = h
self.depth = d
def render_glyph(self, ox, oy, info):
"""
Draw a glyph described by *info* to the reference point (*ox*,
*oy*).
"""
raise NotImplementedError()
    def render_rect_filled(self, x1, y1, x2, y2):
"""
Draw a filled black rectangle from (*x1*, *y1*) to (*x2*, *y2*).
"""
raise NotImplementedError()
def get_results(self, box):
"""
Return a backend-specific tuple to return to the backend after
all processing is done.
"""
raise NotImplementedError()
def get_hinting_type(self):
"""
Get the Freetype hinting type to use with this particular
backend.
"""
return LOAD_NO_HINTING
class MathtextBackendBbox(MathtextBackend):
"""
A backend whose only purpose is to get a precise bounding box.
Only required for the Agg backend.
"""
def __init__(self, real_backend):
MathtextBackend.__init__(self)
self.bbox = [0, 0, 0, 0]
self.real_backend = real_backend
def _update_bbox(self, x1, y1, x2, y2):
self.bbox = [min(self.bbox[0], x1),
min(self.bbox[1], y1),
max(self.bbox[2], x2),
max(self.bbox[3], y2)]
def render_glyph(self, ox, oy, info):
self._update_bbox(ox + info.metrics.xmin,
oy - info.metrics.ymax,
ox + info.metrics.xmax,
oy - info.metrics.ymin)
def render_rect_filled(self, x1, y1, x2, y2):
self._update_bbox(x1, y1, x2, y2)
def get_results(self, box):
orig_height = box.height
orig_depth = box.depth
ship(0, 0, box)
bbox = self.bbox
bbox = [bbox[0] - 1, bbox[1] - 1, bbox[2] + 1, bbox[3] + 1]
self._switch_to_real_backend()
self.fonts_object.set_canvas_size(
bbox[2] - bbox[0],
(bbox[3] - bbox[1]) - orig_depth,
(bbox[3] - bbox[1]) - orig_height)
ship(-bbox[0], -bbox[1], box)
return self.fonts_object.get_results(box)
def get_hinting_type(self):
return self.real_backend.get_hinting_type()
def _switch_to_real_backend(self):
self.fonts_object.mathtext_backend = self.real_backend
self.real_backend.fonts_object = self.fonts_object
self.real_backend.ox = self.bbox[0]
self.real_backend.oy = self.bbox[1]
class MathtextBackendAggRender(MathtextBackend):
"""
Render glyphs and rectangles to an FTImage buffer, which is later
transferred to the Agg image by the Agg backend.
"""
def __init__(self):
self.ox = 0
self.oy = 0
self.image = None
MathtextBackend.__init__(self)
def set_canvas_size(self, w, h, d):
MathtextBackend.set_canvas_size(self, w, h, d)
self.image = FT2Image(ceil(w), ceil(h + d))
def render_glyph(self, ox, oy, info):
info.font.draw_glyph_to_bitmap(
self.image, ox, oy - info.metrics.ymax, info.glyph)
def render_rect_filled(self, x1, y1, x2, y2):
height = max(int(y2 - y1) - 1, 0)
if height == 0:
center = (y2 + y1) / 2.0
y = int(center - (height + 1) / 2.0)
else:
y = int(y1)
self.image.draw_rect_filled(int(x1), y, ceil(x2), y + height)
def get_results(self, box):
return (self.ox,
self.oy,
self.width,
self.height + self.depth,
self.depth,
self.image,
self.fonts_object.get_used_characters())
def get_hinting_type(self):
return LOAD_FORCE_AUTOHINT
def MathtextBackendAgg():
return MathtextBackendBbox(MathtextBackendAggRender())
class MathtextBackendBitmapRender(MathtextBackendAggRender):
def get_results(self, box):
return self.image, self.depth
def MathtextBackendBitmap():
"""
A backend to generate standalone mathtext images. No additional
matplotlib backend is required.
"""
return MathtextBackendBbox(MathtextBackendBitmapRender())
class MathtextBackendPs(MathtextBackend):
"""
Store information to write a mathtext rendering to the PostScript
backend.
"""
def __init__(self):
self.pswriter = StringIO()
self.lastfont = None
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
postscript_name = info.postscript_name
fontsize = info.fontsize
symbol_name = info.symbol_name
if (postscript_name, fontsize) != self.lastfont:
ps = """/%(postscript_name)s findfont
%(fontsize)s scalefont
setfont
""" % locals()
self.lastfont = postscript_name, fontsize
self.pswriter.write(ps)
ps = """%(ox)f %(oy)f moveto
/%(symbol_name)s glyphshow\n
""" % locals()
self.pswriter.write(ps)
def render_rect_filled(self, x1, y1, x2, y2):
ps = "%f %f %f %f rectfill\n" % (x1, self.height - y2, x2 - x1, y2 - y1)
self.pswriter.write(ps)
def get_results(self, box):
ship(0, -self.depth, box)
#print self.depth
return (self.width,
self.height + self.depth,
self.depth,
self.pswriter,
self.fonts_object.get_used_characters())
class MathtextBackendPdf(MathtextBackend):
"""
Store information to write a mathtext rendering to the PDF
backend.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
filename = info.font.fname
oy = self.height - oy + info.offset
self.glyphs.append(
(ox, oy, filename, info.fontsize,
info.num, info.symbol_name))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append((x1, self.height - y2, x2 - x1, y2 - y1))
def get_results(self, box):
ship(0, -self.depth, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects,
self.fonts_object.get_used_characters())
class MathtextBackendSvg(MathtextBackend):
"""
Store information to write a mathtext rendering to the SVG
backend.
"""
def __init__(self):
self.svg_glyphs = []
self.svg_rects = []
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
thetext = unichr(info.num)
self.svg_glyphs.append(
(info.font, info.fontsize, thetext, ox, oy, info.metrics))
def render_rect_filled(self, x1, y1, x2, y2):
self.svg_rects.append(
(x1, self.height - y1 + 1, x2 - x1, y2 - y1))
def get_results(self, box):
ship(0, -self.depth, box)
svg_elements = Bunch(svg_glyphs = self.svg_glyphs,
svg_rects = self.svg_rects)
return (self.width,
self.height + self.depth,
self.depth,
svg_elements,
self.fonts_object.get_used_characters())
class MathtextBackendCairo(MathtextBackend):
"""
Store information to write a mathtext rendering to the Cairo
backend.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
oy = oy - info.offset - self.height
thetext = unichr(info.num)
self.glyphs.append(
(info.font, info.fontsize, thetext, ox, oy))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append(
(x1, y1 - self.height, x2 - x1, y2 - y1))
def get_results(self, box):
ship(0, -self.depth, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects)
class Fonts(object):
"""
An abstract base class for a system of fonts to use for mathtext.
The class must be able to take symbol keys and font file names and
return the character metrics. It also delegates to a backend class
to do the actual drawing.
"""
def __init__(self, default_font_prop, mathtext_backend):
"""
*default_font_prop*: A
:class:`~matplotlib.font_manager.FontProperties` object to use
for the default non-math font, or the base font for Unicode
(generic) font rendering.
*mathtext_backend*: A subclass of :class:`MathTextBackend`
used to delegate the actual rendering.
"""
self.default_font_prop = default_font_prop
self.mathtext_backend = mathtext_backend
# Make these classes doubly-linked
self.mathtext_backend.fonts_object = self
self.used_characters = {}
def destroy(self):
"""
Fix any cyclical references before the object is about
to be destroyed.
"""
self.used_characters = None
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
"""
Get the kerning distance for font between *sym1* and *sym2*.
*fontX*: one of the TeX font names::
tt, it, rm, cal, sf, bf or default (non-math)
*fontclassX*: TODO
*symX*: a symbol in raw TeX form. e.g. '1', 'x' or '\sigma'
*fontsizeX*: the fontsize in points
*dpi*: the current dots-per-inch
"""
return 0.
def get_metrics(self, font, font_class, sym, fontsize, dpi):
"""
*font*: one of the TeX font names::
tt, it, rm, cal, sf, bf or default (non-math)
*font_class*: TODO
*sym*: a symbol in raw TeX form. e.g. '1', 'x' or '\sigma'
*fontsize*: font size in points
*dpi*: current dots-per-inch
Returns an object with the following attributes:
- *advance*: The advance distance (in points) of the glyph.
- *height*: The height of the glyph in points.
- *width*: The width of the glyph in points.
- *xmin*, *xmax*, *ymin*, *ymax* - the ink rectangle of the glyph
- *iceberg* - the distance from the baseline to the top of
the glyph. This corresponds to TeX's definition of
"height".
"""
info = self._get_info(font, font_class, sym, fontsize, dpi)
return info.metrics
def set_canvas_size(self, w, h, d):
"""
Set the size of the buffer used to render the math expression.
Only really necessary for the bitmap backends.
"""
self.width, self.height, self.depth = ceil(w), ceil(h), ceil(d)
self.mathtext_backend.set_canvas_size(self.width, self.height, self.depth)
def render_glyph(self, ox, oy, facename, font_class, sym, fontsize, dpi):
"""
Draw a glyph at
- *ox*, *oy*: position
- *facename*: One of the TeX face names
- *font_class*:
- *sym*: TeX symbol name or single character
- *fontsize*: fontsize in points
- *dpi*: The dpi to draw at.
"""
info = self._get_info(facename, font_class, sym, fontsize, dpi)
realpath, stat_key = get_realpath_and_stat(info.font.fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].add(info.num)
self.mathtext_backend.render_glyph(ox, oy, info)
def render_rect_filled(self, x1, y1, x2, y2):
"""
Draw a filled rectangle from (*x1*, *y1*) to (*x2*, *y2*).
"""
self.mathtext_backend.render_rect_filled(x1, y1, x2, y2)
def get_xheight(self, font, fontsize, dpi):
"""
Get the xheight for the given *font* and *fontsize*.
"""
raise NotImplementedError()
def get_underline_thickness(self, font, fontsize, dpi):
"""
Get the line thickness that matches the given font. Used as a
base unit for drawing lines such as in a fraction or radical.
"""
raise NotImplementedError()
def get_used_characters(self):
"""
Get the set of characters that were used in the math
expression. Used by backends that need to subset fonts so
they know which glyphs to include.
"""
return self.used_characters
def get_results(self, box):
"""
Get the data needed by the backend to render the math
expression. The return value is backend-specific.
"""
return self.mathtext_backend.get_results(box)
def get_sized_alternatives_for_symbol(self, fontname, sym):
"""
Override if your font provides multiple sizes of the same
symbol. Should return a list of symbols matching *sym* in
various sizes. The expression renderer will select the most
appropriate size for a given situation from this list.
"""
return [(fontname, sym)]
class TruetypeFonts(Fonts):
"""
A generic base class for all font setups that use Truetype fonts
(through FT2Font).
"""
class CachedFont:
def __init__(self, font):
self.font = font
self.charmap = font.get_charmap()
self.glyphmap = dict(
[(glyphind, ccode) for ccode, glyphind in self.charmap.iteritems()])
def __repr__(self):
return repr(self.font)
def __init__(self, default_font_prop, mathtext_backend):
Fonts.__init__(self, default_font_prop, mathtext_backend)
self.glyphd = {}
self._fonts = {}
filename = findfont(default_font_prop)
default_font = self.CachedFont(FT2Font(str(filename)))
self._fonts['default'] = default_font
def destroy(self):
self.glyphd = None
Fonts.destroy(self)
def _get_font(self, font):
if font in self.fontmap:
basename = self.fontmap[font]
else:
basename = font
cached_font = self._fonts.get(basename)
if cached_font is None:
font = FT2Font(basename)
cached_font = self.CachedFont(font)
self._fonts[basename] = cached_font
self._fonts[font.postscript_name] = cached_font
self._fonts[font.postscript_name.lower()] = cached_font
return cached_font
def _get_offset(self, cached_font, glyph, fontsize, dpi):
if cached_font.font.postscript_name == 'Cmex10':
return glyph.height/64.0/2.0 + 256.0/64.0 * dpi/72.0
return 0.
def _get_info(self, fontname, font_class, sym, fontsize, dpi):
key = fontname, font_class, sym, fontsize, dpi
bunch = self.glyphd.get(key)
if bunch is not None:
return bunch
cached_font, num, symbol_name, fontsize, slanted = \
self._get_glyph(fontname, font_class, sym, fontsize)
font = cached_font.font
font.set_size(fontsize, dpi)
glyph = font.load_char(
num,
flags=self.mathtext_backend.get_hinting_type())
xmin, ymin, xmax, ymax = [val/64.0 for val in glyph.bbox]
offset = self._get_offset(cached_font, glyph, fontsize, dpi)
metrics = Bunch(
advance = glyph.linearHoriAdvance/65536.0,
height = glyph.height/64.0,
width = glyph.width/64.0,
xmin = xmin,
xmax = xmax,
ymin = ymin+offset,
ymax = ymax+offset,
# iceberg is the equivalent of TeX's "height"
iceberg = glyph.horiBearingY/64.0 + offset,
slanted = slanted
)
result = self.glyphd[key] = Bunch(
font = font,
fontsize = fontsize,
postscript_name = font.postscript_name,
metrics = metrics,
symbol_name = symbol_name,
num = num,
glyph = glyph,
offset = offset
)
return result
def get_xheight(self, font, fontsize, dpi):
cached_font = self._get_font(font)
cached_font.font.set_size(fontsize, dpi)
pclt = cached_font.font.get_sfnt_table('pclt')
if pclt is None:
# Some fonts don't store the xHeight, so we do a poor man's xHeight
metrics = self.get_metrics(font, 'it', 'x', fontsize, dpi)
return metrics.iceberg
xHeight = (pclt['xHeight'] / 64.0) * (fontsize / 12.0) * (dpi / 100.0)
return xHeight
def get_underline_thickness(self, font, fontsize, dpi):
# This function used to grab underline thickness from the font
# metrics, but that information is just too un-reliable, so it
# is now hardcoded.
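        # For example (illustrative): with fontsize=12 and dpi=72 this
        # evaluates to ((0.75 / 12.0) * 12 * 72) / 72.0 == 0.75.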
return ((0.75 / 12.0) * fontsize * dpi) / 72.0
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
if font1 == font2 and fontsize1 == fontsize2:
info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
font = info1.font
return font.get_kerning(info1.num, info2.num, KERNING_DEFAULT) / 64.0
return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi)
class BakomaFonts(TruetypeFonts):
"""
Use the Bakoma TrueType fonts for rendering.
Symbols are strewn about a number of font files, each of which has
its own proprietary 8-bit encoding.
"""
_fontmap = { 'cal' : 'cmsy10',
'rm' : 'cmr10',
'tt' : 'cmtt10',
'it' : 'cmmi10',
'bf' : 'cmb10',
'sf' : 'cmss10',
'ex' : 'cmex10'
}
fontmap = {}
def __init__(self, *args, **kwargs):
self._stix_fallback = StixFonts(*args, **kwargs)
TruetypeFonts.__init__(self, *args, **kwargs)
if not len(self.fontmap):
for key, val in self._fontmap.iteritems():
fullpath = findfont(val)
self.fontmap[key] = fullpath
self.fontmap[val] = fullpath
_slanted_symbols = set(r"\int \oint".split())
def _get_glyph(self, fontname, font_class, sym, fontsize):
symbol_name = None
if fontname in self.fontmap and sym in latex_to_bakoma:
basename, num = latex_to_bakoma[sym]
slanted = (basename == "cmmi10") or sym in self._slanted_symbols
try:
cached_font = self._get_font(basename)
except RuntimeError:
pass
else:
symbol_name = cached_font.font.get_glyph_name(num)
num = cached_font.glyphmap[num]
elif len(sym) == 1:
slanted = (fontname == "it")
try:
cached_font = self._get_font(fontname)
except RuntimeError:
pass
else:
num = ord(sym)
gid = cached_font.charmap.get(num)
if gid is not None:
symbol_name = cached_font.font.get_glyph_name(
cached_font.charmap[num])
if symbol_name is None:
return self._stix_fallback._get_glyph(
fontname, font_class, sym, fontsize)
return cached_font, num, symbol_name, fontsize, slanted
# The Bakoma fonts contain many pre-sized alternatives for the
# delimiters. The AutoSizedChar class will use these alternatives
# and select the best (closest sized) glyph.
_size_alternatives = {
'(' : [('rm', '('), ('ex', '\xa1'), ('ex', '\xb3'),
('ex', '\xb5'), ('ex', '\xc3')],
')' : [('rm', ')'), ('ex', '\xa2'), ('ex', '\xb4'),
('ex', '\xb6'), ('ex', '\x21')],
'{' : [('cal', '{'), ('ex', '\xa9'), ('ex', '\x6e'),
('ex', '\xbd'), ('ex', '\x28')],
'}' : [('cal', '}'), ('ex', '\xaa'), ('ex', '\x6f'),
('ex', '\xbe'), ('ex', '\x29')],
# The fourth size of '[' is mysteriously missing from the BaKoMa
        # font, so I've omitted it for both '[' and ']'
'[' : [('rm', '['), ('ex', '\xa3'), ('ex', '\x68'),
('ex', '\x22')],
']' : [('rm', ']'), ('ex', '\xa4'), ('ex', '\x69'),
('ex', '\x23')],
r'\lfloor' : [('ex', '\xa5'), ('ex', '\x6a'),
('ex', '\xb9'), ('ex', '\x24')],
r'\rfloor' : [('ex', '\xa6'), ('ex', '\x6b'),
('ex', '\xba'), ('ex', '\x25')],
r'\lceil' : [('ex', '\xa7'), ('ex', '\x6c'),
('ex', '\xbb'), ('ex', '\x26')],
r'\rceil' : [('ex', '\xa8'), ('ex', '\x6d'),
('ex', '\xbc'), ('ex', '\x27')],
r'\langle' : [('ex', '\xad'), ('ex', '\x44'),
('ex', '\xbf'), ('ex', '\x2a')],
r'\rangle' : [('ex', '\xae'), ('ex', '\x45'),
('ex', '\xc0'), ('ex', '\x2b')],
r'\__sqrt__' : [('ex', '\x70'), ('ex', '\x71'),
('ex', '\x72'), ('ex', '\x73')],
r'\backslash': [('ex', '\xb2'), ('ex', '\x2f'),
('ex', '\xc2'), ('ex', '\x2d')],
r'/' : [('rm', '/'), ('ex', '\xb1'), ('ex', '\x2e'),
('ex', '\xcb'), ('ex', '\x2c')],
r'\widehat' : [('rm', '\x5e'), ('ex', '\x62'), ('ex', '\x63'),
('ex', '\x64')],
r'\widetilde': [('rm', '\x7e'), ('ex', '\x65'), ('ex', '\x66'),
('ex', '\x67')],
r'<' : [('cal', 'h'), ('ex', 'D')],
r'>' : [('cal', 'i'), ('ex', 'E')]
}
    for alias, target in [(r'\leftparen', '('),
                          (r'\rightparen', ')'),
                          (r'\leftbrace', '{'),
                          (r'\rightbrace', '}'),
                          (r'\leftbracket', '['),
                          (r'\rightbracket', ']')]:
_size_alternatives[alias] = _size_alternatives[target]
def get_sized_alternatives_for_symbol(self, fontname, sym):
return self._size_alternatives.get(sym, [(fontname, sym)])
class UnicodeFonts(TruetypeFonts):
"""
An abstract base class for handling Unicode fonts.
While some reasonably complete Unicode fonts (such as DejaVu) may
work in some situations, the only Unicode font I'm aware of with a
complete set of math symbols is STIX.
This class will "fallback" on the Bakoma fonts when a required
symbol can not be found in the font.
"""
fontmap = {}
use_cmex = True
def __init__(self, *args, **kwargs):
# This must come first so the backend's owner is set correctly
if rcParams['mathtext.fallback_to_cm']:
self.cm_fallback = BakomaFonts(*args, **kwargs)
else:
self.cm_fallback = None
TruetypeFonts.__init__(self, *args, **kwargs)
if not len(self.fontmap):
for texfont in "cal rm tt it bf sf".split():
prop = rcParams['mathtext.' + texfont]
font = findfont(prop)
self.fontmap[texfont] = font
prop = FontProperties('cmex10')
font = findfont(prop)
self.fontmap['ex'] = font
_slanted_symbols = set(r"\int \oint".split())
def _map_virtual_font(self, fontname, font_class, uniindex):
return fontname, uniindex
def _get_glyph(self, fontname, font_class, sym, fontsize):
found_symbol = False
if self.use_cmex:
uniindex = latex_to_cmex.get(sym)
if uniindex is not None:
fontname = 'ex'
found_symbol = True
if not found_symbol:
try:
uniindex = get_unicode_index(sym)
found_symbol = True
except ValueError:
uniindex = ord('?')
warn("No TeX to unicode mapping for '%s'" %
sym.encode('ascii', 'backslashreplace'),
MathTextWarning)
fontname, uniindex = self._map_virtual_font(
fontname, font_class, uniindex)
# Only characters in the "Letter" class should be italicized in 'it'
# mode. Greek capital letters should be Roman.
if found_symbol:
new_fontname = fontname
if fontname == 'it':
if uniindex < 0x10000:
unistring = unichr(uniindex)
if (not unicodedata.category(unistring)[0] == "L"
or unicodedata.name(unistring).startswith("GREEK CAPITAL")):
new_fontname = 'rm'
slanted = (new_fontname == 'it') or sym in self._slanted_symbols
found_symbol = False
try:
cached_font = self._get_font(new_fontname)
except RuntimeError:
pass
else:
try:
glyphindex = cached_font.charmap[uniindex]
found_symbol = True
except KeyError:
pass
if not found_symbol:
if self.cm_fallback:
warn("Substituting with a symbol from Computer Modern.",
MathTextWarning)
return self.cm_fallback._get_glyph(
fontname, 'it', sym, fontsize)
else:
if fontname == 'it' and isinstance(self, StixFonts):
return self._get_glyph('rm', font_class, sym, fontsize)
warn("Font '%s' does not have a glyph for '%s'" %
(fontname, sym.encode('ascii', 'backslashreplace')),
MathTextWarning)
warn("Substituting with a dummy symbol.", MathTextWarning)
fontname = 'rm'
new_fontname = fontname
cached_font = self._get_font(fontname)
uniindex = 0xA4 # currency character, for lack of anything better
glyphindex = cached_font.charmap[uniindex]
slanted = False
symbol_name = cached_font.font.get_glyph_name(glyphindex)
return cached_font, uniindex, symbol_name, fontsize, slanted
def get_sized_alternatives_for_symbol(self, fontname, sym):
if self.cm_fallback:
return self.cm_fallback.get_sized_alternatives_for_symbol(
fontname, sym)
return [(fontname, sym)]
class StixFonts(UnicodeFonts):
"""
A font handling class for the STIX fonts.
In addition to what UnicodeFonts provides, this class:
- supports "virtual fonts" which are complete alpha numeric
character sets with different font styles at special Unicode
code points, such as "Blackboard".
- handles sized alternative characters for the STIXSizeX fonts.
"""
_fontmap = { 'rm' : 'STIXGeneral',
'it' : 'STIXGeneral:italic',
'bf' : 'STIXGeneral:weight=bold',
'nonunirm' : 'STIXNonUnicode',
'nonuniit' : 'STIXNonUnicode:italic',
'nonunibf' : 'STIXNonUnicode:weight=bold',
0 : 'STIXGeneral',
1 : 'STIXSize1',
2 : 'STIXSize2',
3 : 'STIXSize3',
4 : 'STIXSize4',
5 : 'STIXSize5'
}
fontmap = {}
use_cmex = False
cm_fallback = False
_sans = False
def __init__(self, *args, **kwargs):
TruetypeFonts.__init__(self, *args, **kwargs)
if not len(self.fontmap):
for key, name in self._fontmap.iteritems():
fullpath = findfont(name)
self.fontmap[key] = fullpath
self.fontmap[name] = fullpath
def _map_virtual_font(self, fontname, font_class, uniindex):
# Handle these "fonts" that are actually embedded in
# other fonts.
mapping = stix_virtual_fonts.get(fontname)
if self._sans and mapping is None:
mapping = stix_virtual_fonts['sf']
doing_sans_conversion = True
else:
doing_sans_conversion = False
if mapping is not None:
if isinstance(mapping, dict):
mapping = mapping[font_class]
# Binary search for the source glyph
lo = 0
hi = len(mapping)
while lo < hi:
mid = (lo+hi)//2
range = mapping[mid]
if uniindex < range[0]:
hi = mid
elif uniindex <= range[1]:
break
else:
lo = mid + 1
if uniindex >= range[0] and uniindex <= range[1]:
uniindex = uniindex - range[0] + range[3]
fontname = range[2]
elif not doing_sans_conversion:
# This will generate a dummy character
uniindex = 0x1
fontname = 'it'
# Handle private use area glyphs
if (fontname in ('it', 'rm', 'bf') and
uniindex >= 0xe000 and uniindex <= 0xf8ff):
fontname = 'nonuni' + fontname
return fontname, uniindex
_size_alternatives = {}
def get_sized_alternatives_for_symbol(self, fontname, sym):
alternatives = self._size_alternatives.get(sym)
if alternatives:
return alternatives
alternatives = []
try:
uniindex = get_unicode_index(sym)
except ValueError:
return [(fontname, sym)]
fix_ups = {
ord('<'): 0x27e8,
ord('>'): 0x27e9 }
uniindex = fix_ups.get(uniindex, uniindex)
for i in range(6):
cached_font = self._get_font(i)
glyphindex = cached_font.charmap.get(uniindex)
if glyphindex is not None:
alternatives.append((i, unichr(uniindex)))
self._size_alternatives[sym] = alternatives
return alternatives
class StixSansFonts(StixFonts):
"""
A font handling class for the STIX fonts (that uses sans-serif
characters by default).
"""
_sans = True
class StandardPsFonts(Fonts):
"""
Use the standard postscript fonts for rendering to backend_ps
Unlike the other font classes, BakomaFont and UnicodeFont, this
one requires the Ps backend.
"""
basepath = os.path.join( get_data_path(), 'fonts', 'afm' )
fontmap = { 'cal' : 'pzcmi8a', # Zapf Chancery
'rm' : 'pncr8a', # New Century Schoolbook
'tt' : 'pcrr8a', # Courier
'it' : 'pncri8a', # New Century Schoolbook Italic
'sf' : 'phvr8a', # Helvetica
'bf' : 'pncb8a', # New Century Schoolbook Bold
None : 'psyr' # Symbol
}
def __init__(self, default_font_prop):
Fonts.__init__(self, default_font_prop, MathtextBackendPs())
self.glyphd = {}
self.fonts = {}
filename = findfont(default_font_prop, fontext='afm')
default_font = AFM(file(filename, 'r'))
default_font.fname = filename
self.fonts['default'] = default_font
self.pswriter = StringIO()
def _get_font(self, font):
if font in self.fontmap:
basename = self.fontmap[font]
else:
basename = font
cached_font = self.fonts.get(basename)
if cached_font is None:
fname = os.path.join(self.basepath, basename + ".afm")
cached_font = AFM(file(fname, 'r'))
cached_font.fname = fname
self.fonts[basename] = cached_font
self.fonts[cached_font.get_fontname()] = cached_font
return cached_font
def _get_info (self, fontname, font_class, sym, fontsize, dpi):
'load the cmfont, metrics and glyph with caching'
key = fontname, sym, fontsize, dpi
tup = self.glyphd.get(key)
if tup is not None:
return tup
# Only characters in the "Letter" class should really be italicized.
# This class includes greek letters, so we're ok
if (fontname == 'it' and
(len(sym) > 1 or
not unicodedata.category(unicode(sym)).startswith("L"))):
fontname = 'rm'
found_symbol = False
if sym in latex_to_standard:
fontname, num = latex_to_standard[sym]
glyph = chr(num)
found_symbol = True
elif len(sym) == 1:
glyph = sym
num = ord(glyph)
found_symbol = True
else:
warn("No TeX to built-in Postscript mapping for '%s'" % sym,
MathTextWarning)
slanted = (fontname == 'it')
font = self._get_font(fontname)
if found_symbol:
try:
symbol_name = font.get_name_char(glyph)
except KeyError:
warn("No glyph in standard Postscript font '%s' for '%s'" %
(font.postscript_name, sym),
MathTextWarning)
found_symbol = False
if not found_symbol:
glyph = sym = '?'
num = ord(glyph)
symbol_name = font.get_name_char(glyph)
offset = 0
scale = 0.001 * fontsize
xmin, ymin, xmax, ymax = [val * scale
for val in font.get_bbox_char(glyph)]
metrics = Bunch(
advance = font.get_width_char(glyph) * scale,
width = font.get_width_char(glyph) * scale,
height = font.get_height_char(glyph) * scale,
xmin = xmin,
xmax = xmax,
ymin = ymin+offset,
ymax = ymax+offset,
# iceberg is the equivalent of TeX's "height"
iceberg = ymax + offset,
slanted = slanted
)
self.glyphd[key] = Bunch(
font = font,
fontsize = fontsize,
postscript_name = font.get_fontname(),
metrics = metrics,
symbol_name = symbol_name,
num = num,
glyph = glyph,
offset = offset
)
return self.glyphd[key]
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
if font1 == font2 and fontsize1 == fontsize2:
info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
font = info1.font
return (font.get_kern_dist(info1.glyph, info2.glyph)
* 0.001 * fontsize1)
return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi)
def get_xheight(self, font, fontsize, dpi):
cached_font = self._get_font(font)
return cached_font.get_xheight() * 0.001 * fontsize
def get_underline_thickness(self, font, fontsize, dpi):
cached_font = self._get_font(font)
return cached_font.get_underline_thickness() * 0.001 * fontsize
##############################################################################
# TeX-LIKE BOX MODEL
# The following is based directly on the document 'woven' from the
# TeX82 source code. This information is also available in printed
# form:
#
# Knuth, Donald E.. 1986. Computers and Typesetting, Volume B:
# TeX: The Program. Addison-Wesley Professional.
#
# The most relevant "chapters" are:
# Data structures for boxes and their friends
# Shipping pages out (Ship class)
# Packaging (hpack and vpack)
# Data structures for math mode
# Subroutines for math mode
# Typesetting math formulas
#
# Many of the docstrings below refer to a numbered "node" in that
# book, e.g. node123
#
# Note that (as TeX) y increases downward, unlike many other parts of
# matplotlib.
# How much text shrinks when going to the next-smallest level. GROW_FACTOR
# must be the inverse of SHRINK_FACTOR.
SHRINK_FACTOR = 0.7
GROW_FACTOR = 1.0 / SHRINK_FACTOR
# The number of different sizes of chars to use, beyond which they will not
# get any smaller
NUM_SIZE_LEVELS = 4
# Percentage of x-height of additional horiz. space after sub/superscripts
SCRIPT_SPACE = 0.2
# Percentage of x-height that sub/superscripts drop below the baseline
SUBDROP = 0.3
# Percentage of x-height that superscripts drop below the baseline
SUP1 = 0.5
# Percentage of x-height that subscripts drop below the baseline
SUB1 = 0.0
# Percentage of x-height that superscripts are offset relative to the subscript
DELTA = 0.18
class MathTextWarning(Warning):
pass
class Node(object):
"""
A node in the TeX box model
"""
def __init__(self):
self.size = 0
def __repr__(self):
return self.__internal_repr__()
def __internal_repr__(self):
return self.__class__.__name__
def get_kerning(self, next):
return 0.0
def shrink(self):
"""
Shrinks one level smaller. There are only three levels of
sizes, after which things will no longer get smaller.
"""
self.size += 1
def grow(self):
"""
Grows one level larger. There is no limit to how big
something can get.
"""
self.size -= 1
def render(self, x, y):
pass
class Box(Node):
"""
Represents any node with a physical location.
"""
def __init__(self, width, height, depth):
Node.__init__(self)
self.width = width
self.height = height
self.depth = depth
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.width *= SHRINK_FACTOR
self.height *= SHRINK_FACTOR
self.depth *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.width *= GROW_FACTOR
self.height *= GROW_FACTOR
self.depth *= GROW_FACTOR
def render(self, x1, y1, x2, y2):
pass
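# Illustrative example of the size mechanics above (comments only):
#
#   box = Box(width=10., height=5., depth=2.)
#   box.shrink()   # size 1 < NUM_SIZE_LEVELS, so dimensions scale by
#                  # SHRINK_FACTOR: width 7.0, height 3.5, depth 1.4
#   box.grow()     # scales by GROW_FACTOR, back to 10.0, 5.0, 2.0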
class Vbox(Box):
"""
A box with only height (zero width).
"""
def __init__(self, height, depth):
Box.__init__(self, 0., height, depth)
class Hbox(Box):
"""
A box with only width (zero height and depth).
"""
def __init__(self, width):
Box.__init__(self, width, 0., 0.)
class Char(Node):
"""
Represents a single character. Unlike TeX, the font information
and metrics are stored with each :class:`Char` to make it easier
to lookup the font metrics when needed. Note that TeX boxes have
a width, height, and depth, unlike Type1 and Truetype which use a
full bounding box and an advance in the x-direction. The metrics
must be converted to the TeX way, and the advance (if different
from width) must be converted into a :class:`Kern` node when the
:class:`Char` is added to its parent :class:`Hlist`.
"""
def __init__(self, c, state):
Node.__init__(self)
self.c = c
self.font_output = state.font_output
assert isinstance(state.font, (str, unicode, int))
self.font = state.font
self.font_class = state.font_class
self.fontsize = state.fontsize
self.dpi = state.dpi
# The real width, height and depth will be set during the
# pack phase, after we know the real fontsize
self._update_metrics()
def __internal_repr__(self):
return '`%s`' % self.c
def _update_metrics(self):
metrics = self._metrics = self.font_output.get_metrics(
self.font, self.font_class, self.c, self.fontsize, self.dpi)
if self.c == ' ':
self.width = metrics.advance
else:
self.width = metrics.width
self.height = metrics.iceberg
self.depth = -(metrics.iceberg - metrics.height)
def is_slanted(self):
return self._metrics.slanted
def get_kerning(self, next):
"""
Return the amount of kerning between this and the given
character. Called when characters are strung together into
:class:`Hlist` to create :class:`Kern` nodes.
"""
advance = self._metrics.advance - self.width
kern = 0.
if isinstance(next, Char):
kern = self.font_output.get_kern(
self.font, self.font_class, self.c, self.fontsize,
next.font, next.font_class, next.c, next.fontsize,
self.dpi)
return advance + kern
def render(self, x, y):
"""
Render the character to the canvas
"""
self.font_output.render_glyph(
x, y,
self.font, self.font_class, self.c, self.fontsize, self.dpi)
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.fontsize *= SHRINK_FACTOR
self.width *= SHRINK_FACTOR
self.height *= SHRINK_FACTOR
self.depth *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.fontsize *= GROW_FACTOR
self.width *= GROW_FACTOR
self.height *= GROW_FACTOR
self.depth *= GROW_FACTOR
class Accent(Char):
"""
The font metrics need to be dealt with differently for accents,
since they are already offset correctly from the baseline in
TrueType fonts.
"""
def _update_metrics(self):
metrics = self._metrics = self.font_output.get_metrics(
self.font, self.font_class, self.c, self.fontsize, self.dpi)
self.width = metrics.xmax - metrics.xmin
self.height = metrics.ymax - metrics.ymin
self.depth = 0
def shrink(self):
Char.shrink(self)
self._update_metrics()
def grow(self):
Char.grow(self)
self._update_metrics()
def render(self, x, y):
"""
Render the character to the canvas.
"""
self.font_output.render_glyph(
x - self._metrics.xmin, y + self._metrics.ymin,
self.font, self.font_class, self.c, self.fontsize, self.dpi)
class List(Box):
"""
A list of nodes (either horizontal or vertical).
"""
def __init__(self, elements):
Box.__init__(self, 0., 0., 0.)
self.shift_amount = 0. # An arbitrary offset
self.children = elements # The child nodes of this list
# The following parameters are set in the vpack and hpack functions
self.glue_set = 0. # The glue setting of this list
self.glue_sign = 0 # 0: normal, -1: shrinking, 1: stretching
self.glue_order = 0 # The order of infinity (0 - 3) for the glue
def __repr__(self):
return '[%s <%.02f %.02f %.02f %.02f> %s]' % (
self.__internal_repr__(),
self.width, self.height,
self.depth, self.shift_amount,
' '.join([repr(x) for x in self.children]))
def _determine_order(self, totals):
"""
A helper function to determine the highest order of glue
used by the members of this list. Used by vpack and hpack.
"""
o = 0
for i in range(len(totals) - 1, 0, -1):
if totals[i] != 0.0:
o = i
break
return o
def _set_glue(self, x, sign, totals, error_type):
o = self._determine_order(totals)
self.glue_order = o
self.glue_sign = sign
if totals[o] != 0.:
self.glue_set = x / totals[o]
else:
self.glue_sign = 0
self.glue_ratio = 0.
if o == 0:
if len(self.children):
warn("%s %s: %r" % (error_type, self.__class__.__name__, self),
MathTextWarning)
def shrink(self):
for child in self.children:
child.shrink()
Box.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.shift_amount *= SHRINK_FACTOR
self.glue_set *= SHRINK_FACTOR
def grow(self):
for child in self.children:
child.grow()
Box.grow(self)
self.shift_amount *= GROW_FACTOR
self.glue_set *= GROW_FACTOR
class Hlist(List):
"""
A horizontal list of boxes.
"""
def __init__(self, elements, w=0., m='additional', do_kern=True):
List.__init__(self, elements)
if do_kern:
self.kern()
self.hpack()
def kern(self):
"""
Insert :class:`Kern` nodes between :class:`Char` nodes to set
kerning. The :class:`Char` nodes themselves determine the
amount of kerning they need (in :meth:`~Char.get_kerning`),
and this function just creates the linked list in the correct
way.
"""
new_children = []
num_children = len(self.children)
if num_children:
for i in range(num_children):
elem = self.children[i]
if i < num_children - 1:
next = self.children[i + 1]
else:
next = None
new_children.append(elem)
kerning_distance = elem.get_kerning(next)
if kerning_distance != 0.:
kern = Kern(kerning_distance)
new_children.append(kern)
self.children = new_children
# This is a failed experiment to fake cross-font kerning.
# def get_kerning(self, next):
# if len(self.children) >= 2 and isinstance(self.children[-2], Char):
# if isinstance(next, Char):
# print "CASE A"
# return self.children[-2].get_kerning(next)
# elif isinstance(next, Hlist) and len(next.children) and isinstance(next.children[0], Char):
# print "CASE B"
# result = self.children[-2].get_kerning(next.children[0])
# print result
# return result
# return 0.0
def hpack(self, w=0., m='additional'):
"""
The main duty of :meth:`hpack` is to compute the dimensions of
the resulting boxes, and to adjust the glue if one of those
dimensions is pre-specified. The computed sizes normally
enclose all of the material inside the new box; but some items
may stick out if negative glue is used, if the box is
overfull, or if a ``\\vbox`` includes other boxes that have
been shifted left.
- *w*: specifies a width
- *m*: is either 'exactly' or 'additional'.
Thus, ``hpack(w, 'exactly')`` produces a box whose width is
exactly *w*, while ``hpack(w, 'additional')`` yields a box
whose width is the natural width plus *w*. The default values
produce a box with the natural width.
"""
# I don't know why these get reset in TeX. Shift_amount is pretty
# much useless if we do.
#self.shift_amount = 0.
h = 0.
d = 0.
x = 0.
total_stretch = [0.] * 4
total_shrink = [0.] * 4
for p in self.children:
if isinstance(p, Char):
x += p.width
h = max(h, p.height)
d = max(d, p.depth)
elif isinstance(p, Box):
x += p.width
if not isinf(p.height) and not isinf(p.depth):
s = getattr(p, 'shift_amount', 0.)
h = max(h, p.height - s)
d = max(d, p.depth + s)
elif isinstance(p, Glue):
glue_spec = p.glue_spec
x += glue_spec.width
total_stretch[glue_spec.stretch_order] += glue_spec.stretch
total_shrink[glue_spec.shrink_order] += glue_spec.shrink
elif isinstance(p, Kern):
x += p.width
self.height = h
self.depth = d
if m == 'additional':
w += x
self.width = w
x = w - x
if x == 0.:
self.glue_sign = 0
self.glue_order = 0
self.glue_ratio = 0.
return
if x > 0.:
self._set_glue(x, 1, total_stretch, "Overfull")
else:
self._set_glue(x, -1, total_shrink, "Underfull")
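# Illustrative example of hpack semantics (comments only):
#
#   hl = Hlist([Hbox(1.), Hbox(2.), Hbox(3.)])  # natural width 6.0
#   hl.hpack()                    # w=0., 'additional' -> width stays 6.0
#   hl.hpack(w=10., m='exactly')  # width forced to 10.0; since there is no
#                                 # stretchable glue among the children,
#                                 # _set_glue issues an "Overfull" warning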
class Vlist(List):
"""
A vertical list of boxes.
"""
def __init__(self, elements, h=0., m='additional'):
List.__init__(self, elements)
self.vpack()
def vpack(self, h=0., m='additional', l=float(inf)):
"""
The main duty of :meth:`vpack` is to compute the dimensions of
the resulting boxes, and to adjust the glue if one of those
dimensions is pre-specified.
- *h*: specifies a height
- *m*: is either 'exactly' or 'additional'.
- *l*: a maximum height
Thus, ``vpack(h, 'exactly')`` produces a box whose height is
exactly *h*, while ``vpack(h, 'additional')`` yields a box
whose height is the natural height plus *h*. The default
        values produce a box with the natural height.
"""
# I don't know why these get reset in TeX. Shift_amount is pretty
# much useless if we do.
# self.shift_amount = 0.
w = 0.
d = 0.
x = 0.
total_stretch = [0.] * 4
total_shrink = [0.] * 4
for p in self.children:
if isinstance(p, Box):
x += d + p.height
d = p.depth
if not isinf(p.width):
s = getattr(p, 'shift_amount', 0.)
w = max(w, p.width + s)
elif isinstance(p, Glue):
x += d
d = 0.
glue_spec = p.glue_spec
x += glue_spec.width
total_stretch[glue_spec.stretch_order] += glue_spec.stretch
total_shrink[glue_spec.shrink_order] += glue_spec.shrink
elif isinstance(p, Kern):
x += d + p.width
d = 0.
elif isinstance(p, Char):
raise RuntimeError("Internal mathtext error: Char node found in Vlist.")
self.width = w
if d > l:
x += d - l
self.depth = l
else:
self.depth = d
if m == 'additional':
h += x
self.height = h
x = h - x
if x == 0:
self.glue_sign = 0
self.glue_order = 0
self.glue_ratio = 0.
return
if x > 0.:
self._set_glue(x, 1, total_stretch, "Overfull")
else:
self._set_glue(x, -1, total_shrink, "Underfull")
class Rule(Box):
"""
A :class:`Rule` node stands for a solid black rectangle; it has
*width*, *depth*, and *height* fields just as in an
:class:`Hlist`. However, if any of these dimensions is inf, the
actual value will be determined by running the rule up to the
boundary of the innermost enclosing box. This is called a "running
dimension." The width is never running in an :class:`Hlist`; the
height and depth are never running in a :class:`Vlist`.
"""
def __init__(self, width, height, depth, state):
Box.__init__(self, width, height, depth)
self.font_output = state.font_output
def render(self, x, y, w, h):
self.font_output.render_rect_filled(x, y, x + w, y + h)
class Hrule(Rule):
"""
Convenience class to create a horizontal rule.
"""
def __init__(self, state):
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
height = depth = thickness * 0.5
Rule.__init__(self, inf, height, depth, state)
class Vrule(Rule):
"""
Convenience class to create a vertical rule.
"""
def __init__(self, state):
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
Rule.__init__(self, thickness, inf, inf, state)
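# Note (illustrative): because Hrule passes ``inf`` for its width (and Vrule
# for its height and depth), the rule "runs" to the boundary of the innermost
# enclosing box when shipped out -- e.g. the horizontal bar of a fraction.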
class Glue(Node):
"""
Most of the information in this object is stored in the underlying
:class:`GlueSpec` class, which is shared between multiple glue objects. (This
is a memory optimization which probably doesn't matter anymore, but it's
easier to stick to what TeX does.)
"""
def __init__(self, glue_type, copy=False):
Node.__init__(self)
self.glue_subtype = 'normal'
if is_string_like(glue_type):
glue_spec = GlueSpec.factory(glue_type)
elif isinstance(glue_type, GlueSpec):
glue_spec = glue_type
else:
            raise ValueError("glue_type must be a glue spec name or instance.")
if copy:
glue_spec = glue_spec.copy()
self.glue_spec = glue_spec
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
if self.glue_spec.width != 0.:
self.glue_spec = self.glue_spec.copy()
self.glue_spec.width *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
if self.glue_spec.width != 0.:
self.glue_spec = self.glue_spec.copy()
self.glue_spec.width *= GROW_FACTOR
class GlueSpec(object):
"""
See :class:`Glue`.
"""
def __init__(self, width=0., stretch=0., stretch_order=0, shrink=0., shrink_order=0):
self.width = width
self.stretch = stretch
self.stretch_order = stretch_order
self.shrink = shrink
self.shrink_order = shrink_order
def copy(self):
return GlueSpec(
self.width,
self.stretch,
self.stretch_order,
self.shrink,
self.shrink_order)
def factory(cls, glue_type):
return cls._types[glue_type]
factory = classmethod(factory)
GlueSpec._types = {
'fil': GlueSpec(0., 1., 1, 0., 0),
'fill': GlueSpec(0., 1., 2, 0., 0),
'filll': GlueSpec(0., 1., 3, 0., 0),
'neg_fil': GlueSpec(0., 0., 0, 1., 1),
'neg_fill': GlueSpec(0., 0., 0, 1., 2),
'neg_filll': GlueSpec(0., 0., 0, 1., 3),
'empty': GlueSpec(0., 0., 0, 0., 0),
'ss': GlueSpec(0., 1., 1, -1., 1)
}
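# Illustrative reading of the table above (comments only): 'fil' glue has no
# natural width but first-order stretchability, so a Fil() node expands to
# absorb extra space when its enclosing list is packed; the 'ss' glue used by
# HCentered/VCentered both stretches and shrinks, which is what centers their
# contents.
#
#   g = Glue('fil')
#   g.glue_spec.width          # 0.0
#   g.glue_spec.stretch_order  # 1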
# Some convenient ways to get common kinds of glue
class Fil(Glue):
def __init__(self):
Glue.__init__(self, 'fil')
class Fill(Glue):
def __init__(self):
Glue.__init__(self, 'fill')
class Filll(Glue):
def __init__(self):
Glue.__init__(self, 'filll')
class NegFil(Glue):
def __init__(self):
Glue.__init__(self, 'neg_fil')
class NegFill(Glue):
def __init__(self):
Glue.__init__(self, 'neg_fill')
class NegFilll(Glue):
def __init__(self):
Glue.__init__(self, 'neg_filll')
class SsGlue(Glue):
def __init__(self):
Glue.__init__(self, 'ss')
class HCentered(Hlist):
"""
A convenience class to create an :class:`Hlist` whose contents are
centered within its enclosing box.
"""
def __init__(self, elements):
Hlist.__init__(self, [SsGlue()] + elements + [SsGlue()],
do_kern=False)
class VCentered(Vlist):
"""
A convenience class to create a :class:`Vlist` whose contents are
centered within its enclosing box.
"""
def __init__(self, elements):
Vlist.__init__(self, [SsGlue()] + elements + [SsGlue()])
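# How the centering works, as a sketch (not executed): the 'ss' glue on both
# sides is stretchable, so when the list is packed to a wider target the
# excess space is split evenly between the two glues and the contents end up
# centered. Assuming `state` is a valid Parser.State:
#     centered = HCentered([Char('x', state)])
#     centered.hpack(10.0, 'exactly')   # both SsGlue nodes absorb equal slack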
class Kern(Node):
"""
A :class:`Kern` node has a width field to specify a (normally
negative) amount of spacing. This spacing correction appears in
horizontal lists between letters like A and V when the font
designer said that it looks better to move them closer together or
further apart. A kern node can also appear in a vertical list,
when its *width* denotes additional spacing in the vertical
direction.
"""
def __init__(self, width):
Node.__init__(self)
self.width = width
def __repr__(self):
return "k%.02f" % self.width
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.width *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.width *= GROW_FACTOR
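# Usage sketch (not executed): a negative kern pulls neighbouring boxes
# together, exactly as sqrt() does further below with Kern(-check.width * 0.5).
# Assuming `a` and `v` are Char nodes:
#     Hlist([a, Kern(-0.5), v])   # draws "v" half a unit closer to "a"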
class SubSuperCluster(Hlist):
"""
:class:`SubSuperCluster` is a sort of hack to get around that fact
that this code do a two-pass parse like TeX. This lets us store
enough information in the hlist itself, namely the nucleus, sub-
and super-script, such that if another script follows that needs
to be attached, it can be reconfigured on the fly.
"""
def __init__(self):
self.nucleus = None
self.sub = None
self.super = None
Hlist.__init__(self, [])
class AutoHeightChar(Hlist):
"""
:class:`AutoHeightChar` will create a character as close to the
given height and depth as possible. When using a font with
multiple height versions of some characters (such as the BaKoMa
fonts), the correct glyph will be selected, otherwise this will
always just return a scaled version of the glyph.
"""
def __init__(self, c, height, depth, state, always=False):
alternatives = state.font_output.get_sized_alternatives_for_symbol(
state.font, c)
state = state.copy()
target_total = height + depth
for fontname, sym in alternatives:
state.font = fontname
char = Char(sym, state)
if char.height + char.depth >= target_total:
break
factor = target_total / (char.height + char.depth)
state.fontsize *= factor
char = Char(sym, state)
shift = (depth - char.depth)
Hlist.__init__(self, [char])
self.shift_amount = shift
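# Selection sketch (not executed): the loop above walks the sized alternatives
# from smallest to largest, picks the first glyph at least as tall as
# height + depth (or the largest available), then rescales it so its total
# size matches the target exactly; e.g.
#     paren = AutoHeightChar('(', height, depth, state)
# which is how auto_sized_delimiter() builds \left( ... \right) later on.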
class AutoWidthChar(Hlist):
"""
:class:`AutoWidthChar` will create a character as close to the
given width as possible. When using a font with multiple width
versions of some characters (such as the BaKoMa fonts), the
correct glyph will be selected, otherwise this will always just
return a scaled version of the glyph.
"""
def __init__(self, c, width, state, always=False, char_class=Char):
alternatives = state.font_output.get_sized_alternatives_for_symbol(
state.font, c)
state = state.copy()
for fontname, sym in alternatives:
state.font = fontname
char = char_class(sym, state)
if char.width >= width:
break
factor = width / char.width
state.fontsize *= factor
char = char_class(sym, state)
Hlist.__init__(self, [char])
self.width = char.width
class Ship(object):
"""
Once the boxes have been set up, this sends them to output. Since
boxes can be inside of boxes inside of boxes, the main work of
:class:`Ship` is done by two mutually recursive routines,
:meth:`hlist_out` and :meth:`vlist_out`, which traverse the
:class:`Hlist` nodes and :class:`Vlist` nodes inside of horizontal
and vertical boxes. The global variables used in TeX to store
state as it processes have become member variables here.
"""
def __call__(self, ox, oy, box):
self.max_push = 0 # Deepest nesting of push commands so far
self.cur_s = 0
self.cur_v = 0.
self.cur_h = 0.
self.off_h = ox
self.off_v = oy + box.height
self.hlist_out(box)
def clamp(value):
if value < -1000000000.:
return -1000000000.
if value > 1000000000.:
return 1000000000.
return value
clamp = staticmethod(clamp)
def hlist_out(self, box):
cur_g = 0
cur_glue = 0.
glue_order = box.glue_order
glue_sign = box.glue_sign
base_line = self.cur_v
left_edge = self.cur_h
self.cur_s += 1
self.max_push = max(self.cur_s, self.max_push)
clamp = self.clamp
for p in box.children:
if isinstance(p, Char):
p.render(self.cur_h + self.off_h, self.cur_v + self.off_v)
self.cur_h += p.width
elif isinstance(p, Kern):
self.cur_h += p.width
elif isinstance(p, List):
# node623
if len(p.children) == 0:
self.cur_h += p.width
else:
edge = self.cur_h
self.cur_v = base_line + p.shift_amount
if isinstance(p, Hlist):
self.hlist_out(p)
else:
# p.vpack(box.height + box.depth, 'exactly')
self.vlist_out(p)
self.cur_h = edge + p.width
self.cur_v = base_line
elif isinstance(p, Box):
# node624
rule_height = p.height
rule_depth = p.depth
rule_width = p.width
if isinf(rule_height):
rule_height = box.height
if isinf(rule_depth):
rule_depth = box.depth
if rule_height > 0 and rule_width > 0:
self.cur_v = base_line + rule_depth
p.render(self.cur_h + self.off_h,
self.cur_v + self.off_v,
rule_width, rule_height)
self.cur_v = base_line
self.cur_h += rule_width
elif isinstance(p, Glue):
# node625
glue_spec = p.glue_spec
rule_width = glue_spec.width - cur_g
if glue_sign != 0: # normal
if glue_sign == 1: # stretching
if glue_spec.stretch_order == glue_order:
cur_glue += glue_spec.stretch
cur_g = round(clamp(float(box.glue_set) * cur_glue))
elif glue_spec.shrink_order == glue_order:
cur_glue += glue_spec.shrink
cur_g = round(clamp(float(box.glue_set) * cur_glue))
rule_width += cur_g
self.cur_h += rule_width
self.cur_s -= 1
def vlist_out(self, box):
cur_g = 0
cur_glue = 0.
glue_order = box.glue_order
glue_sign = box.glue_sign
self.cur_s += 1
self.max_push = max(self.max_push, self.cur_s)
left_edge = self.cur_h
self.cur_v -= box.height
top_edge = self.cur_v
clamp = self.clamp
for p in box.children:
if isinstance(p, Kern):
self.cur_v += p.width
elif isinstance(p, List):
if len(p.children) == 0:
self.cur_v += p.height + p.depth
else:
self.cur_v += p.height
self.cur_h = left_edge + p.shift_amount
save_v = self.cur_v
p.width = box.width
if isinstance(p, Hlist):
self.hlist_out(p)
else:
self.vlist_out(p)
self.cur_v = save_v + p.depth
self.cur_h = left_edge
elif isinstance(p, Box):
rule_height = p.height
rule_depth = p.depth
rule_width = p.width
if isinf(rule_width):
rule_width = box.width
rule_height += rule_depth
if rule_height > 0 and rule_depth > 0:
self.cur_v += rule_height
p.render(self.cur_h + self.off_h,
self.cur_v + self.off_v,
rule_width, rule_height)
elif isinstance(p, Glue):
glue_spec = p.glue_spec
rule_height = glue_spec.width - cur_g
if glue_sign != 0: # normal
if glue_sign == 1: # stretching
if glue_spec.stretch_order == glue_order:
cur_glue += glue_spec.stretch
cur_g = round(clamp(float(box.glue_set) * cur_glue))
elif glue_spec.shrink_order == glue_order: # shrinking
cur_glue += glue_spec.shrink
cur_g = round(clamp(float(box.glue_set) * cur_glue))
rule_height += cur_g
self.cur_v += rule_height
elif isinstance(p, Char):
raise RuntimeError("Internal mathtext error: Char node found in vlist")
self.cur_s -= 1
ship = Ship()
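# Typical use, as a sketch (not executed): once a box tree has been packed,
# the font machinery renders it by calling this singleton with an origin,
# along the lines of
#     ship(0, 0, box)   # hlist_out()/vlist_out() then walk the tree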
##############################################################################
# PARSER
def Error(msg):
"""
Helper class to raise parser errors.
"""
def raise_error(s, loc, toks):
raise ParseFatalException(msg + "\n" + s)
empty = Empty()
empty.setParseAction(raise_error)
return empty
class Parser(object):
"""
This is the pyparsing-based parser for math expressions. It
actually parses full strings *containing* math expressions, in
that raw text may also appear outside of pairs of ``$``.
The grammar is based directly on that in TeX, though it cuts a few
corners.
"""
_binary_operators = set(r'''
+ *
\pm \sqcap \rhd
\mp \sqcup \unlhd
\times \vee \unrhd
\div \wedge \oplus
\ast \setminus \ominus
\star \wr \otimes
\circ \diamond \oslash
\bullet \bigtriangleup \odot
\cdot \bigtriangledown \bigcirc
\cap \triangleleft \dagger
\cup \triangleright \ddagger
\uplus \lhd \amalg'''.split())
_relation_symbols = set(r'''
= < > :
\leq \geq \equiv \models
\prec \succ \sim \perp
\preceq \succeq \simeq \mid
\ll \gg \asymp \parallel
\subset \supset \approx \bowtie
\subseteq \supseteq \cong \Join
\sqsubset \sqsupset \neq \smile
\sqsubseteq \sqsupseteq \doteq \frown
\in \ni \propto
\vdash \dashv'''.split())
_arrow_symbols = set(r'''
\leftarrow \longleftarrow \uparrow
\Leftarrow \Longleftarrow \Uparrow
\rightarrow \longrightarrow \downarrow
\Rightarrow \Longrightarrow \Downarrow
\leftrightarrow \longleftrightarrow \updownarrow
\Leftrightarrow \Longleftrightarrow \Updownarrow
\mapsto \longmapsto \nearrow
\hookleftarrow \hookrightarrow \searrow
\leftharpoonup \rightharpoonup \swarrow
\leftharpoondown \rightharpoondown \nwarrow
\rightleftharpoons \leadsto'''.split())
_spaced_symbols = _binary_operators | _relation_symbols | _arrow_symbols
_punctuation_symbols = set(r', ; . ! \ldotp \cdotp'.split())
_overunder_symbols = set(r'''
\sum \prod \coprod \bigcap \bigcup \bigsqcup \bigvee
\bigwedge \bigodot \bigotimes \bigoplus \biguplus
'''.split())
_overunder_functions = set(
r"lim liminf limsup sup max min".split())
_dropsub_symbols = set(r'''\int \oint'''.split())
_fontnames = set("rm cal it tt sf bf default bb frak circled scr".split())
_function_names = set("""
arccos csc ker min arcsin deg lg Pr arctan det lim sec arg dim
liminf sin cos exp limsup sinh cosh gcd ln sup cot hom log tan
coth inf max tanh""".split())
_ambiDelim = set(r"""
| \| / \backslash \uparrow \downarrow \updownarrow \Uparrow
\Downarrow \Updownarrow .""".split())
_leftDelim = set(r"( [ { < \lfloor \langle \lceil".split())
_rightDelim = set(r") ] } > \rfloor \rangle \rceil".split())
def __init__(self):
# All forward declarations are here
font = Forward().setParseAction(self.font).setName("font")
latexfont = Forward()
subsuper = Forward().setParseAction(self.subsuperscript).setName("subsuper")
placeable = Forward().setName("placeable")
simple = Forward().setName("simple")
autoDelim = Forward().setParseAction(self.auto_sized_delimiter)
self._expression = Forward().setParseAction(self.finish).setName("finish")
float = Regex(r"[-+]?([0-9]+\.?[0-9]*|\.[0-9]+)")
lbrace = Literal('{').suppress()
rbrace = Literal('}').suppress()
start_group = (Optional(latexfont) - lbrace)
start_group.setParseAction(self.start_group)
end_group = rbrace.copy()
end_group.setParseAction(self.end_group)
bslash = Literal('\\')
accent = oneOf(self._accent_map.keys() +
list(self._wide_accents))
function = oneOf(list(self._function_names))
fontname = oneOf(list(self._fontnames))
latex2efont = oneOf(['math' + x for x in self._fontnames])
space =(FollowedBy(bslash)
+ oneOf([r'\ ',
r'\/',
r'\,',
r'\;',
r'\quad',
r'\qquad',
r'\!'])
).setParseAction(self.space).setName('space')
customspace =(Literal(r'\hspace')
- (( lbrace
- float
- rbrace
) | Error(r"Expected \hspace{n}"))
).setParseAction(self.customspace).setName('customspace')
unicode_range = u"\U00000080-\U0001ffff"
symbol =(Regex(UR"([a-zA-Z0-9 +\-*/<>=:,.;!'@()\[\]|%s])|(\\[%%${}\[\]_|])" % unicode_range)
| (Combine(
bslash
+ oneOf(tex2uni.keys())
) + FollowedBy(Regex("[^a-zA-Z]")))
).setParseAction(self.symbol).leaveWhitespace()
c_over_c =(Suppress(bslash)
+ oneOf(self._char_over_chars.keys())
).setParseAction(self.char_over_chars)
accent = Group(
Suppress(bslash)
+ accent
- placeable
).setParseAction(self.accent).setName("accent")
function =(Suppress(bslash)
+ function
).setParseAction(self.function).setName("function")
group = Group(
start_group
+ ZeroOrMore(
autoDelim
^ simple)
- end_group
).setParseAction(self.group).setName("group")
font <<(Suppress(bslash)
+ fontname)
latexfont <<(Suppress(bslash)
+ latex2efont)
frac = Group(
Suppress(Literal(r"\frac"))
+ ((group + group)
| Error(r"Expected \frac{num}{den}"))
).setParseAction(self.frac).setName("frac")
sqrt = Group(
Suppress(Literal(r"\sqrt"))
+ Optional(
Suppress(Literal("["))
- Regex("[0-9]+")
- Suppress(Literal("]")),
default = None
)
+ (group | Error("Expected \sqrt{value}"))
).setParseAction(self.sqrt).setName("sqrt")
placeable <<(accent
^ function
^ (c_over_c | symbol)
^ group
^ frac
^ sqrt
)
simple <<(space
| customspace
| font
| subsuper
)
subsuperop = oneOf(["_", "^"])
subsuper << Group(
( Optional(placeable)
+ OneOrMore(
subsuperop
- placeable
)
)
| placeable
)
ambiDelim = oneOf(list(self._ambiDelim))
leftDelim = oneOf(list(self._leftDelim))
rightDelim = oneOf(list(self._rightDelim))
autoDelim <<(Suppress(Literal(r"\left"))
+ ((leftDelim | ambiDelim) | Error("Expected a delimiter"))
+ Group(
autoDelim
^ OneOrMore(simple))
+ Suppress(Literal(r"\right"))
+ ((rightDelim | ambiDelim) | Error("Expected a delimiter"))
)
math = OneOrMore(
autoDelim
^ simple
).setParseAction(self.math).setName("math")
math_delim = ~bslash + Literal('$')
non_math = Regex(r"(?:(?:\\[$])|[^$])*"
).setParseAction(self.non_math).setName("non_math").leaveWhitespace()
self._expression << (
non_math
+ ZeroOrMore(
Suppress(math_delim)
+ Optional(math)
+ (Suppress(math_delim)
| Error("Expected end of math '$'"))
+ non_math
)
) + StringEnd()
self.clear()
def clear(self):
"""
Clear any state before parsing.
"""
self._expr = None
self._state_stack = None
self._em_width_cache = {}
def parse(self, s, fonts_object, fontsize, dpi):
"""
Parse expression *s* using the given *fonts_object* for
output, at the given *fontsize* and *dpi*.
Returns the parse tree of :class:`Node` instances.
"""
self._state_stack = [self.State(fonts_object, 'default', 'rm', fontsize, dpi)]
try:
self._expression.parseString(s)
except ParseException, err:
raise ValueError("\n".join([
"",
err.line,
" " * (err.column - 1) + "^",
str(err)]))
return self._expr
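# Minimal usage sketch (not executed), mirroring how MathTextParser drives
# this class further below; `font_output` is assumed to be an already
# constructed fonts object:
#     parser = Parser()
#     box = parser.parse(r'half: $\frac{1}{2}$', font_output, 12, 72)
#     # `box` is the root Hlist of Node instances described above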
# The state of the parser is maintained in a stack. Upon
# entering and leaving a group { } or math/non-math, the stack
# is pushed and popped accordingly. The current state always
# exists in the top element of the stack.
class State(object):
"""
Stores the state of the parser.
States are pushed and popped from a stack as necessary, and
the "current" state is always at the top of the stack.
"""
def __init__(self, font_output, font, font_class, fontsize, dpi):
self.font_output = font_output
self._font = font
self.font_class = font_class
self.fontsize = fontsize
self.dpi = dpi
def copy(self):
return Parser.State(
self.font_output,
self.font,
self.font_class,
self.fontsize,
self.dpi)
def _get_font(self):
return self._font
def _set_font(self, name):
if name in ('it', 'rm', 'bf'):
self.font_class = name
self._font = name
font = property(_get_font, _set_font)
def get_state(self):
"""
Get the current :class:`State` of the parser.
"""
return self._state_stack[-1]
def pop_state(self):
"""
Pop a :class:`State` off of the stack.
"""
self._state_stack.pop()
def push_state(self):
"""
Push a new :class:`State` onto the stack which is just a copy
of the current state.
"""
self._state_stack.append(self.get_state().copy())
def finish(self, s, loc, toks):
#~ print "finish", toks
self._expr = Hlist(toks)
return [self._expr]
def math(self, s, loc, toks):
#~ print "math", toks
hlist = Hlist(toks)
self.pop_state()
return [hlist]
def non_math(self, s, loc, toks):
#~ print "non_math", toks
s = toks[0].replace(r'\$', '$')
symbols = [Char(c, self.get_state()) for c in s]
hlist = Hlist(symbols)
# We're going into math now, so set font to 'it'
self.push_state()
self.get_state().font = 'it'
return [hlist]
def _make_space(self, percentage):
# All spaces are relative to em width
state = self.get_state()
key = (state.font, state.fontsize, state.dpi)
width = self._em_width_cache.get(key)
if width is None:
metrics = state.font_output.get_metrics(
state.font, 'it', 'm', state.fontsize, state.dpi)
width = metrics.advance
self._em_width_cache[key] = width
return Kern(width * percentage)
_space_widths = { r'\ ' : 0.3,
r'\,' : 0.4,
r'\;' : 0.8,
r'\quad' : 1.6,
r'\qquad' : 3.2,
r'\!' : -0.4,
r'\/' : 0.4 }
def space(self, s, loc, toks):
assert(len(toks)==1)
num = self._space_widths[toks[0]]
box = self._make_space(num)
return [box]
def customspace(self, s, loc, toks):
return [self._make_space(float(toks[1]))]
def symbol(self, s, loc, toks):
# print "symbol", toks
c = toks[0]
try:
char = Char(c, self.get_state())
except ValueError:
raise ParseFatalException("Unknown symbol: %s" % c)
if c in self._spaced_symbols:
return [Hlist( [self._make_space(0.2),
char,
self._make_space(0.2)] ,
do_kern = False)]
elif c in self._punctuation_symbols:
return [Hlist( [char,
self._make_space(0.2)] ,
do_kern = False)]
return [char]
_char_over_chars = {
# The first 2 entries in the tuple are (font, char, sizescale) for
# the two symbols under and over. The third element is the space
# (in multiples of underline height)
r'AA' : ( ('rm', 'A', 1.0), (None, '\circ', 0.5), 0.0),
}
def char_over_chars(self, s, loc, toks):
sym = toks[0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
under_desc, over_desc, space = \
self._char_over_chars.get(sym, (None, None, 0.0))
if under_desc is None:
raise ParseFatalException("Error parsing symbol")
over_state = state.copy()
if over_desc[0] is not None:
over_state.font = over_desc[0]
over_state.fontsize *= over_desc[2]
over = Accent(over_desc[1], over_state)
under_state = state.copy()
if under_desc[0] is not None:
under_state.font = under_desc[0]
under_state.fontsize *= under_desc[2]
under = Char(under_desc[1], under_state)
width = max(over.width, under.width)
over_centered = HCentered([over])
over_centered.hpack(width, 'exactly')
under_centered = HCentered([under])
under_centered.hpack(width, 'exactly')
return Vlist([
over_centered,
Vbox(0., thickness * space),
under_centered
])
_accent_map = {
r'hat' : r'\circumflexaccent',
r'breve' : r'\combiningbreve',
r'bar' : r'\combiningoverline',
r'grave' : r'\combininggraveaccent',
r'acute' : r'\combiningacuteaccent',
r'ddot' : r'\combiningdiaeresis',
r'tilde' : r'\combiningtilde',
r'dot' : r'\combiningdotabove',
r'vec' : r'\combiningrightarrowabove',
r'"' : r'\combiningdiaeresis',
r"`" : r'\combininggraveaccent',
r"'" : r'\combiningacuteaccent',
r'~' : r'\combiningtilde',
r'.' : r'\combiningdotabove',
r'^' : r'\circumflexaccent'
}
_wide_accents = set(r"widehat widetilde".split())
def accent(self, s, loc, toks):
assert(len(toks)==1)
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
if len(toks[0]) != 2:
raise ParseFatalException("Error parsing accent")
accent, sym = toks[0]
if accent in self._wide_accents:
accent = AutoWidthChar(
'\\' + accent, sym.width, state, char_class=Accent)
else:
accent = Accent(self._accent_map[accent], state)
centered = HCentered([accent])
centered.hpack(sym.width, 'exactly')
return Vlist([
centered,
Vbox(0., thickness * 2.0),
Hlist([sym])
])
def function(self, s, loc, toks):
#~ print "function", toks
self.push_state()
state = self.get_state()
state.font = 'rm'
hlist = Hlist([Char(c, state) for c in toks[0]])
self.pop_state()
hlist.function_name = toks[0]
return hlist
def start_group(self, s, loc, toks):
self.push_state()
# Deal with LaTeX-style font tokens
if len(toks):
self.get_state().font = toks[0][4:]
return []
def group(self, s, loc, toks):
grp = Hlist(toks[0])
return [grp]
def end_group(self, s, loc, toks):
self.pop_state()
return []
def font(self, s, loc, toks):
assert(len(toks)==1)
name = toks[0]
self.get_state().font = name
return []
def is_overunder(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.c in self._overunder_symbols
elif isinstance(nucleus, Hlist) and hasattr(nucleus, 'function_name'):
return nucleus.function_name in self._overunder_functions
return False
def is_dropsub(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.c in self._dropsub_symbols
return False
def is_slanted(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.is_slanted()
return False
def subsuperscript(self, s, loc, toks):
assert(len(toks)==1)
# print 'subsuperscript', toks
nucleus = None
sub = None
super = None
if len(toks[0]) == 1:
return toks[0].asList()
elif len(toks[0]) == 2:
op, next = toks[0]
nucleus = Hbox(0.0)
if op == '_':
sub = next
else:
super = next
elif len(toks[0]) == 3:
nucleus, op, next = toks[0]
if op == '_':
sub = next
else:
super = next
elif len(toks[0]) == 5:
nucleus, op1, next1, op2, next2 = toks[0]
if op1 == op2:
if op1 == '_':
raise ParseFatalException("Double subscript")
else:
raise ParseFatalException("Double superscript")
if op1 == '_':
sub = next1
super = next2
else:
super = next1
sub = next2
else:
raise ParseFatalException(
"Subscript/superscript sequence is too long. "
"Use braces { } to remove ambiguity.")
state = self.get_state()
rule_thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
xHeight = state.font_output.get_xheight(
state.font, state.fontsize, state.dpi)
# Handle over/under symbols, such as sum or integral
if self.is_overunder(nucleus):
vlist = []
shift = 0.
width = nucleus.width
if super is not None:
super.shrink()
width = max(width, super.width)
if sub is not None:
sub.shrink()
width = max(width, sub.width)
if super is not None:
hlist = HCentered([super])
hlist.hpack(width, 'exactly')
vlist.extend([hlist, Kern(rule_thickness * 3.0)])
hlist = HCentered([nucleus])
hlist.hpack(width, 'exactly')
vlist.append(hlist)
if sub is not None:
hlist = HCentered([sub])
hlist.hpack(width, 'exactly')
vlist.extend([Kern(rule_thickness * 3.0), hlist])
shift = hlist.height + hlist.depth + rule_thickness * 2.0
vlist = Vlist(vlist)
vlist.shift_amount = shift + nucleus.depth * 0.5
result = Hlist([vlist])
return [result]
# Handle regular sub/superscripts
shift_up = nucleus.height - SUBDROP * xHeight
if self.is_dropsub(nucleus):
shift_down = nucleus.depth + SUBDROP * xHeight
else:
shift_down = SUBDROP * xHeight
if super is None:
# node757
sub.shrink()
x = Hlist([sub])
# x.width += SCRIPT_SPACE * xHeight
shift_down = max(shift_down, SUB1 * xHeight)  # scale SUB1 by xHeight, as in the sub+super branch below
clr = x.height - (abs(xHeight * 4.0) / 5.0)
shift_down = max(shift_down, clr)
x.shift_amount = shift_down
else:
super.shrink()
x = Hlist([super, Kern(SCRIPT_SPACE * xHeight)])
# x.width += SCRIPT_SPACE * xHeight
clr = SUP1 * xHeight
shift_up = max(shift_up, clr)
clr = x.depth + (abs(xHeight) / 4.0)
shift_up = max(shift_up, clr)
if sub is None:
x.shift_amount = -shift_up
else: # Both sub and superscript
sub.shrink()
y = Hlist([sub])
# y.width += SCRIPT_SPACE * xHeight
shift_down = max(shift_down, SUB1 * xHeight)
clr = (2.0 * rule_thickness -
((shift_up - x.depth) - (y.height - shift_down)))
if clr > 0.:
shift_up += clr
shift_down += clr
if self.is_slanted(nucleus):
x.shift_amount = DELTA * (shift_up + shift_down)
x = Vlist([x,
Kern((shift_up - x.depth) - (y.height - shift_down)),
y])
x.shift_amount = shift_down
result = Hlist([nucleus, x])
return [result]
def frac(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==2)
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
num, den = toks[0]
num.shrink()
den.shrink()
cnum = HCentered([num])
cden = HCentered([den])
width = max(num.width, den.width) + thickness * 10.
cnum.hpack(width, 'exactly')
cden.hpack(width, 'exactly')
vlist = Vlist([cnum, # numerator
Vbox(0, thickness * 2.0), # space
Hrule(state), # rule
Vbox(0, thickness * 4.0), # space
cden # denominator
])
# Shift so the fraction line sits in the middle of the
# equals sign
metrics = state.font_output.get_metrics(
state.font, 'it', '=', state.fontsize, state.dpi)
shift = (cden.height -
((metrics.ymax + metrics.ymin) / 2 -
thickness * 3.0))
vlist.shift_amount = shift
hlist = Hlist([vlist, Hbox(thickness * 2.)])
return [hlist]
def sqrt(self, s, loc, toks):
#~ print "sqrt", toks
root, body = toks[0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
# Determine the height of the body, and add a little extra to
# the height so it doesn't seem cramped
height = body.height - body.shift_amount + thickness * 5.0
depth = body.depth + body.shift_amount
check = AutoHeightChar(r'\__sqrt__', height, depth, state, always=True)
height = check.height - check.shift_amount
depth = check.depth + check.shift_amount
# Put a little extra space to the left and right of the body
padded_body = Hlist([Hbox(thickness * 2.0),
body,
Hbox(thickness * 2.0)])
rightside = Vlist([Hrule(state),
Fill(),
padded_body])
# Stretch the glue between the hrule and the body
rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0),
depth, 'exactly')
# Add the root and shift it upward so it is above the tick.
# The value of 0.6 is a hard-coded hack ;)
if root is None:
root = Box(check.width * 0.5, 0., 0.)
else:
root = Hlist([Char(x, state) for x in root])
root.shrink()
root.shrink()
root_vlist = Vlist([Hlist([root])])
root_vlist.shift_amount = -height * 0.6
hlist = Hlist([root_vlist, # Root
# Negative kerning to put root over tick
Kern(-check.width * 0.5),
check, # Check
rightside]) # Body
return [hlist]
def auto_sized_delimiter(self, s, loc, toks):
#~ print "auto_sized_delimiter", toks
front, middle, back = toks
state = self.get_state()
height = max([x.height for x in middle])
depth = max([x.depth for x in middle])
parts = []
# \left. and \right. aren't supposed to produce any symbols
if front != '.':
parts.append(AutoHeightChar(front, height, depth, state))
parts.extend(middle.asList())
if back != '.':
parts.append(AutoHeightChar(back, height, depth, state))
hlist = Hlist(parts)
return hlist
###
##############################################################################
# MAIN
class MathTextParser(object):
_parser = None
_backend_mapping = {
'bitmap': MathtextBackendBitmap,
'agg' : MathtextBackendAgg,
'ps' : MathtextBackendPs,
'pdf' : MathtextBackendPdf,
'svg' : MathtextBackendSvg,
'cairo' : MathtextBackendCairo,
'macosx': MathtextBackendAgg,
}
_font_type_mapping = {
'cm' : BakomaFonts,
'stix' : StixFonts,
'stixsans' : StixSansFonts,
'custom' : UnicodeFonts
}
def __init__(self, output):
"""
Create a MathTextParser for the given backend *output*.
"""
self._output = output.lower()
self._cache = maxdict(50)
def parse(self, s, dpi = 72, prop = None):
"""
Parse the given math expression *s* at the given *dpi*. If
*prop* is provided, it is a
:class:`~matplotlib.font_manager.FontProperties` object
specifying the "default" font to use in the math expression,
used for all non-math text.
The results are cached, so multiple calls to :meth:`parse`
with the same expression should be fast.
"""
if prop is None:
prop = FontProperties()
cacheKey = (s, dpi, hash(prop))
result = self._cache.get(cacheKey)
if result is not None:
return result
if self._output == 'ps' and rcParams['ps.useafm']:
font_output = StandardPsFonts(prop)
else:
backend = self._backend_mapping[self._output]()
fontset = rcParams['mathtext.fontset']
fontset_class = self._font_type_mapping.get(fontset.lower())
if fontset_class is not None:
font_output = fontset_class(prop, backend)
else:
raise ValueError(
"mathtext.fontset must be either 'cm', 'stix', "
"'stixsans', or 'custom'")
fontsize = prop.get_size_in_points()
# This is a class variable so we don't rebuild the parser
# with each request.
if self._parser is None:
self.__class__._parser = Parser()
box = self._parser.parse(s, font_output, fontsize, dpi)
font_output.set_canvas_size(box.width, box.height, box.depth)
result = font_output.get_results(box)
self._cache[cacheKey] = result
# Free up the transient data structures
self._parser.clear()
# Fix cyclical references
font_output.destroy()
font_output.mathtext_backend.fonts_object = None
font_output.mathtext_backend = None
return result
def to_mask(self, texstr, dpi=120, fontsize=14):
"""
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns a tuple (*array*, *depth*)
- *array* is an NxM uint8 alpha ubyte mask array of
rasterized tex.
- depth is the offset of the baseline from the bottom of the
image in pixels.
"""
assert(self._output=="bitmap")
prop = FontProperties(size=fontsize)
ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
x = ftimage.as_array()
return x, depth
def to_rgba(self, texstr, color='black', dpi=120, fontsize=14):
"""
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*color*
Any matplotlib color argument
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns a tuple (*array*, *depth*)
- *array* is an NxMx4 RGBA uint8 array of the rasterized tex.
- depth is the offset of the baseline from the bottom of the
image in pixels.
"""
x, depth = self.to_mask(texstr, dpi=dpi, fontsize=fontsize)
r, g, b = mcolors.colorConverter.to_rgb(color)
RGBA = np.zeros((x.shape[0], x.shape[1], 4), dtype=np.uint8)
RGBA[:,:,0] = int(255*r)
RGBA[:,:,1] = int(255*g)
RGBA[:,:,2] = int(255*b)
RGBA[:,:,3] = x
return RGBA, depth
def to_png(self, filename, texstr, color='black', dpi=120, fontsize=14):
"""
Writes a tex expression to a PNG file.
Returns the offset of the baseline from the bottom of the
image in pixels.
*filename*
A writable filename or fileobject
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*color*
A valid matplotlib color argument
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns the offset of the baseline from the bottom of the
image in pixels.
"""
rgba, depth = self.to_rgba(texstr, color=color, dpi=dpi, fontsize=fontsize)
numrows, numcols, tmp = rgba.shape
_png.write_png(rgba.tostring(), numcols, numrows, filename)
return depth
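# Usage sketch (not executed); the 'bitmap' backend is required because this
# builds on to_rgba()/to_mask(). The filename is just a placeholder:
#     parser = MathTextParser('bitmap')
#     depth = parser.to_png('formula.png', r'$\frac{a}{b}$', dpi=120, fontsize=14)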
def get_depth(self, texstr, dpi=120, fontsize=14):
"""
Returns the offset of the baseline from the bottom of the
image in pixels.
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
"""
assert(self._output=="bitmap")
prop = FontProperties(size=fontsize)
ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
return depth
| gpl-3.0 |
adpozuelo/Master | RC/PEC4/sis_plot.py | 1 | 1211 | ## RC - UOC - URV - PEC4
## [email protected]
## run with 'python3 sis_plot.py'
import numpy as np
np.set_printoptions(threshold=np.nan)
import matplotlib.pyplot as plt
import csv
def readData(filename):
with open(filename) as csvfile:
result = []
myreader = csv.reader(csvfile, delimiter = ' ')
for row in myreader:
result.append([float(x) for x in row])
return np.array(result, dtype='float')
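# Assumed input format (the data files themselves are not shown here): each
# line holds two space-separated floats, beta then rho, e.g.
#   0.05 0.0
#   0.50 0.37
# readData() returns them as an Nx2 float array and plotData() below plots
# column 0 (beta) against column 1 (rho).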
def plotData(filename):
data = readData(filename)
x = data[:,0]
y = data[:,1]
plt.plot(x, y)
plt.title('SIS (' + filename + ')')
plt.xlabel('beta')
plt.ylabel('rho')
plt.xlim((0,1))
plt.ylim((0,1))
#plt.show()
plt.savefig(filename + '.png')
plt.clf()
return
files = ['./er_N500_p01_mu01_rho01.txt', './er_N500_p01_mu05_rho01.txt', './er_N500_p01_mu09_rho01.txt', './er_N1000_p0025_mu01_rho01.txt', './er_N1000_p0025_mu05_rho01.txt', './er_N1000_p0025_mu09_rho01.txt', 'ba_N500_m4_mu01_rho01.txt', 'ba_N500_m4_mu05_rho01.txt', 'ba_N500_m4_mu09_rho01.txt', 'ba_N1000_m2_mu01_rho01.txt', 'ba_N1000_m2_mu05_rho01.txt', 'ba_N1000_m2_mu09_rho01.txt']
for file in files:
plotData(file)
| gpl-3.0 |
mne-tools/mne-tools.github.io | 0.11/_downloads/plot_make_forward.py | 20 | 2669 | """
======================================================
Create a forward operator and display sensitivity maps
======================================================
Sensitivity maps can be produced from forward operators that
indicate how well different sensor types will be able to detect
neural currents from different regions of the brain.
"""
# Author: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import mne
from mne.datasets import sample
import matplotlib.pyplot as plt
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
trans = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
src = data_path + '/subjects/sample/bem/sample-oct-6-src.fif'
bem = data_path + '/subjects/sample/bem/sample-5120-5120-5120-bem-sol.fif'
subjects_dir = data_path + '/subjects'
# Note that forward solutions can also be read with read_forward_solution
fwd = mne.make_forward_solution(raw_fname, trans, src, bem,
fname=None, meg=True, eeg=True, mindist=5.0,
n_jobs=2, overwrite=True)
# convert to surface orientation for better visualization
fwd = mne.convert_forward_solution(fwd, surf_ori=True)
leadfield = fwd['sol']['data']
print("Leadfield size : %d x %d" % leadfield.shape)
grad_map = mne.sensitivity_map(fwd, ch_type='grad', mode='fixed')
mag_map = mne.sensitivity_map(fwd, ch_type='mag', mode='fixed')
eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')
###############################################################################
# Show gain matrix a.k.a. leadfield matrix with sensitivity map
picks_meg = mne.pick_types(fwd['info'], meg=True, eeg=False)
picks_eeg = mne.pick_types(fwd['info'], meg=False, eeg=True)
fig, axes = plt.subplots(2, 1, figsize=(10, 8), sharex=True)
fig.suptitle('Lead field matrix (500 dipoles only)', fontsize=14)
for ax, picks, ch_type in zip(axes, [picks_meg, picks_eeg], ['meg', 'eeg']):
im = ax.imshow(leadfield[picks, :500], origin='lower', aspect='auto',
cmap='RdBu_r')
ax.set_title(ch_type.upper())
ax.set_xlabel('sources')
ax.set_ylabel('sensors')
plt.colorbar(im, ax=ax, cmap='RdBu_r')
plt.show()
plt.figure()
plt.hist([grad_map.data.ravel(), mag_map.data.ravel(), eeg_map.data.ravel()],
bins=20, label=['Gradiometers', 'Magnetometers', 'EEG'],
color=['c', 'b', 'k'])
plt.legend()
plt.title('Normal orientation sensitivity')
plt.xlabel('sensitivity')
plt.ylabel('count')
plt.show()
grad_map.plot(time_label='Gradiometer sensitivity', subjects_dir=subjects_dir,
clim=dict(lims=[0, 50, 100]))
| bsd-3-clause |
xzh86/scikit-learn | sklearn/datasets/species_distributions.py | 198 | 7923 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import join
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = join(DIRECTORY_URL, "samples.zip")
COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip")
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = header[b'NODATA_value']
if nodata != -9999:
print(nodata)
M[nodata] = -9999
return M
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY2:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
else:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
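# Usage sketch (not executed), following the species-distribution examples
# that consume this helper:
#     data = fetch_species_distributions()
#     xgrid, ygrid = construct_grids(data)
#     # xgrid/ygrid hold the longitudes/latitudes of the coverage grid cells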
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
if not exists(join(data_home, DATA_ARCHIVE_NAME)):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9)
else:
bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME))
return bunch
| bsd-3-clause |
ver228/tierpsy-tracker | tierpsy/debugging/check_roi_flow.py | 1 | 1273 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 17 17:59:36 2018
@author: avelinojaver
"""
import numpy as np
import cv2
from functools import partial
import json
from pathlib import Path
import pandas as pd
from tierpsy.analysis.ske_create.helperIterROI import generateMoviesROI
mask_file = Path('/Users/avelinojaver/OneDrive - Nexus365/worms/Bertie_movies/CX11314_Ch1_04072017_103259.hdf5')
root_dir = '/Users/avelinojaver/OneDrive - Nexus365/worms/Bertie_movies/'
for mask_file in list(Path(root_dir).glob('*.hdf5')):
skeletons_file = mask_file.parent / 'Results' / (mask_file.stem + '_skeletons.hdf5')
with pd.HDFStore(str(skeletons_file), "r") as ske_file_id:
#attribute useful to understand if we are dealing with dark or light worms
bgnd_param = ske_file_id.get_node('/plate_worms')._v_attrs['bgnd_param']
bgnd_param = json.loads(bgnd_param.decode("utf-8"))
print(bgnd_param)
#%%
ROIs_generator = generateMoviesROI(masked_image_file,
trajectories_data,
bgnd_param = bgnd_param,
progress_prefix = '')
for frame_props in ROIs_generator:
break
| mit |
sodafree/backend | build/ipython/build/lib.linux-i686-2.7/IPython/lib/tests/test_irunner_pylab_magic.py | 3 | 3890 | """Test suite for pylab_import_all magic
Modified from the irunner module but using regex.
"""
# Global to make tests extra verbose and help debugging
VERBOSE = True
# stdlib imports
import StringIO
import sys
import unittest
import re
# IPython imports
from IPython.lib import irunner
from IPython.testing import decorators
def pylab_not_importable():
"""Test if importing pylab fails with RuntimeError (true when having no display)"""
try:
import pylab
return False
except RuntimeError:
return True
# Testing code begins
class RunnerTestCase(unittest.TestCase):
def setUp(self):
self.out = StringIO.StringIO()
#self.out = sys.stdout
def _test_runner(self,runner,source,output):
"""Test that a given runner's input/output match."""
runner.run_source(source)
out = self.out.getvalue()
#out = ''
# this output contains nasty \r\n lineends, and the initial ipython
# banner. clean it up for comparison, removing lines of whitespace
output_l = [l for l in output.splitlines() if l and not l.isspace()]
out_l = [l for l in out.splitlines() if l and not l.isspace()]
mismatch = 0
if len(output_l) != len(out_l):
message = ("Mismatch in number of lines\n\n"
"Expected:\n"
"~~~~~~~~~\n"
"%s\n\n"
"Got:\n"
"~~~~~~~~~\n"
"%s"
) % ("\n".join(output_l), "\n".join(out_l))
self.fail(message)
for n in range(len(output_l)):
# Do a line-by-line comparison
ol1 = output_l[n].strip()
ol2 = out_l[n].strip()
if not re.match(ol1,ol2):
mismatch += 1
if VERBOSE:
print '<<< line %s does not match:' % n
print repr(ol1)
print repr(ol2)
print '>>>'
self.assert_(mismatch==0,'Number of mismatched lines: %s' %
mismatch)
@decorators.skipif_not_matplotlib
@decorators.skipif(pylab_not_importable, "Likely a run without X.")
def test_pylab_import_all_enabled(self):
"Verify that plot is available when pylab_import_all = True"
source = """
from IPython.config.application import Application
app = Application.instance()
app.pylab_import_all = True
pylab
ip=get_ipython()
'plot' in ip.user_ns
"""
output = """
In \[1\]: from IPython\.config\.application import Application
In \[2\]: app = Application\.instance\(\)
In \[3\]: app\.pylab_import_all = True
In \[4\]: pylab
^Welcome to pylab, a matplotlib-based Python environment
For more information, type 'help\(pylab\)'\.
In \[5\]: ip=get_ipython\(\)
In \[6\]: \'plot\' in ip\.user_ns
Out\[6\]: True
"""
runner = irunner.IPythonRunner(out=self.out)
self._test_runner(runner,source,output)
@decorators.skipif_not_matplotlib
@decorators.skipif(pylab_not_importable, "Likely a run without X.")
def test_pylab_import_all_disabled(self):
"Verify that plot is not available when pylab_import_all = False"
source = """
from IPython.config.application import Application
app = Application.instance()
app.pylab_import_all = False
pylab
ip=get_ipython()
'plot' in ip.user_ns
"""
output = """
In \[1\]: from IPython\.config\.application import Application
In \[2\]: app = Application\.instance\(\)
In \[3\]: app\.pylab_import_all = False
In \[4\]: pylab
^Welcome to pylab, a matplotlib-based Python environment
For more information, type 'help\(pylab\)'\.
In \[5\]: ip=get_ipython\(\)
In \[6\]: \'plot\' in ip\.user_ns
Out\[6\]: False
"""
runner = irunner.IPythonRunner(out=self.out)
self._test_runner(runner,source,output)
| bsd-3-clause |
Coder-Yu/SDLib | method/CoDetector.py | 1 | 6767 | from baseclass.SDetection import SDetection
from sklearn.metrics import classification_report
import numpy as np
from tool import config
from collections import defaultdict
from math import log,exp
from sklearn.tree import DecisionTreeClassifier
#CoDetector: Collaborative Shilling Detection Bridging Factorization and User Embedding
class CoDetector(SDetection):
def __init__(self, conf, trainingSet=None, testSet=None, labels=None, fold='[1]'):
super(CoDetector, self).__init__(conf, trainingSet, testSet, labels, fold)
def readConfiguration(self):
super(CoDetector, self).readConfiguration()
extraSettings = config.LineConfig(self.config['CoDetector'])
self.k = int(extraSettings['-k'])
self.negCount = int(extraSettings['-negCount']) # the number of negative samples
if self.negCount < 1:
self.negCount = 1
self.regR = float(extraSettings['-gamma'])
self.filter = int(extraSettings['-filter'])
learningRate = config.LineConfig(self.config['learnRate'])
self.lRate = float(learningRate['-init'])
self.maxLRate = float(learningRate['-max'])
self.maxIter = int(self.config['num.max.iter'])
regular = config.LineConfig(self.config['reg.lambda'])
self.regU, self.regI = float(regular['-u']), float(regular['-i'])
def printAlgorConfig(self):
super(CoDetector, self).printAlgorConfig()
print 'k: %d' % self.negCount
print 'regR: %.5f' % self.regR
print 'filter: %d' % self.filter
print '=' * 80
def initModel(self):
super(CoDetector, self).initModel()
self.w = np.random.rand(len(self.dao.all_User)+1) / 20 # bias value of user
self.c = np.random.rand(len(self.dao.all_User)+1)/ 20 # bias value of context
self.G = np.random.rand(len(self.dao.all_User)+1, self.k) / 20 # context embedding
self.P = np.random.rand(len(self.dao.all_User)+1, self.k) / 20 # latent user matrix
self.Q = np.random.rand(len(self.dao.all_Item)+1, self.k) / 20 # latent item matrix
# constructing SPPMI matrix
self.SPPMI = defaultdict(dict)
D = len(self.dao.user)
print 'Constructing SPPMI matrix...'
# for larger data set has many items, the process will be time consuming
occurrence = defaultdict(dict)
for user1 in self.dao.all_User:
iList1, rList1 = self.dao.allUserRated(user1)
if len(iList1) < self.filter:
continue
for user2 in self.dao.all_User:
if user1 == user2:
continue
if not occurrence[user1].has_key(user2):
iList2, rList2 = self.dao.allUserRated(user2)
if len(iList2) < self.filter:
continue
count = len(set(iList1).intersection(set(iList2)))
if count > self.filter:
occurrence[user1][user2] = count
occurrence[user2][user1] = count
maxVal = 0
frequency = {}
for user1 in occurrence:
frequency[user1] = sum(occurrence[user1].values()) * 1.0
D = sum(frequency.values()) * 1.0
# maxx = -1
for user1 in occurrence:
for user2 in occurrence[user1]:
try:
val = max([log(occurrence[user1][user2] * D / (frequency[user1] * frequency[user2]), 2) - log(
self.negCount, 2), 0])
except ValueError:
print self.SPPMI[user1][user2]
print self.SPPMI[user1][user2] * D / (frequency[user1] * frequency[user2])
if val > 0:
if maxVal < val:
maxVal = val
self.SPPMI[user1][user2] = val
self.SPPMI[user2][user1] = self.SPPMI[user1][user2]
# normalize
for user1 in self.SPPMI:
for user2 in self.SPPMI[user1]:
self.SPPMI[user1][user2] = self.SPPMI[user1][user2] / maxVal
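# In equation form, the cell filled in above is the (max-normalized) shifted
# positive PMI commonly used for embeddings:
#   SPPMI(u, v) = max( log2( #(u,v) * D / (#(u) * #(v)) ) - log2(k), 0 )
# where #(u,v) is the co-rating count, #(u) = sum of u's co-occurrence counts,
# D is the total count mass and k = self.negCount negative samples.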
def buildModel(self):
# Jointly decompose R(ratings) and SPPMI with shared user latent factors P
iteration = 0
while iteration < self.maxIter:
self.loss = 0
self.dao.ratings = dict(self.dao.trainingSet_u, **self.dao.testSet_u)
for user in self.dao.ratings:
for item in self.dao.ratings[user]:
rating = self.dao.ratings[user][item]
error = rating - self.predictRating(user,item)
u = self.dao.all_User[user]
i = self.dao.all_Item[item]
p = self.P[u]
q = self.Q[i]
self.loss += error ** 2
# update latent vectors
self.P[u] += self.lRate * (error * q - self.regU * p)
self.Q[i] += self.lRate * (error * p - self.regI * q)
for user in self.SPPMI:
u = self.dao.all_User[user]
p = self.P[u]
for context in self.SPPMI[user]:
v = self.dao.all_User[context]
m = self.SPPMI[user][context]
g = self.G[v]
diff = (m - p.dot(g) - self.w[u] - self.c[v])
self.loss += diff ** 2
# update latent vectors
self.P[u] += self.lRate * diff * g
self.G[v] += self.lRate * diff * p
self.w[u] += self.lRate * diff
self.c[v] += self.lRate * diff
self.loss += self.regU * (self.P * self.P).sum() + self.regI * (self.Q * self.Q).sum() + self.regR * (self.G * self.G).sum()
iteration += 1
print 'iteration:',iteration
# preparing examples
self.training = []
self.trainingLabels = []
self.test = []
self.testLabels = []
for user in self.dao.trainingSet_u:
self.training.append(self.P[self.dao.all_User[user]])
self.trainingLabels.append(self.labels[user])
for user in self.dao.testSet_u:
self.test.append(self.P[self.dao.all_User[user]])
self.testLabels.append(self.labels[user])
def predictRating(self,user,item):
u = self.dao.all_User[user]
i = self.dao.all_Item[item]
return self.P[u].dot(self.Q[i])
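# i.e. the predicted rating is the inner product of the shared user factor
# and the item factor: r_hat(u, i) = P[u] . Q[i]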
def predict(self):
classifier = DecisionTreeClassifier(criterion='entropy')
classifier.fit(self.training, self.trainingLabels)
pred_labels = classifier.predict(self.test)
print 'Decision Tree:'
return pred_labels
| gpl-3.0 |
ELind77/gensim | gensim/test/test_sklearn_integration.py | 1 | 38316 | import six
import unittest
import numpy
import os
import codecs
import pickle
from scipy import sparse
try:
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.datasets import load_files
from sklearn import linear_model, cluster
from sklearn.exceptions import NotFittedError
except ImportError:
raise unittest.SkipTest("Test requires scikit-learn to be installed, which is not available")
from gensim.sklearn_integration.sklearn_wrapper_gensim_rpmodel import SklRpModel
from gensim.sklearn_integration.sklearn_wrapper_gensim_ldamodel import SklLdaModel
from gensim.sklearn_integration.sklearn_wrapper_gensim_lsimodel import SklLsiModel
from gensim.sklearn_integration.sklearn_wrapper_gensim_ldaseqmodel import SklLdaSeqModel
from gensim.sklearn_integration.sklearn_wrapper_gensim_w2vmodel import SklW2VModel
from gensim.sklearn_integration.sklearn_wrapper_gensim_atmodel import SklATModel
from gensim.corpora import mmcorpus, Dictionary
from gensim import matutils
module_path = os.path.dirname(__file__) # needed because sample data files are located in the same folder
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)
datapath_ldaseq = lambda fname: os.path.join(module_path, 'test_data/DTM', fname)
texts = [
['complier', 'system', 'computer'],
['eulerian', 'node', 'cycle', 'graph', 'tree', 'path'],
['graph', 'flow', 'network', 'graph'],
['loading', 'computer', 'system'],
['user', 'server', 'system'],
['tree', 'hamiltonian'],
['graph', 'trees'],
['computer', 'kernel', 'malfunction', 'computer'],
['server', 'system', 'computer'],
]
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
author2doc = {'john': [0, 1, 2, 3, 4, 5, 6], 'jane': [2, 3, 4, 5, 6, 7, 8], 'jack': [0, 2, 4, 6, 8], 'jill': [1, 3, 5, 7]}
texts_new = texts[0:3]
author2doc_new = {'jill': [0], 'bob': [0, 1], 'sally': [1, 2]}
dictionary_new = Dictionary(texts_new)
corpus_new = [dictionary_new.doc2bow(text) for text in texts_new]
texts_ldaseq = [
[u'senior', u'studios', u'studios', u'studios', u'creators', u'award', u'mobile', u'currently', u'challenges', u'senior', u'summary', u'senior', u'motivated', u'creative', u'senior'],
[u'performs', u'engineering', u'tasks', u'infrastructure', u'focusing', u'primarily', u'programming', u'interaction', u'designers', u'engineers', u'leadership', u'teams', u'teams', u'crews', u'responsibilities', u'engineering', u'quality', u'functional', u'functional', u'teams', u'organizing', u'prioritizing', u'technical', u'decisions', u'engineering', u'participates', u'participates', u'reviews', u'participates', u'hiring', u'conducting', u'interviews'],
[u'feedback', u'departments', u'define', u'focusing', u'engineering', u'teams', u'crews', u'facilitate', u'engineering', u'departments', u'deadlines', u'milestones', u'typically', u'spends', u'designing', u'developing', u'updating', u'bugs', u'mentoring', u'engineers', u'define', u'schedules', u'milestones', u'participating'],
[u'reviews', u'interviews', u'sized', u'teams', u'interacts', u'disciplines', u'knowledge', u'skills', u'knowledge', u'knowledge', u'xcode', u'scripting', u'debugging', u'skills', u'skills', u'knowledge', u'disciplines', u'animation', u'networking', u'expertise', u'competencies', u'oral', u'skills', u'management', u'skills', u'proven', u'effectively', u'teams', u'deadline', u'environment', u'bachelor', u'minimum', u'shipped', u'leadership', u'teams', u'location', u'resumes', u'jobs', u'candidates', u'openings', u'jobs'],
[u'maryland', u'client', u'producers', u'electricity', u'operates', u'storage', u'utility', u'retail', u'customers', u'engineering', u'consultant', u'maryland', u'summary', u'technical', u'technology', u'departments', u'expertise', u'maximizing', u'output', u'reduces', u'operating', u'participates', u'areas', u'engineering', u'conducts', u'testing', u'solve', u'supports', u'environmental', u'understands', u'objectives', u'operates', u'responsibilities', u'handles', u'complex', u'engineering', u'aspects', u'monitors', u'quality', u'proficiency', u'optimization', u'recommendations', u'supports', u'personnel', u'troubleshooting', u'commissioning', u'startup', u'shutdown', u'supports', u'procedure', u'operating', u'units', u'develops', u'simulations', u'troubleshooting', u'tests', u'enhancing', u'solving', u'develops', u'estimates', u'schedules', u'scopes', u'understands', u'technical', u'management', u'utilize', u'routine', u'conducts', u'hazards', u'utilizing', u'hazard', u'operability', u'methodologies', u'participates', u'startup', u'reviews', u'pssr', u'participate', u'teams', u'participate', u'regulatory', u'audits', u'define', u'scopes', u'budgets', u'schedules', u'technical', u'management', u'environmental', u'awareness', u'interfacing', u'personnel', u'interacts', u'regulatory', u'departments', u'input', u'objectives', u'identifying', u'introducing', u'concepts', u'solutions', u'peers', u'customers', u'coworkers', u'knowledge', u'skills', u'engineering', u'quality', u'engineering'],
[u'commissioning', u'startup', u'knowledge', u'simulators', u'technologies', u'knowledge', u'engineering', u'techniques', u'disciplines', u'leadership', u'skills', u'proven', u'engineers', u'oral', u'skills', u'technical', u'skills', u'analytically', u'solve', u'complex', u'interpret', u'proficiency', u'simulation', u'knowledge', u'applications', u'manipulate', u'applications', u'engineering'],
[u'calculations', u'programs', u'matlab', u'excel', u'independently', u'environment', u'proven', u'skills', u'effectively', u'multiple', u'tasks', u'planning', u'organizational', u'management', u'skills', u'rigzone', u'jobs', u'developer', u'exceptional', u'strategies', u'junction', u'exceptional', u'strategies', u'solutions', u'solutions', u'biggest', u'insurers', u'operates', u'investment'],
[u'vegas', u'tasks', u'electrical', u'contracting', u'expertise', u'virtually', u'electrical', u'developments', u'institutional', u'utilities', u'technical', u'experts', u'relationships', u'credibility', u'contractors', u'utility', u'customers', u'customer', u'relationships', u'consistently', u'innovations', u'profile', u'construct', u'envision', u'dynamic', u'complex', u'electrical', u'management', u'grad', u'internship', u'electrical', u'engineering', u'infrastructures', u'engineers', u'documented', u'management', u'engineering', u'quality', u'engineering', u'electrical', u'engineers', u'complex', u'distribution', u'grounding', u'estimation', u'testing', u'procedures', u'voltage', u'engineering'],
[u'troubleshooting', u'installation', u'documentation', u'bsee', u'certification', u'electrical', u'voltage', u'cabling', u'electrical', u'engineering', u'candidates', u'electrical', u'internships', u'oral', u'skills', u'organizational', u'prioritization', u'skills', u'skills', u'excel', u'cadd', u'calculation', u'autocad', u'mathcad', u'skills', u'skills', u'customer', u'relationships', u'solving', u'ethic', u'motivation', u'tasks', u'budget', u'affirmative', u'diversity', u'workforce', u'gender', u'orientation', u'disability', u'disabled', u'veteran', u'vietnam', u'veteran', u'qualifying', u'veteran', u'diverse', u'candidates', u'respond', u'developing', u'workplace', u'reflects', u'diversity', u'communities', u'reviews', u'electrical', u'contracting', u'southwest', u'electrical', u'contractors'],
[u'intern', u'electrical', u'engineering', u'idexx', u'laboratories', u'validating', u'idexx', u'integrated', u'hardware', u'entails', u'planning', u'debug', u'validation', u'engineers', u'validation', u'methodologies', u'healthcare', u'platforms', u'brightest', u'solve', u'challenges', u'innovation', u'technology', u'idexx', u'intern', u'idexx', u'interns', u'supplement', u'interns', u'teams', u'roles', u'competitive', u'interns', u'idexx', u'interns', u'participate', u'internships', u'mentors', u'seminars', u'topics', u'leadership', u'workshops', u'relevant', u'planning', u'topics', u'intern', u'presentations', u'mixers', u'applicants', u'ineligible', u'laboratory', u'compliant', u'idexx', u'laboratories', u'healthcare', u'innovation', u'practicing', u'veterinarians', u'diagnostic', u'technology', u'idexx', u'enhance', u'veterinarians', u'efficiency', u'economically', u'idexx', u'worldwide', u'diagnostic', u'tests', u'tests', u'quality', u'headquartered', u'idexx', u'laboratories', u'employs', u'customers', u'qualifications', u'applicants', u'idexx', u'interns', u'potential', u'demonstrated', u'portfolio', u'recommendation', u'resumes', u'marketing', u'location', u'americas', u'verification', u'validation', u'schedule', u'overtime', u'idexx', u'laboratories', u'reviews', u'idexx', u'laboratories', u'nasdaq', u'healthcare', u'innovation', u'practicing', u'veterinarians'],
[u'location', u'duration', u'temp', u'verification', u'validation', u'tester', u'verification', u'validation', u'middleware', u'specifically', u'testing', u'applications', u'clinical', u'laboratory', u'regulated', u'environment', u'responsibilities', u'complex', u'hardware', u'testing', u'clinical', u'analyzers', u'laboratory', u'graphical', u'interfaces', u'complex', u'sample', u'sequencing', u'protocols', u'developers', u'correction', u'tracking', u'tool', u'timely', u'troubleshoot', u'testing', u'functional', u'manual', u'automated', u'participate', u'ongoing'],
[u'testing', u'coverage', u'planning', u'documentation', u'testing', u'validation', u'corrections', u'monitor', u'implementation', u'recurrence', u'operating', u'statistical', u'quality', u'testing', u'global', u'multi', u'teams', u'travel', u'skills', u'concepts', u'waterfall', u'agile', u'methodologies', u'debugging', u'skills', u'complex', u'automated', u'instrumentation', u'environment', u'hardware', u'mechanical', u'components', u'tracking', u'lifecycle', u'management', u'quality', u'organize', u'define', u'priorities', u'organize', u'supervision', u'aggressive', u'deadlines', u'ambiguity', u'analyze', u'complex', u'situations', u'concepts', u'technologies', u'verbal', u'skills', u'effectively', u'technical', u'clinical', u'diverse', u'strategy', u'clinical', u'chemistry', u'analyzer', u'laboratory', u'middleware', u'basic', u'automated', u'testing', u'biomedical', u'engineering', u'technologists', u'laboratory', u'technology', u'availability', u'click', u'attach'],
[u'scientist', u'linux', u'asrc', u'scientist', u'linux', u'asrc', u'technology', u'solutions', u'subsidiary', u'asrc', u'engineering', u'technology', u'contracts'],
[u'multiple', u'agencies', u'scientists', u'engineers', u'management', u'personnel', u'allows', u'solutions', u'complex', u'aeronautics', u'aviation', u'management', u'aviation', u'engineering', u'hughes', u'technical', u'technical', u'aviation', u'evaluation', u'engineering', u'management', u'technical', u'terminal', u'surveillance', u'programs', u'currently', u'scientist', u'travel', u'responsibilities', u'develops', u'technology', u'modifies', u'technical', u'complex', u'reviews', u'draft', u'conformity', u'completeness', u'testing', u'interface', u'hardware', u'regression', u'impact', u'reliability', u'maintainability', u'factors', u'standardization', u'skills', u'travel', u'programming', u'linux', u'environment', u'cisco', u'knowledge', u'terminal', u'environment', u'clearance', u'clearance', u'input', u'output', u'digital', u'automatic', u'terminal', u'management', u'controller', u'termination', u'testing', u'evaluating', u'policies', u'procedure', u'interface', u'installation', u'verification', u'certification', u'core', u'avionic', u'programs', u'knowledge', u'procedural', u'testing', u'interfacing', u'hardware', u'regression', u'impact', u'reliability', u'maintainability', u'factors', u'standardization', u'missions', u'asrc', u'subsidiaries', u'affirmative', u'employers', u'applicants', u'disability', u'veteran', u'technology', u'location', u'airport', u'bachelor', u'schedule', u'travel', u'contributor', u'management', u'asrc', u'reviews'],
[u'technical', u'solarcity', u'niche', u'vegas', u'overview', u'resolving', u'customer', u'clients', u'expanding', u'engineers', u'developers', u'responsibilities', u'knowledge', u'planning', u'adapt', u'dynamic', u'environment', u'inventive', u'creative', u'solarcity', u'lifecycle', u'responsibilities', u'technical', u'analyzing', u'diagnosing', u'troubleshooting', u'customers', u'ticketing', u'console', u'escalate', u'knowledge', u'engineering', u'timely', u'basic', u'phone', u'functionality', u'customer', u'tracking', u'knowledgebase', u'rotation', u'configure', u'deployment', u'sccm', u'technical', u'deployment', u'deploy', u'hardware', u'solarcity', u'bachelor', u'knowledge', u'dell', u'laptops', u'analytical', u'troubleshooting', u'solving', u'skills', u'knowledge', u'databases', u'preferably', u'server', u'preferably', u'monitoring', u'suites', u'documentation', u'procedures', u'knowledge', u'entries', u'verbal', u'skills', u'customer', u'skills', u'competitive', u'solar', u'package', u'insurance', u'vacation', u'savings', u'referral', u'eligibility', u'equity', u'performers', u'solarcity', u'affirmative', u'diversity', u'workplace', u'applicants', u'orientation', u'disability', u'veteran', u'careerrookie'],
[u'embedded', u'exelis', u'junction', u'exelis', u'embedded', u'acquisition', u'networking', u'capabilities', u'classified', u'customer', u'motivated', u'develops', u'tests', u'innovative', u'solutions', u'minimal', u'supervision', u'paced', u'environment', u'enjoys', u'assignments', u'interact', u'multi', u'disciplined', u'challenging', u'focused', u'embedded', u'developments', u'spanning', u'engineering', u'lifecycle', u'specification', u'enhancement', u'applications', u'embedded', u'freescale', u'applications', u'android', u'platforms', u'interface', u'customers', u'developers', u'refine', u'specifications', u'architectures'],
[u'java', u'programming', u'scripts', u'python', u'debug', u'debugging', u'emulators', u'regression', u'revisions', u'specialized', u'setups', u'capabilities', u'subversion', u'technical', u'documentation', u'multiple', u'engineering', u'techexpousa', u'reviews'],
[u'modeler', u'semantic', u'modeling', u'models', u'skills', u'ontology', u'resource', u'framework', u'schema', u'technologies', u'hadoop', u'warehouse', u'oracle', u'relational', u'artifacts', u'models', u'dictionaries', u'models', u'interface', u'specifications', u'documentation', u'harmonization', u'mappings', u'aligned', u'coordinate', u'technical', u'peer', u'reviews', u'stakeholder', u'communities', u'impact', u'domains', u'relationships', u'interdependencies', u'models', u'define', u'analyze', u'legacy', u'models', u'corporate', u'databases', u'architectural', u'alignment', u'customer', u'expertise', u'harmonization', u'modeling', u'modeling', u'consulting', u'stakeholders', u'quality', u'models', u'storage', u'agile', u'specifically', u'focus', u'modeling', u'qualifications', u'bachelors', u'accredited', u'modeler', u'encompass', u'evaluation', u'skills', u'knowledge', u'modeling', u'techniques', u'resource', u'framework', u'schema', u'technologies', u'unified', u'modeling', u'technologies', u'schemas', u'ontologies', u'sybase', u'knowledge', u'skills', u'interpersonal', u'skills', u'customers', u'clearance', u'applicants', u'eligibility', u'classified', u'clearance', u'polygraph', u'techexpousa', u'solutions', u'partnership', u'solutions', u'integration'],
[u'technologies', u'junction', u'develops', u'maintains', u'enhances', u'complex', u'diverse', u'intensive', u'analytics', u'algorithm', u'manipulation', u'management', u'documented', u'individually', u'reviews', u'tests', u'components', u'adherence', u'resolves', u'utilizes', u'methodologies', u'environment', u'input', u'components', u'hardware', u'offs', u'reuse', u'cots', u'gots', u'synthesis', u'components', u'tasks', u'individually', u'analyzes', u'modifies', u'debugs', u'corrects', u'integrates', u'operating', u'environments', u'develops', u'queries', u'databases', u'repositories', u'recommendations', u'improving', u'documentation', u'develops', u'implements', u'algorithms', u'functional', u'assists', u'developing', u'executing', u'procedures', u'components', u'reviews', u'documentation', u'solutions', u'analyzing', u'conferring', u'users', u'engineers', u'analyzing', u'investigating', u'areas', u'adapt', u'hardware', u'mathematical', u'models', u'predict', u'outcome', u'implement', u'complex', u'database', u'repository', u'interfaces', u'queries', u'bachelors', u'accredited', u'substituted', u'bachelors', u'firewalls', u'ipsec', u'vpns', u'technology', u'administering', u'servers', u'apache', u'jboss', u'tomcat', u'developing', u'interfaces', u'firefox', u'internet', u'explorer', u'operating', u'mainframe', u'linux', u'solaris', u'virtual', u'scripting', u'programming', u'oriented', u'programming', u'ajax', u'script', u'procedures', u'cobol', u'cognos', u'fusion', u'focus', u'html', u'java', u'java', u'script', u'jquery', u'perl', u'visual', u'basic', u'powershell', u'cots', u'cots', u'oracle', u'apex', u'integration', u'competitive', u'package', u'bonus', u'corporate', u'equity', u'tuition', u'reimbursement', u'referral', u'bonus', u'holidays', u'insurance', u'flexible', u'disability', u'insurance'],
[u'technologies', u'disability', u'accommodation', u'recruiter', u'techexpousa'],
['bank', 'river', 'shore', 'water'],
['river', 'water', 'flow', 'fast', 'tree'],
['bank', 'water', 'fall', 'flow'],
['bank', 'bank', 'water', 'rain', 'river'],
['river', 'water', 'mud', 'tree'],
['money', 'transaction', 'bank', 'finance'],
['bank', 'borrow', 'money'],
['bank', 'finance'],
['finance', 'money', 'sell', 'bank'],
['borrow', 'sell'],
['bank', 'loan', 'sell']
]
sstats_ldaseq = numpy.loadtxt(datapath_ldaseq('sstats_test.txt'))
dictionary_ldaseq = Dictionary(texts_ldaseq)
corpus_ldaseq = [dictionary_ldaseq.doc2bow(text) for text in texts_ldaseq]
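# Note: Dictionary.doc2bow maps a tokenized document to a sparse bag-of-words
# representation, i.e. a list of (token_id, count) pairs, so a document like
# ['bank', 'bank', 'water'] becomes [(id_of_bank, 2), (id_of_water, 1)].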
w2v_texts = [
['calculus', 'is', 'the', 'mathematical', 'study', 'of', 'continuous', 'change'],
['geometry', 'is', 'the', 'study', 'of', 'shape'],
['algebra', 'is', 'the', 'study', 'of', 'generalizations', 'of', 'arithmetic', 'operations'],
['differential', 'calculus', 'is', 'related', 'to', 'rates', 'of', 'change', 'and', 'slopes', 'of', 'curves'],
['integral', 'calculus', 'is', 'related', 'to', 'accumulation', 'of', 'quantities', 'and', 'the', 'areas', 'under', 'and', 'between', 'curves'],
['physics', 'is', 'the', 'natural', 'science', 'that', 'involves', 'the', 'study', 'of', 'matter', 'and', 'its', 'motion', 'and', 'behavior', 'through', 'space', 'and', 'time'],
['the', 'main', 'goal', 'of', 'physics', 'is', 'to', 'understand', 'how', 'the', 'universe', 'behaves'],
['physics', 'also', 'makes', 'significant', 'contributions', 'through', 'advances', 'in', 'new', 'technologies', 'that', 'arise', 'from', 'theoretical', 'breakthroughs'],
['advances', 'in', 'the', 'understanding', 'of', 'electromagnetism', 'or', 'nuclear', 'physics', 'led', 'directly', 'to', 'the', 'development', 'of', 'new', 'products', 'that', 'have', 'dramatically', 'transformed', 'modern', 'day', 'society']
]
class TestSklLdaModelWrapper(unittest.TestCase):
def setUp(self):
numpy.random.seed(0) # set fixed seed to get similar values every time
self.model = SklLdaModel(id2word=dictionary, num_topics=2, passes=100, minimum_probability=0, random_state=numpy.random.seed(0))
self.model.fit(corpus)
def testTransform(self):
texts_new = ['graph', 'eulerian']
bow = self.model.id2word.doc2bow(texts_new)
matrix = self.model.transform(bow)
self.assertEqual(matrix.shape[0], 1)
self.assertEqual(matrix.shape[1], self.model.num_topics)
texts_new = [['graph', 'eulerian'], ['server', 'flow'], ['path', 'system']]
bow = []
for i in texts_new:
bow.append(self.model.id2word.doc2bow(i))
matrix = self.model.transform(bow)
self.assertEqual(matrix.shape[0], 3)
self.assertEqual(matrix.shape[1], self.model.num_topics)
def testPartialFit(self):
for i in range(10):
self.model.partial_fit(X=corpus) # fit the model again on the same corpus
doc = list(corpus)[0] # transform only the first document
transformed = self.model.transform(doc)
expected = numpy.array([0.87, 0.13])
passed = numpy.allclose(sorted(transformed[0]), sorted(expected), atol=1e-1)
self.assertTrue(passed)
def testCSRMatrixConversion(self):
numpy.random.seed(0) # set fixed seed to get similar values every time
arr = numpy.array([[1, 2, 0], [0, 0, 3], [1, 0, 0]])
sarr = sparse.csr_matrix(arr)
newmodel = SklLdaModel(num_topics=2, passes=100)
newmodel.fit(sarr)
bow = [(0, 1), (1, 2), (2, 0)]
transformed_vec = newmodel.transform(bow)
expected_vec = numpy.array([0.35367903, 0.64632097])
passed = numpy.allclose(sorted(transformed_vec), sorted(expected_vec), atol=1e-1)
self.assertTrue(passed)
def testPipeline(self):
model = SklLdaModel(num_topics=2, passes=10, minimum_probability=0, random_state=numpy.random.seed(0))
with open(datapath('mini_newsgroup'), 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
data = cache
id2word = Dictionary(map(lambda x: x.split(), data.data))
corpus = [id2word.doc2bow(i.split()) for i in data.data]
numpy.random.mtrand.RandomState(1) # set seed for getting same result
clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
text_lda = Pipeline((('features', model,), ('classifier', clf)))
text_lda.fit(corpus, data.target)
score = text_lda.score(corpus, data.target)
self.assertGreater(score, 0.40)
def testSetGetParams(self):
# updating only one param
self.model.set_params(num_topics=3)
model_params = self.model.get_params()
self.assertEqual(model_params["num_topics"], 3)
# updating multiple params
param_dict = {"eval_every": 20, "decay": 0.7}
self.model.set_params(**param_dict)
model_params = self.model.get_params()
for key in param_dict.keys():
self.assertEqual(model_params[key], param_dict[key])
def testPersistence(self):
model_dump = pickle.dumps(self.model)
model_load = pickle.loads(model_dump)
texts_new = ['graph', 'eulerian']
loaded_bow = model_load.id2word.doc2bow(texts_new)
loaded_matrix = model_load.transform(loaded_bow)
# sanity check for transformation operation
self.assertEqual(loaded_matrix.shape[0], 1)
self.assertEqual(loaded_matrix.shape[1], model_load.num_topics)
# comparing the original and loaded models
original_bow = self.model.id2word.doc2bow(texts_new)
original_matrix = self.model.transform(original_bow)
passed = numpy.allclose(sorted(loaded_matrix), sorted(original_matrix), atol=1e-1)
self.assertTrue(passed)
def testModelNotFitted(self):
lda_wrapper = SklLdaModel(id2word=dictionary, num_topics=2, passes=100, minimum_probability=0, random_state=numpy.random.seed(0))
texts_new = ['graph', 'eulerian']
bow = lda_wrapper.id2word.doc2bow(texts_new)
self.assertRaises(NotFittedError, lda_wrapper.transform, bow)
class TestSklLsiModelWrapper(unittest.TestCase):
def setUp(self):
numpy.random.seed(0) # set fixed seed to get similar values every time
self.model = SklLsiModel(id2word=dictionary, num_topics=2)
self.model.fit(corpus)
def testTransform(self):
texts_new = ['graph', 'eulerian']
bow = self.model.id2word.doc2bow(texts_new)
matrix = self.model.transform(bow)
self.assertEqual(matrix.shape[0], 1)
self.assertEqual(matrix.shape[1], self.model.num_topics)
texts_new = [['graph', 'eulerian'], ['server', 'flow'], ['path', 'system']]
bow = []
for i in texts_new:
bow.append(self.model.id2word.doc2bow(i))
matrix = self.model.transform(bow)
self.assertEqual(matrix.shape[0], 3)
self.assertEqual(matrix.shape[1], self.model.num_topics)
def testPartialFit(self):
for i in range(10):
self.model.partial_fit(X=corpus) # fit the model again on the same corpus
doc = list(corpus)[0] # transform only the first document
transformed = self.model.transform(doc)
expected = numpy.array([1.39, 1e-12])
passed = numpy.allclose(sorted(transformed[0]), sorted(expected), atol=1)
self.assertTrue(passed)
def testPipeline(self):
model = SklLsiModel(num_topics=2)
with open(datapath('mini_newsgroup'), 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
data = cache
id2word = Dictionary(map(lambda x: x.split(), data.data))
corpus = [id2word.doc2bow(i.split()) for i in data.data]
numpy.random.mtrand.RandomState(1) # set seed for getting same result
clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
text_lsi = Pipeline((('features', model,), ('classifier', clf)))
text_lsi.fit(corpus, data.target)
score = text_lsi.score(corpus, data.target)
self.assertGreater(score, 0.50)
def testSetGetParams(self):
# updating only one param
self.model.set_params(num_topics=3)
model_params = self.model.get_params()
self.assertEqual(model_params["num_topics"], 3)
# updating multiple params
param_dict = {"chunksize": 10000, "decay": 0.9}
self.model.set_params(**param_dict)
model_params = self.model.get_params()
for key in param_dict.keys():
self.assertEqual(model_params[key], param_dict[key])
def testPersistence(self):
model_dump = pickle.dumps(self.model)
model_load = pickle.loads(model_dump)
texts_new = ['graph', 'eulerian']
loaded_bow = model_load.id2word.doc2bow(texts_new)
loaded_matrix = model_load.transform(loaded_bow)
# sanity check for transformation operation
self.assertEqual(loaded_matrix.shape[0], 1)
self.assertEqual(loaded_matrix.shape[1], model_load.num_topics)
# comparing the original and loaded models
original_bow = self.model.id2word.doc2bow(texts_new)
original_matrix = self.model.transform(original_bow)
passed = numpy.allclose(sorted(loaded_matrix), sorted(original_matrix), atol=1e-1)
self.assertTrue(passed)
def testModelNotFitted(self):
lsi_wrapper = SklLsiModel(id2word=dictionary, num_topics=2)
texts_new = ['graph', 'eulerian']
bow = lsi_wrapper.id2word.doc2bow(texts_new)
self.assertRaises(NotFittedError, lsi_wrapper.transform, bow)
class TestSklLdaSeqModelWrapper(unittest.TestCase):
def setUp(self):
self.model = SklLdaSeqModel(id2word=dictionary_ldaseq, num_topics=2, time_slice=[10, 10, 11], initialize='own', sstats=sstats_ldaseq)
self.model.fit(corpus_ldaseq)
def testTransform(self):
# transforming two documents
docs = []
docs.append(list(corpus_ldaseq)[0])
docs.append(list(corpus_ldaseq)[1])
transformed_vecs = self.model.transform(docs)
self.assertEqual(transformed_vecs.shape[0], 2)
self.assertEqual(transformed_vecs.shape[1], self.model.num_topics)
# transforming one document
doc = list(corpus_ldaseq)[0]
transformed_vecs = self.model.transform(doc)
self.assertEqual(transformed_vecs.shape[0], 1)
self.assertEqual(transformed_vecs.shape[1], self.model.num_topics)
def testSetGetParams(self):
# updating only one param
self.model.set_params(num_topics=3)
model_params = self.model.get_params()
self.assertEqual(model_params["num_topics"], 3)
# updating multiple params
param_dict = {"passes": 20, "chunksize": 200}
self.model.set_params(**param_dict)
model_params = self.model.get_params()
for key in param_dict.keys():
self.assertEqual(model_params[key], param_dict[key])
def testPipeline(self):
numpy.random.seed(0) # set fixed seed to get similar values every time
with open(datapath('mini_newsgroup'), 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
data = cache
test_data = data.data[0:2]
test_target = data.target[0:2]
id2word = Dictionary(map(lambda x: x.split(), test_data))
corpus = [id2word.doc2bow(i.split()) for i in test_data]
model = SklLdaSeqModel(id2word=id2word, num_topics=2, time_slice=[1, 1, 1], initialize='gensim')
clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
text_ldaseq = Pipeline((('features', model,), ('classifier', clf)))
text_ldaseq.fit(corpus, test_target)
score = text_ldaseq.score(corpus, test_target)
self.assertGreater(score, 0.50)
def testPersistence(self):
model_dump = pickle.dumps(self.model)
model_load = pickle.loads(model_dump)
doc = list(corpus_ldaseq)[0]
loaded_transformed_vecs = model_load.transform(doc)
# sanity check for transformation operation
self.assertEqual(loaded_transformed_vecs.shape[0], 1)
self.assertEqual(loaded_transformed_vecs.shape[1], model_load.num_topics)
# comparing the original and loaded models
original_transformed_vecs = self.model.transform(doc)
passed = numpy.allclose(sorted(loaded_transformed_vecs), sorted(original_transformed_vecs), atol=1e-1)
self.assertTrue(passed)
def testModelNotFitted(self):
ldaseq_wrapper = SklLdaSeqModel(num_topics=2)
doc = list(corpus_ldaseq)[0]
self.assertRaises(NotFittedError, ldaseq_wrapper.transform, doc)
class TestSklRpModelWrapper(unittest.TestCase):
def setUp(self):
numpy.random.seed(13)
self.model = SklRpModel(num_topics=2)
self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
self.model.fit(self.corpus)
def testTransform(self):
# transform two documents
docs = []
docs.append(list(self.corpus)[0])
docs.append(list(self.corpus)[1])
matrix = self.model.transform(docs)
self.assertEqual(matrix.shape[0], 2)
self.assertEqual(matrix.shape[1], self.model.num_topics)
# transform one document
doc = list(self.corpus)[0]
matrix = self.model.transform(doc)
self.assertEqual(matrix.shape[0], 1)
self.assertEqual(matrix.shape[1], self.model.num_topics)
def testSetGetParams(self):
# updating only one param
self.model.set_params(num_topics=3)
model_params = self.model.get_params()
self.assertEqual(model_params["num_topics"], 3)
def testPipeline(self):
numpy.random.seed(0) # set fixed seed to get similar values every time
model = SklRpModel(num_topics=2)
with open(datapath('mini_newsgroup'), 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
data = cache
id2word = Dictionary(map(lambda x: x.split(), data.data))
corpus = [id2word.doc2bow(i.split()) for i in data.data]
numpy.random.mtrand.RandomState(1) # set seed for getting same result
clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
text_rp = Pipeline((('features', model,), ('classifier', clf)))
text_rp.fit(corpus, data.target)
score = text_rp.score(corpus, data.target)
self.assertGreater(score, 0.40)
def testPersistence(self):
model_dump = pickle.dumps(self.model)
model_load = pickle.loads(model_dump)
doc = list(self.corpus)[0]
loaded_transformed_vecs = model_load.transform(doc)
# sanity check for transformation operation
self.assertEqual(loaded_transformed_vecs.shape[0], 1)
self.assertEqual(loaded_transformed_vecs.shape[1], model_load.num_topics)
# comparing the original and loaded models
original_transformed_vecs = self.model.transform(doc)
passed = numpy.allclose(sorted(loaded_transformed_vecs), sorted(original_transformed_vecs), atol=1e-1)
self.assertTrue(passed)
def testModelNotFitted(self):
rpmodel_wrapper = SklRpModel(num_topics=2)
doc = list(self.corpus)[0]
self.assertRaises(NotFittedError, rpmodel_wrapper.transform, doc)
class TestSklW2VModelWrapper(unittest.TestCase):
def setUp(self):
numpy.random.seed(0)
self.model = SklW2VModel(size=10, min_count=0, seed=42)
self.model.fit(texts)
def testTransform(self):
# transform multiple words
words = []
words = words + texts[0]
matrix = self.model.transform(words)
self.assertEqual(matrix.shape[0], 3)
self.assertEqual(matrix.shape[1], self.model.size)
# transform one word
word = texts[0][0]
matrix = self.model.transform(word)
self.assertEqual(matrix.shape[0], 1)
self.assertEqual(matrix.shape[1], self.model.size)
def testSetGetParams(self):
# updating only one param
self.model.set_params(negative=20)
model_params = self.model.get_params()
self.assertEqual(model_params["negative"], 20)
def testPipeline(self):
numpy.random.seed(0) # set fixed seed to get similar values every time
model = SklW2VModel(size=10, min_count=1)
model.fit(w2v_texts)
class_dict = {'mathematics': 1, 'physics': 0}
train_data = [
('calculus', 'mathematics'), ('mathematical', 'mathematics'), ('geometry', 'mathematics'), ('operations', 'mathematics'), ('curves', 'mathematics'),
('natural', 'physics'), ('nuclear', 'physics'), ('science', 'physics'), ('electromagnetism', 'physics'), ('natural', 'physics')
]
train_input = list(map(lambda x: x[0], train_data))
train_target = list(map(lambda x: class_dict[x[1]], train_data))
clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
clf.fit(model.transform(train_input), train_target)
text_w2v = Pipeline((('features', model,), ('classifier', clf)))
score = text_w2v.score(train_input, train_target)
self.assertGreater(score, 0.40)
def testPersistence(self):
model_dump = pickle.dumps(self.model)
model_load = pickle.loads(model_dump)
word = texts[0][0]
loaded_transformed_vecs = model_load.transform(word)
# sanity check for transformation operation
self.assertEqual(loaded_transformed_vecs.shape[0], 1)
self.assertEqual(loaded_transformed_vecs.shape[1], model_load.size)
# comparing the original and loaded models
original_transformed_vecs = self.model.transform(word)
passed = numpy.allclose(sorted(loaded_transformed_vecs), sorted(original_transformed_vecs), atol=1e-1)
self.assertTrue(passed)
def testModelNotFitted(self):
w2vmodel_wrapper = SklW2VModel(size=10, min_count=0, seed=42)
word = texts[0][0]
self.assertRaises(NotFittedError, w2vmodel_wrapper.transform, word)
class TestSklATModelWrapper(unittest.TestCase):
def setUp(self):
self.model = SklATModel(id2word=dictionary, author2doc=author2doc, num_topics=2, passes=100)
self.model.fit(corpus)
def testTransform(self):
# transforming multiple authors
author_list = ['jill', 'jack']
author_topics = self.model.transform(author_list)
self.assertEqual(author_topics.shape[0], 2)
self.assertEqual(author_topics.shape[1], self.model.num_topics)
# transforming one author
jill_topics = self.model.transform('jill')
self.assertEqual(jill_topics.shape[0], 1)
self.assertEqual(jill_topics.shape[1], self.model.num_topics)
def testPartialFit(self):
self.model.partial_fit(corpus_new, author2doc=author2doc_new)
# Did we learn something about Sally?
output_topics = self.model.transform('sally')
sally_topics = output_topics[0] # getting the topics corresponding to 'sally' (from the list of lists)
self.assertTrue(all(sally_topics > 0))
def testSetGetParams(self):
# updating only one param
self.model.set_params(num_topics=3)
model_params = self.model.get_params()
self.assertEqual(model_params["num_topics"], 3)
# updating multiple params
param_dict = {"passes": 5, "iterations": 10}
self.model.set_params(**param_dict)
model_params = self.model.get_params()
for key in param_dict.keys():
self.assertEqual(model_params[key], param_dict[key])
def testPipeline(self):
# train the AuthorTopic model first
model = SklATModel(id2word=dictionary, author2doc=author2doc, num_topics=10, passes=100)
model.fit(corpus)
# create and train clustering model
clstr = cluster.MiniBatchKMeans(n_clusters=2)
authors_full = ['john', 'jane', 'jack', 'jill']
clstr.fit(model.transform(authors_full))
# stack together the two models in a pipeline
text_atm = Pipeline((('features', model,), ('cluster', clstr)))
author_list = ['jane', 'jack', 'jill']
ret_val = text_atm.predict(author_list)
self.assertEqual(len(ret_val), len(author_list))
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 |
SimonHL/TSA | rnn.py | 1 | 2602 | import numpy
import theano
import theano.tensor as TT
import utilities.datagenerator as DG
import matplotlib.pyplot as plt
# number of hidden units
n = 5
# number of input units
nin = 7
# number of output units
nout = 1
# input (where first dimension is time)
u = TT.matrix()
# target (where first dimension is time)
t = TT.matrix()
# initial hidden state of the RNN
h0 = TT.vector()
# learning rate
lr = TT.scalar()
# recurrent weights as a shared variable
W = theano.shared(numpy.random.uniform(size=(n, n), low=-.01, high=.01))
# input to hidden layer weights
W_in = theano.shared(numpy.random.uniform(size=(nin, n), low=-.01, high=.01))
# hidden to output layer weights
W_out = theano.shared(numpy.random.uniform(size=(n, nout), low=-.01, high=.01))
# recurrent function (using tanh activation function) and linear output
# activation function
def step(u_t, h_tm1, W, W_in, W_out):
h_t = TT.tanh(TT.dot(u_t, W_in) + TT.dot(h_tm1, W))
y_t = TT.dot(h_t, W_out)
return h_t, y_t
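# Illustration (a sketch, not part of the original script): the same recurrence
# written with plain NumPy, to make the shapes explicit. The helper name and
# its parameters are hypothetical; `numpy` is already imported above.
# Given u_t of shape (nin,) and h_tm1 of shape (n,), h_t has shape (n,) and
# y_t has shape (nout,).
def step_numpy(u_t, h_tm1, W_np, W_in_np, W_out_np):
    h_t = numpy.tanh(numpy.dot(u_t, W_in_np) + numpy.dot(h_tm1, W_np))
    y_t = numpy.dot(h_t, W_out_np)
    return h_t, y_t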
# the hidden state `h` for the entire sequence, and the output for the
# entire sequence `y` (first dimension is always time)
[h, y], _ = theano.scan(step,
sequences=u,
outputs_info=[h0, None],
non_sequences=[W, W_in, W_out])
# error between output and target
error = ((y - t) ** 2).sum()
# gradients on the weights using BPTT
gW, gW_in, gW_out = TT.grad(error, [W, W_in, W_out])
# training function that computes the error and updates the weights using
# SGD.
fn = theano.function([h0, u, t, lr],
error,
updates=[(W, W - lr * gW),
(W_in, W_in - lr * gW_in),
(W_out, W_out - lr * gW_out)])
fn_sim = theano.function([h0,u], y)
g = DG.Generator()
data_x_,data_y_ = g.get_data('mackey_glass')
N = 1000
data_x_ = data_x_[:N]
data_y_ = data_y_[:N]
learning_rate = 0.0005
sampleNum = data_y_.shape[0] - nin # number of sliding-window samples that can be constructed
data_x = numpy.zeros((sampleNum,nin))
data_y = numpy.zeros((sampleNum,nout))
for i in numpy.arange(sampleNum):
data_x[i] = data_y_[i:i+nin]
data_y[i] = data_y_[i+nin]
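# Sliding-window construction: each input row holds nin consecutive samples of
# the series and the target is the sample that follows, e.g. with nin = 7,
# data_x[0] = data_y_[0:7] and data_y[0] = data_y_[7].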
dtype = theano.config.floatX
h_init = numpy.zeros(shape=(n,), dtype=dtype)
n_epoch = 500
for i in numpy.arange(n_epoch):
print('{}.{}: cost: {}'.format(i, 0, fn(h_init, data_x, data_y, learning_rate)))
y_sim = fn_sim(h_init,data_x)
plt.plot(numpy.arange(y_sim.shape[0]), y_sim, 'r')
plt.plot(numpy.arange(y_sim.shape[0]), data_y, 'k')
plt.plot(numpy.arange(y_sim.shape[0]), y_sim - data_y, 'g')
plt.show()
| bsd-2-clause |
Titan-C/sphinx-gallery | sphinx_gallery/tests/conftest.py | 2 | 6981 | # -*- coding: utf-8 -*-
"""
Pytest fixtures
"""
from __future__ import division, absolute_import, print_function
import collections
from contextlib import contextmanager
from io import StringIO
import os
import shutil
import pytest
import sphinx
from sphinx.application import Sphinx
from sphinx.errors import ExtensionError
from sphinx.util.docutils import docutils_namespace
from sphinx_gallery import (docs_resolv, gen_gallery, gen_rst, utils,
sphinx_compatibility, py_source_parser)
from sphinx_gallery.scrapers import _import_matplotlib
from sphinx_gallery.utils import _get_image
def pytest_report_header(config, startdir):
"""Add information to the pytest run header."""
return 'Sphinx: %s (%s)' % (sphinx.__version__, sphinx.__file__)
Params = collections.namedtuple('Params', 'args kwargs')
class FakeSphinxApp:
def __init__(self):
self.calls = collections.defaultdict(list)
def status_iterator(self, *args, **kwargs):
self.calls['status_iterator'].append(Params(args, kwargs))
for it in args[0]:
yield it
def warning(self, *args, **kwargs):
self.calls['warning'].append(Params(args, kwargs))
def warn(self, *args, **kwargs):
self.calls['warn'].append(Params(args, kwargs))
def info(self, *args, **kwargs):
self.calls['info'].append(Params(args, kwargs))
def verbose(self, *args, **kwargs):
self.calls['verbose'].append(Params(args, kwargs))
def debug(self, *args, **kwargs):
self.calls['debug'].append(Params(args, kwargs))
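# FakeSphinxApp records every logging-style call (warning/info/debug/...) in
# self.calls, a dict mapping the method name to the list of Params it was
# called with, so tests can assert on the messages sphinx-gallery emits.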
@pytest.fixture
def gallery_conf(tmpdir):
"""Set up a test sphinx-gallery configuration."""
app = utils.Bunch()
app.add_css_file = lambda x: None
app.config = dict(source_suffix={'.rst': None})
gallery_conf = gen_gallery._complete_gallery_conf(
{}, str(tmpdir), True, False, app=app)
gallery_conf.update(examples_dir=str(tmpdir), gallery_dir=str(tmpdir))
return gallery_conf
@pytest.fixture
def fakesphinxapp():
orig_app = sphinx_compatibility._app
sphinx_compatibility._app = app = FakeSphinxApp()
try:
yield app
finally:
sphinx_compatibility._app = orig_app
@pytest.fixture
def log_collector():
orig_dr_logger = docs_resolv.logger
orig_gg_logger = gen_gallery.logger
orig_gr_logger = gen_rst.logger
orig_ps_logger = py_source_parser.logger
app = FakeSphinxApp()
docs_resolv.logger = app
gen_gallery.logger = app
py_source_parser.logger = app
gen_rst.logger = app
try:
yield app
finally:
docs_resolv.logger = orig_dr_logger
gen_gallery.logger = orig_gg_logger
gen_rst.logger = orig_gr_logger
py_source_parser.logger = orig_ps_logger
@pytest.fixture
def unicode_sample(tmpdir):
"""Return temporary python source file with Unicode in various places"""
code_str = b"""# -*- coding: utf-8 -*-
'''
\xc3\x9anicode in header
=================
U\xc3\xb1icode in description
'''
# Code source: \xc3\x93scar N\xc3\xa1jera
# License: BSD 3 clause
import os
path = os.path.join('a','b')
a = 'hei\xc3\x9f' # Unicode string
import sphinx_gallery.back_references as br
br.identify_names
from sphinx_gallery.back_references import identify_names
identify_names
"""
fname = tmpdir.join("unicode_sample.py")
fname.write(code_str, 'wb')
return fname.strpath
@pytest.fixture
def req_mpl_jpg(tmpdir, req_mpl):
"""Raise SkipTest if JPEG support is not available."""
# mostly this is needed because of
# https://github.com/matplotlib/matplotlib/issues/16083
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot(range(10))
try:
plt.savefig(str(tmpdir.join('testplot.jpg')))
except Exception as exp:
pytest.skip('Matplotlib jpeg saving failed: %s' % (exp,))
finally:
plt.close(fig)
@pytest.fixture(scope='session')
def req_mpl():
try:
_import_matplotlib()
except (ImportError, ValueError):
pytest.skip('Test requires matplotlib')
@pytest.fixture(scope='session')
def req_pil():
try:
_get_image()
except ExtensionError:
pytest.skip('Test requires pillow')
@pytest.fixture
def conf_file(request):
try:
env = request.node.get_closest_marker('conf_file')
except AttributeError: # old pytest
env = request.node.get_marker('conf_file')
kwargs = env.kwargs if env else {}
result = {
'content': "",
'extensions': ['sphinx_gallery.gen_gallery'],
}
result.update(kwargs)
return result
class SphinxAppWrapper(object):
"""Wrapper for sphinx.application.Application.
This allows control over when the sphinx application is initialized, since
part of the sphinx-gallery build is done in
sphinx.application.Application.__init__ and the remainder is done in
sphinx.application.Application.build.
"""
def __init__(self, srcdir, confdir, outdir, doctreedir, buildername,
**kwargs):
self.srcdir = srcdir
self.confdir = confdir
self.outdir = outdir
self.doctreedir = doctreedir
self.buildername = buildername
self.kwargs = kwargs
def create_sphinx_app(self):
# Avoid warnings about re-registration, see:
# https://github.com/sphinx-doc/sphinx/issues/5038
with self.create_sphinx_app_context() as app:
pass
return app
@contextmanager
def create_sphinx_app_context(self):
with docutils_namespace():
app = Sphinx(self.srcdir, self.confdir, self.outdir,
self.doctreedir, self.buildername, **self.kwargs)
sphinx_compatibility._app = app
yield app
def build_sphinx_app(self, *args, **kwargs):
with self.create_sphinx_app_context() as app:
# building should be done in the same docutils_namespace context
app.build(*args, **kwargs)
return app
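# Typical flow in a test (a sketch only; the actual wiring is the
# sphinx_app_wrapper fixture below): create_sphinx_app() runs just the
# configuration/extension setup done in Sphinx.__init__, while
# build_sphinx_app() additionally runs the full build, e.g.
#     app = sphinx_app_wrapper.create_sphinx_app()
#     app = sphinx_app_wrapper.build_sphinx_app()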
@pytest.fixture
def sphinx_app_wrapper(tmpdir, conf_file, req_mpl, req_pil):
_fixturedir = os.path.join(os.path.dirname(__file__), 'testconfs')
srcdir = os.path.join(str(tmpdir), "config_test")
shutil.copytree(_fixturedir, srcdir)
shutil.copytree(os.path.join(_fixturedir, "src"),
os.path.join(str(tmpdir), "examples"))
base_config = """
import os
import sphinx_gallery
extensions = %r
exclude_patterns = ['_build']
source_suffix = '.rst'
master_doc = 'index'
# General information about the project.
project = u'Sphinx-Gallery <Tests>'\n\n
""" % (conf_file['extensions'],)
with open(os.path.join(srcdir, "conf.py"), "w") as conffile:
conffile.write(base_config + conf_file['content'])
return SphinxAppWrapper(
srcdir, srcdir, os.path.join(srcdir, "_build"),
os.path.join(srcdir, "_build", "toctree"), "html", warning=StringIO(),
status=StringIO())
| bsd-3-clause |
jat255/hyperspyUI | hyperspyui/plugins/moviesaver.py | 2 | 7108 | # -*- coding: utf-8 -*-
# Copyright 2014-2016 The HyperSpyUI developers
#
# This file is part of HyperSpyUI.
#
# HyperSpyUI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpyUI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpyUI. If not, see <http://www.gnu.org/licenses/>.
from hyperspyui.plugins.plugin import Plugin
import matplotlib as mpl
import matplotlib.animation as animation
import os
import sys
from qtpy import QtCore, QtWidgets
from qtpy.QtWidgets import QLineEdit, QCheckBox
def tr(text):
return QtCore.QCoreApplication.translate("MovieSaver", text)
# =============================================================================
# Check that we have a valid writer
writer = mpl.rcParams['animation.writer']
writers = animation.writers
if writer in writers.avail:
writer = writers[writer]()
else:
import warnings
warnings.warn("MovieWriter %s unavailable" % writer)
try:
writer = writers[writers.list()[0]]()
except IndexError:
raise ValueError("Cannot save animation: no writers are "
"available. Please install mencoder or "
"ffmpeg to save animations.")
del writer
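# Presumably this module-level probe exists so that a missing MovieWriter
# backend (ffmpeg or mencoder) is reported when the plugin is imported rather
# than only when the user first tries to save a movie.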
# =============================================================================
class MovieSaver(Plugin):
name = "Movie Saver"
def create_actions(self):
self.add_action(self.name + '.save', self.name, self.save,
icon="../images/video.svg",
tip="")
def create_menu(self):
self.add_menuitem('File', self.ui.actions[self.name + '.save'])
def create_toolbars(self):
self.add_toolbar_button(
'File',
self.ui.actions[
self.name +
'.save'])
def save(self, wrapper=None, fps=None, fname=None, dpi=None):
if wrapper is None:
wrapper = self.ui.get_selected_wrapper()
signal = wrapper.signal
# TODO: Input: writer type, FPS, file, resolution/dpi [, bitrate ++]
dlg = MovieArgsPrompt(self.ui)
fname = os.path.join(os.path.dirname(self.ui.cur_dir), wrapper.name)
fname += os.path.extsep + "mp4"
dlg.edt_fname.setText(fname)
dlg_w = self.ui.show_okcancel_dialog(tr("Save movie"), dlg)
if dlg_w.result() == QtWidgets.QDialog.Accepted:
# Setup writer:
fps = dlg.num_fps.value()
codec = dlg.edt_codec.text()
if not codec:
codec = None
extra = dlg.edt_extra.text()
if extra:
extra = list(extra.split())
else:
extra = None
if dlg.chk_verbose.isChecked():
old_verbose = mpl.verbose.level
mpl.verbose.level = 'debug'
if extra:
extra.extend(['-v', 'debug'])
else:
extra = ['-v', 'debug']
metadata = signal.metadata.as_dictionary()
writer = mpl.rcParams['animation.writer']
writers = animation.writers
if writer in writers.avail:
writer = writers[writer](fps=fps, metadata=metadata,
codec=codec, extra_args=extra)
else:
import warnings
warnings.warn("MovieWriter %s unavailable" % writer)
try:
writer = writers[writers.list()[0]](fps=fps,
metadata=metadata)
except IndexError:
raise ValueError("Cannot save animation: no writers are "
"available. Please install mencoder or "
"ffmpeg to save animations.")
fname = dlg.edt_fname.text()
dpi = dlg.num_dpi.value()
fig = signal._plot.signal_plot.figure
# Set figure props:
if not dlg.chk_colorbar.isChecked():
cb_ax = signal._plot.signal_plot._colorbar.ax
fig.delaxes(cb_ax)
if not dlg.chk_axes.isChecked():
signal._plot.signal_plot.ax.set_axis_off()
try:
with writer.saving(fig, fname, dpi):
for idx in signal.axes_manager:
QtWidgets.QApplication.processEvents()
writer.grab_frame()
finally:
# Reset figure props:
if dlg.chk_verbose.isChecked():
mpl.verbose.level = old_verbose
if not dlg.chk_colorbar.isChecked():
fig.add_axes(cb_ax)
if not dlg.chk_axes.isChecked():
signal._plot.signal_plot.ax.set_axis_on()
fig.canvas.draw()
class MovieArgsPrompt(QtWidgets.QWidget):
def __init__(self, parent=None):
super(MovieArgsPrompt, self).__init__(parent)
self.create_controls()
def create_controls(self):
self.num_fps = QtWidgets.QDoubleSpinBox()
self.num_fps.setValue(15.0)
self.num_fps.setMinimum(0.001)
self.edt_fname = QLineEdit()
self.btn_browse = QtWidgets.QPushButton("...")
self.num_dpi = QtWidgets.QSpinBox()
self.num_dpi.setValue(72)
self.num_dpi.setMinimum(1)
self.num_dpi.setMaximum(10000)
self.chk_axes = QCheckBox("Axes")
self.chk_colorbar = QCheckBox("Colorbar")
# codec = mpl.rcParams['animation.codec']
codec = 'h264'
self.edt_codec = QLineEdit(codec)
self.edt_extra = QLineEdit("-preset veryslow -crf 0")
# TODO: Use QCompleter or QComboBox for codecs
# TODO: Use QCompleter for 'extra' history
# TODO: Bitrate and/or quality slider
self.chk_verbose = QCheckBox("Verbose")
try:
sys.stdout.fileno()
except Exception:
self.chk_verbose.setEnabled(False)
self.chk_verbose.setToolTip("Verbose output does not work with " +
"internal console.")
frm = QtWidgets.QFormLayout()
frm.addRow(tr("FPS:"), self.num_fps)
frm.addRow(tr("DPI:"), self.num_dpi)
frm.addRow(tr("Codec:"), self.edt_codec)
frm.addRow(tr("Extra args:"), self.edt_extra)
frm.addRow(self.chk_axes, self.chk_colorbar)
hbox = QtWidgets.QHBoxLayout()
hbox.addWidget(self.edt_fname)
hbox.addWidget(self.btn_browse)
frm.addRow(tr("File:"), hbox)
frm.addRow("", self.chk_verbose)
self.setLayout(frm)
| gpl-3.0 |
ChanderG/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 143 | 22295 | """
Todo: cross-check the F-value with statsmodels
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
# Smoke test f_oneway on integers: check that it does not raise casting
# errors with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
# test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # make the mean of Y zero
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
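# Note on test_f_regression_center above: because X and Y are constructed with
# zero mean, centering does not change the correlation, so the two F-scores
# differ only through the residual degrees of freedom (n - 2 with centering,
# n - 1 without), which gives F_centered * (n - 1) / (n - 2) == F_uncentered.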
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
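# Illustration (a sketch, not part of the original test suite): the
# Benjamini-Hochberg step-up rule that the fdr heuristic is based on. The
# helper below is hypothetical and only meant to make the criterion explicit:
# with sorted p-values p_(1) <= ... <= p_(m), keep every feature whose p-value
# is at or below the largest p_(k) satisfying p_(k) <= alpha * k / m.
def _bh_threshold_sketch(p_values, alpha):
    p_sorted = np.sort(p_values)
    m = len(p_sorted)
    below = p_sorted <= alpha * np.arange(1, m + 1) / m
    return p_sorted[below].max() if below.any() else 0.0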
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
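    # The scorer returns the single sample's feature values as both scores and
    # p-values, so several features end up tied.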
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
        X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
| bsd-3-clause |
nhejazi/scikit-learn | sklearn/tests/test_naive_bayes.py | 11 | 21805 | import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_gnb_neg_priors():
"""Test whether an error is raised in case of negative priors"""
clf = GaussianNB(priors=np.array([-1., 2.]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_priors():
"""Test whether the class prior override is properly used"""
clf = GaussianNB(priors=np.array([0.3, 0.7])).fit(X, y)
assert_array_almost_equal(clf.predict_proba([[-0.1, -0.1]]),
np.array([[0.825303662161683,
0.174696337838317]]), 8)
assert_array_equal(clf.class_prior_, np.array([0.3, 0.7]))
def test_gnb_wrong_nb_priors():
""" Test whether an error is raised if the number of prior is different
from the number of class"""
clf = GaussianNB(priors=np.array([.25, .25, .25, .25]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_prior_greater_one():
"""Test if an error is raised if the sum of prior greater than one"""
clf = GaussianNB(priors=np.array([2., 1.]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_prior_large_bias():
"""Test if good prediction when class prior favor largely one class"""
clf = GaussianNB(priors=np.array([0.01, 0.99]))
clf.fit(X, y)
assert_equal(clf.predict([[-0.1, -0.1]]), np.array([2]))
def test_check_update_with_no_data():
""" Test when the partial fit is called without any data"""
# Create an empty array
prev_points = 100
mean = 0.
var = 1.
x_empty = np.empty((0, X.shape[1]))
tmean, tvar = GaussianNB._update_mean_variance(prev_points, mean,
var, x_empty)
assert_equal(tmean, mean)
assert_equal(tvar, var)
def test_gnb_pfit_wrong_nb_features():
"""Test whether an error is raised when the number of feature changes
between two partial fit"""
clf = GaussianNB()
# Fit for the first time the GNB
clf.fit(X, y)
# Partial fit a second time with an incoherent X
assert_raises(ValueError, clf.partial_fit, np.hstack((X, X)), y)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
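    # (BernoulliNB binarizes its input at zero by default, so the 100s collapse
    # to 1s, while MultinomialNB treats the values as event counts.)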
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1:]), 2)
assert_equal(clf.predict_proba([X[0]]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0:1]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float64)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([[1, 0]]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
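    # With alpha=1 this amounts to Laplace smoothing:
    # P(x_i=1 | y) = (count(x_i=1, y) + 1) / (count(y) + 2)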
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_almost_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
    # Fit BernoulliNB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
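    # e.g. P(Beijing | China) = (1 + 1) / (3 + 2) = 0.4 and
    # P(Tokyo | Japan) = (1 + 1) / (1 + 2) = 2/3 with add-one smoothing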
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([[0, 1, 1, 0, 0, 1]])
# Check the predictive probabilities are correct
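    # P(China) * likelihood = 3/4 * 4/5 * 1/5 * 1/5 * (3/5)**3 ~= 0.005184 and
    # P(Japan) * likelihood = 1/4 * (2/3)**6 ~= 0.021948, before normalization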
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
def test_naive_bayes_scale_invariance():
# Scaling the data should not change the prediction results
iris = load_iris()
X, y = iris.data, iris.target
labels = [GaussianNB().fit(f * X, y).predict(f * X)
for f in [1E-10, 1, 1E10]]
assert_array_equal(labels[0], labels[1])
assert_array_equal(labels[1], labels[2])
def test_alpha():
    # Setting alpha=0 should not output NaN results when p(x_i|y_j)=0 occurs
X = np.array([[1, 0], [1, 1]])
y = np.array([0, 1])
nb = BernoulliNB(alpha=0.)
assert_warns(UserWarning, nb.partial_fit, X, y, classes=[0, 1])
assert_warns(UserWarning, nb.fit, X, y)
prob = np.array([[1, 0], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
nb = MultinomialNB(alpha=0.)
assert_warns(UserWarning, nb.partial_fit, X, y, classes=[0, 1])
assert_warns(UserWarning, nb.fit, X, y)
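    # The warnings above come from alpha being clipped to a tiny positive
    # value, so class 0 assigns essentially zero probability to the second
    # feature, giving posteriors of roughly [2/3, 1/3] for [1, 0] and
    # [0, 1] for [1, 1].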
prob = np.array([[2./3, 1./3], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
# Test sparse X
X = scipy.sparse.csr_matrix(X)
nb = BernoulliNB(alpha=0.)
assert_warns(UserWarning, nb.fit, X, y)
prob = np.array([[1, 0], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
nb = MultinomialNB(alpha=0.)
assert_warns(UserWarning, nb.fit, X, y)
prob = np.array([[2./3, 1./3], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
# Test for alpha < 0
X = np.array([[1, 0], [1, 1]])
y = np.array([0, 1])
expected_msg = ('Smoothing parameter alpha = -1.0e-01. '
'alpha should be > 0.')
b_nb = BernoulliNB(alpha=-0.1)
m_nb = MultinomialNB(alpha=-0.1)
assert_raise_message(ValueError, expected_msg, b_nb.fit, X, y)
assert_raise_message(ValueError, expected_msg, m_nb.fit, X, y)
b_nb = BernoulliNB(alpha=-0.1)
m_nb = MultinomialNB(alpha=-0.1)
assert_raise_message(ValueError, expected_msg, b_nb.partial_fit,
X, y, classes=[0, 1])
assert_raise_message(ValueError, expected_msg, m_nb.partial_fit,
X, y, classes=[0, 1])
| bsd-3-clause |
DistrictDataLabs/yellowbrick | tests/test_model_selection/test_importances.py | 1 | 17181 | # tests.test_model_selection.test_importances
# Test the feature importance visualizers
#
# Author: Benjamin Bengfort
# Author: Rebecca Bilbro
# Created: Fri Mar 02 15:23:22 2018 -0500
#
# Copyright (C) 2018 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: test_importances.py [] [email protected] $
"""
Test the feature importance visualizers
"""
##########################################################################
## Imports
##########################################################################
import pytest
import numpy as np
import numpy.testing as npt
import matplotlib.pyplot as plt
from yellowbrick.exceptions import NotFitted
from yellowbrick.model_selection.importances import *
from yellowbrick.datasets import load_occupancy, load_concrete
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.linear_model import LogisticRegression, Lasso
from unittest import mock
from tests.base import VisualTestCase
try:
import pandas as pd
except ImportError:
pd = None
##########################################################################
## Feature Importances Tests
##########################################################################
class TestFeatureImportancesVisualizer(VisualTestCase):
"""
Test FeatureImportances visualizer
"""
def test_integration_feature_importances(self):
"""
Integration test of visualizer with feature importances param
"""
# Load the test dataset
X, y = load_occupancy(return_dataset=True).to_numpy()
fig = plt.figure()
ax = fig.add_subplot()
clf = GradientBoostingClassifier(random_state=42)
viz = FeatureImportances(clf, ax=ax)
viz.fit(X, y)
viz.finalize()
# Appveyor and Linux conda non-text-based differences
self.assert_images_similar(viz, tol=13.0)
def test_integration_coef(self):
"""
Integration test of visualizer with coef param
"""
# Load the test dataset
dataset = load_concrete(return_dataset=True)
X, y = dataset.to_numpy()
features = dataset.meta["features"]
fig = plt.figure()
ax = fig.add_subplot()
reg = Lasso(random_state=42)
features = list(map(lambda s: s.title(), features))
viz = FeatureImportances(reg, ax=ax, labels=features, relative=False)
viz.fit(X, y)
viz.finalize()
# Appveyor and Linux conda non-text-based differences
self.assert_images_similar(viz, tol=16.2)
def test_integration_quick_method(self):
"""
Integration test of quick method
"""
# Load the test dataset
X, y = load_occupancy(return_dataset=True).to_numpy()
fig = plt.figure()
ax = fig.add_subplot()
clf = RandomForestClassifier(random_state=42)
g = feature_importances(clf, X, y, ax=ax, show=False)
# Appveyor and Linux conda non-text-based differences
self.assert_images_similar(g, tol=15.0)
def test_fit_no_importances_model(self):
"""
Fitting a model without feature importances raises an exception
"""
X = np.random.rand(100, 42)
y = np.random.rand(100)
visualizer = FeatureImportances(MockEstimator())
expected_error = "could not find feature importances param on MockEstimator"
with pytest.raises(YellowbrickTypeError, match=expected_error):
visualizer.fit(X, y)
def test_fit_sorted_params(self):
"""
On fit, sorted features_ and feature_importances_ params are created
"""
coefs = np.array([0.4, 0.2, 0.08, 0.07, 0.16, 0.23, 0.38, 0.1, 0.05])
names = np.array(["a", "b", "c", "d", "e", "f", "g", "h", "i"])
model = MockEstimator()
model.make_importance_param(value=coefs)
visualizer = FeatureImportances(model, labels=names)
visualizer.fit(np.random.rand(100, len(names)), np.random.rand(100))
assert hasattr(visualizer, "features_")
assert hasattr(visualizer, "feature_importances_")
# get the expected sort index
sort_idx = np.argsort(coefs)
# assert sorted
npt.assert_array_equal(names[sort_idx], visualizer.features_)
npt.assert_array_equal(coefs[sort_idx], visualizer.feature_importances_)
def test_fit_relative(self):
"""
Test fit computes relative importances
"""
coefs = np.array([0.4, 0.2, 0.08, 0.07, 0.16, 0.23, 0.38, 0.1, 0.05])
model = MockEstimator()
model.make_importance_param(value=coefs)
visualizer = FeatureImportances(model, relative=True)
visualizer.fit(np.random.rand(100, len(coefs)), np.random.rand(100))
expected = 100.0 * coefs / coefs.max()
expected.sort()
npt.assert_array_equal(visualizer.feature_importances_, expected)
def test_fit_not_relative(self):
"""
Test fit stores unmodified importances
"""
coefs = np.array([0.4, 0.2, 0.08, 0.07, 0.16, 0.23, 0.38, 0.1, 0.05])
model = MockEstimator()
model.make_importance_param(value=coefs)
visualizer = FeatureImportances(model, relative=False)
visualizer.fit(np.random.rand(100, len(coefs)), np.random.rand(100))
coefs.sort()
npt.assert_array_equal(visualizer.feature_importances_, coefs)
def test_fit_absolute(self):
"""
Test fit with absolute values
"""
coefs = np.array([0.4, 0.2, -0.08, 0.07, 0.16, 0.23, -0.38, 0.1, -0.05])
model = MockEstimator()
model.make_importance_param(value=coefs)
# Test absolute value
visualizer = FeatureImportances(model, absolute=True, relative=False)
visualizer.fit(np.random.rand(100, len(coefs)), np.random.rand(100))
expected = np.array([0.05, 0.07, 0.08, 0.1, 0.16, 0.2, 0.23, 0.38, 0.4])
npt.assert_array_equal(visualizer.feature_importances_, expected)
# Test no absolute value
visualizer = FeatureImportances(model, absolute=False, relative=False)
visualizer.fit(np.random.rand(100, len(coefs)), np.random.rand(100))
expected = np.array([-0.38, -0.08, -0.05, 0.07, 0.1, 0.16, 0.2, 0.23, 0.4])
npt.assert_array_equal(visualizer.feature_importances_, expected)
def test_multi_coefs(self):
"""
Test fit with multidimensional coefficients and stack warning
"""
coefs = np.array(
[
[0.4, 0.2, -0.08, 0.07, 0.16, 0.23, -0.38, 0.1, -0.05],
[0.41, 0.12, -0.1, 0.1, 0.14, 0.21, 0.01, 0.31, -0.15],
[0.31, 0.2, -0.01, 0.1, 0.22, 0.23, 0.01, 0.12, -0.15],
]
)
model = MockEstimator()
model.make_importance_param(value=coefs)
visualizer = FeatureImportances(model, stack=False)
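        # With stack=False the 2D coefficients are expected to be collapsed to
        # their mean across classes, which should emit a YellowbrickWarning.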
with pytest.warns(YellowbrickWarning):
visualizer.fit(
np.random.rand(100, len(np.mean(coefs, axis=0))), np.random.rand(100)
)
npt.assert_equal(visualizer.feature_importances_.ndim, 1)
def test_multi_coefs_stacked(self):
"""
Test stack plot with multidimensional coefficients
"""
X, y = load_iris(True)
viz = FeatureImportances(
LogisticRegression(solver="liblinear", random_state=222), stack=True
)
viz.fit(X, y)
viz.finalize()
npt.assert_equal(viz.feature_importances_.shape, (3, 4))
# Appveyor and Linux conda non-text-based differences
self.assert_images_similar(viz, tol=17.5)
@pytest.mark.skipif(pd is None, reason="pandas is required for this test")
def test_fit_dataframe(self):
"""
Ensure feature names are extracted from DataFrame columns
"""
labels = ["a", "b", "c", "d", "e", "f"]
df = pd.DataFrame(np.random.rand(100, 6), columns=labels)
s = pd.Series(np.random.rand(100), name="target")
assert df.shape == (100, 6)
model = MockEstimator()
model.make_importance_param(value=np.linspace(0, 1, 6))
visualizer = FeatureImportances(model)
visualizer.fit(df, s)
assert hasattr(visualizer, "features_")
npt.assert_array_equal(visualizer.features_, np.array(df.columns))
def test_fit_makes_labels(self):
"""
Assert that the fit process makes label indices
"""
model = MockEstimator()
model.make_importance_param(value=np.linspace(0, 1, 10))
visualizer = FeatureImportances(model)
visualizer.fit(np.random.rand(100, 10), np.random.rand(100))
# Don't have to worry about label space since importances are linspace
assert hasattr(visualizer, "features_")
npt.assert_array_equal(np.arange(10), visualizer.features_)
def test_fit_calls_draw(self):
"""
Assert that fit calls draw
"""
model = MockEstimator()
model.make_importance_param("coef_")
visualizer = FeatureImportances(model)
with mock.patch.object(visualizer, "draw") as mdraw:
visualizer.fit(np.random.rand(100, 42), np.random.rand(100))
mdraw.assert_called_once()
def test_draw_raises_unfitted(self):
"""
Assert draw raises exception when not fitted
"""
visualizer = FeatureImportances(Lasso())
with pytest.raises(NotFitted):
visualizer.draw()
def test_find_importances_param(self):
"""
Test the expected parameters can be found
"""
params = ("feature_importances_", "coef_")
for param in params:
model = MockEstimator()
model.make_importance_param(param, "foo")
visualizer = FeatureImportances(model)
assert hasattr(model, param), "expected '{}' missing".format(param)
for oparam in params:
if oparam == param:
continue
assert not hasattr(model, oparam), "unexpected '{}'".format(oparam)
importances = visualizer._find_importances_param()
assert importances == "foo"
def test_find_importances_param_priority(self):
"""
With both feature_importances_ and coef_, one has priority
"""
model = MockEstimator()
model.make_importance_param("feature_importances_", "foo")
model.make_importance_param("coef_", "bar")
visualizer = FeatureImportances(model)
assert hasattr(model, "feature_importances_")
assert hasattr(model, "coef_")
importances = visualizer._find_importances_param()
assert importances == "foo"
def test_find_importances_param_not_found(self):
"""
Raises an exception when importances param not found
"""
model = MockEstimator()
visualizer = FeatureImportances(model)
assert not hasattr(model, "feature_importances_")
assert not hasattr(model, "coef_")
with pytest.raises(YellowbrickTypeError):
visualizer._find_importances_param()
def test_find_classes_param_not_found(self):
"""
Raises an exception when classes param not found
"""
model = MockClassifier()
visualizer = FeatureImportances(model)
assert not hasattr(model, "classes_")
e = "could not find classes_ param on {}".format(
visualizer.estimator.__class__.__name__
)
with pytest.raises(YellowbrickTypeError, match=e):
visualizer._find_classes_param()
def test_xlabel(self):
"""
Check the various xlabels are sensical
"""
model = MockEstimator()
model.make_importance_param("feature_importances_")
visualizer = FeatureImportances(model, xlabel="foo", relative=True)
# Assert the visualizer uses the user supplied xlabel
assert visualizer._get_xlabel() == "foo", "could not set user xlabel"
# Check the visualizer default relative xlabel
visualizer.set_params(xlabel=None)
assert "relative" in visualizer._get_xlabel()
# Check value xlabel with default
visualizer.set_params(relative=False)
assert "relative" not in visualizer._get_xlabel()
        # Check coefficients
model = MockEstimator()
model.make_importance_param("coef_")
visualizer = FeatureImportances(model, xlabel="baz", relative=True)
# Assert the visualizer uses the user supplied xlabel
assert visualizer._get_xlabel() == "baz", "could not set user xlabel"
# Check the visualizer default relative xlabel
visualizer.set_params(xlabel=None)
assert "coefficient" in visualizer._get_xlabel()
assert "relative" in visualizer._get_xlabel()
# Check value xlabel with default
visualizer.set_params(relative=False)
assert "coefficient" in visualizer._get_xlabel()
assert "relative" not in visualizer._get_xlabel()
def test_is_fitted(self):
"""
Test identification if is fitted
"""
visualizer = FeatureImportances(Lasso())
assert not visualizer._is_fitted()
visualizer.features_ = "foo"
assert not visualizer._is_fitted()
visualizer.feature_importances_ = "bar"
assert visualizer._is_fitted()
del visualizer.features_
assert not visualizer._is_fitted()
def test_with_fitted(self):
"""
Test that visualizer properly handles an already-fitted model
"""
X, y = load_concrete(return_dataset=True).to_numpy()
model = Lasso().fit(X, y)
with mock.patch.object(model, "fit") as mockfit:
oz = FeatureImportances(model)
oz.fit(X, y)
mockfit.assert_not_called()
with mock.patch.object(model, "fit") as mockfit:
oz = FeatureImportances(model, is_fitted=True)
oz.fit(X, y)
mockfit.assert_not_called()
with mock.patch.object(model, "fit") as mockfit:
oz = FeatureImportances(model, is_fitted=False)
oz.fit(X, y)
mockfit.assert_called_once_with(X, y)
def test_topn_stacked(self):
"""
Test stack plot with only the three most important features by sum of
each feature's importance across all classes
"""
X, y = load_iris(True)
viz = FeatureImportances(
LogisticRegression(solver="liblinear", random_state=222),
stack=True, topn=3
)
viz.fit(X, y)
viz.finalize()
npt.assert_equal(viz.feature_importances_.shape, (3, 3))
# Appveyor and Linux conda non-text-based differences
self.assert_images_similar(viz, tol=17.5)
def test_topn_negative_stacked(self):
"""
Test stack plot with only the three least important features by sum of
each feature's importance across all classes
"""
X, y = load_iris(True)
viz = FeatureImportances(
LogisticRegression(solver="liblinear", random_state=222),
stack=True, topn=-3
)
viz.fit(X, y)
viz.finalize()
npt.assert_equal(viz.feature_importances_.shape, (3, 3))
# Appveyor and Linux conda non-text-based differences
self.assert_images_similar(viz, tol=17.5)
def test_topn(self):
"""
Test plot with only top three important features by absolute value
"""
X, y = load_iris(True)
viz = FeatureImportances(
GradientBoostingClassifier(random_state=42), topn=3
)
viz.fit(X, y)
viz.finalize()
# Appveyor and Linux conda non-text-based differences
self.assert_images_similar(viz, tol=17.5)
def test_topn_negative(self):
"""
Test plot with only the three least important features by absolute value
"""
X, y = load_iris(True)
viz = FeatureImportances(
GradientBoostingClassifier(random_state=42), topn=-3
)
viz.fit(X, y)
viz.finalize()
# Appveyor and Linux conda non-text-based differences
self.assert_images_similar(viz, tol=17.5)
##########################################################################
## Mock Estimator
##########################################################################
class MockEstimator(BaseEstimator):
"""
    Creates importance parameters on demand via make_importance_param.
"""
def make_importance_param(self, name="feature_importances_", value=None):
if value is None:
value = np.random.rand(42)
setattr(self, name, value)
def fit(self, X, y=None, **kwargs):
return self
class MockClassifier(BaseEstimator, ClassifierMixin):
"""
Creates empty classifier.
"""
pass
| apache-2.0 |
shusenl/scikit-learn | examples/model_selection/grid_search_digits.py | 227 | 2665 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This example shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.grid_search.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, turning
# the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_weighted' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
biosustain/memote | src/memote/suite/tests/test_annotation.py | 2 | 22622 | # -*- coding: utf-8 -*-
# Copyright 2017 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests performed on the annotations of an instance of ``cobra.Model``."""
from __future__ import absolute_import, division
from builtins import dict
import pytest
import memote.support.annotation as annotation
from memote.utils import annotate, get_ids, truncate, wrapper
@annotate(title="Presence of Metabolite Annotation", format_type="count")
def test_metabolite_annotation_presence(model):
"""
Expect all metabolites to have a non-empty annotation attribute.
This test checks if any annotations at all are present in the SBML
annotations field for each metabolite, irrespective of the type of
annotation i.e. specific database cross-references, ontology terms,
additional information. For this test to pass the model is expected to
have metabolites and each of them should have some form of annotation.
Implementation:
Check if the annotation attribute of each cobra.Metabolite object of the
model is unset or empty.
"""
ann = test_metabolite_annotation_presence.annotation
ann["data"] = get_ids(
annotation.find_components_without_annotation(model, "metabolites")
)
ann["metric"] = len(ann["data"]) / len(model.metabolites)
ann["message"] = wrapper.fill(
"""A total of {} metabolites ({:.2%}) lack any form of annotation:
{}""".format(
len(ann["data"]), ann["metric"], truncate(ann["data"])
)
)
assert len(ann["data"]) == 0, ann["message"]
@annotate(title="Presence of Reaction Annotation", format_type="count")
def test_reaction_annotation_presence(model):
"""
Expect all reactions to have a non-empty annotation attribute.
This test checks if any annotations at all are present in the SBML
annotations field for each reaction, irrespective of the type of
annotation i.e. specific database cross-references, ontology terms,
additional information. For this test to pass the model is expected to
have reactions and each of them should have some form of annotation.
Implementation:
Check if the annotation attribute of each cobra.Reaction object of the
model is unset or empty.
"""
ann = test_reaction_annotation_presence.annotation
ann["data"] = get_ids(
annotation.find_components_without_annotation(model, "reactions")
)
ann["metric"] = len(ann["data"]) / len(model.reactions)
ann["message"] = wrapper.fill(
"""A total of {} reactions ({:.2%}) lack any form of annotation:
{}""".format(
len(ann["data"]), ann["metric"], truncate(ann["data"])
)
)
assert len(ann["data"]) == 0, ann["message"]
@annotate(title="Presence of Gene Annotation", format_type="count")
def test_gene_product_annotation_presence(model):
"""
Expect all genes to have a non-empty annotation attribute.
This test checks if any annotations at all are present in the SBML
annotations field (extended by FBC package) for each gene product,
irrespective of the type of annotation i.e. specific database,
cross-references, ontology terms, additional information. For this test to
pass the model is expected to have genes and each of them should have some
form of annotation.
Implementation:
Check if the annotation attribute of each cobra.Gene object of the
model is unset or empty.
"""
ann = test_gene_product_annotation_presence.annotation
ann["data"] = get_ids(annotation.find_components_without_annotation(model, "genes"))
ann["metric"] = len(ann["data"]) / len(model.genes)
ann["message"] = wrapper.fill(
"""A total of {} genes ({:.2%}) lack any form of
annotation: {}""".format(
len(ann["data"]), ann["metric"], truncate(ann["data"])
)
)
assert len(ann["data"]) == 0, ann["message"]
@pytest.mark.parametrize("db", list(annotation.METABOLITE_ANNOTATIONS))
@annotate(
title="Metabolite Annotations Per Database",
format_type="percent",
message=dict(),
data=dict(),
metric=dict(),
)
def test_metabolite_annotation_overview(model, db):
"""
Expect all metabolites to have annotations from common databases.
Specific database cross-references are paramount to mapping information.
    Providing references to as many databases as possible helps to make the
    metabolic model more accessible to other researchers. This not only
    facilitates the use of a model in a broad array of computational pipelines,
it also promotes the metabolic model itself to become an organism-specific
knowledge base.
For this test to pass, each metabolite annotation should contain
    cross-references to a number of databases. The current selection is
listed in `annotation.py`, but an ongoing discussion can be found at
https://github.com/opencobra/memote/issues/332. For each database this
test checks for the presence of its corresponding namespace ID to comply
with the MIRIAM guidelines i.e. they have to match those defined on
https://identifiers.org/.
Since each database is quite different and some potentially incomplete, it
may not be feasible to achieve 100% coverage for each of them. Generally
it should be possible, however, to obtain cross-references to at least
one of the databases for all metabolites consistently.
Implementation:
Check if the keys of the annotation attribute of each cobra.Metabolite of
the model match with a selection of common biochemical databases. The
annotation attribute of cobrapy components is a dictionary of
key:value pairs.
"""
ann = test_metabolite_annotation_overview.annotation
ann["data"][db] = get_ids(
annotation.generate_component_annotation_overview(model.metabolites, db)
)
ann["metric"][db] = len(ann["data"][db]) / len(model.metabolites)
ann["message"][db] = wrapper.fill(
"""The following {} metabolites ({:.2%}) lack annotation for {}:
{}""".format(
len(ann["data"][db]), ann["metric"][db], db, truncate(ann["data"][db])
)
)
assert len(ann["data"][db]) == 0, ann["message"][db]
@pytest.mark.parametrize("db", list(annotation.REACTION_ANNOTATIONS))
@annotate(
title="Reaction Annotations Per Database",
format_type="percent",
message=dict(),
data=dict(),
metric=dict(),
)
def test_reaction_annotation_overview(model, db):
"""
Expect all reactions to have annotations from common databases.
Specific database cross-references are paramount to mapping information.
    Providing references to as many databases as possible helps to make the
    metabolic model more accessible to other researchers. This not only
    facilitates the use of a model in a broad array of computational pipelines,
it also promotes the metabolic model itself to become an organism-specific
knowledge base.
For this test to pass, each reaction annotation should contain
    cross-references to a number of databases. The current selection is
listed in `annotation.py`, but an ongoing discussion can be found at
https://github.com/opencobra/memote/issues/332. For each database this
test checks for the presence of its corresponding namespace ID to comply
with the MIRIAM guidelines i.e. they have to match those defined on
https://identifiers.org/.
Since each database is quite different and some potentially incomplete, it
may not be feasible to achieve 100% coverage for each of them. Generally
it should be possible, however, to obtain cross-references to at least
one of the databases for all reactions consistently.
Implementation:
Check if the keys of the annotation attribute of each cobra.Reaction of
the model match with a selection of common biochemical databases. The
annotation attribute of cobrapy components is a dictionary of
key:value pairs.
"""
ann = test_reaction_annotation_overview.annotation
ann["data"][db] = get_ids(
annotation.generate_component_annotation_overview(model.reactions, db)
)
ann["metric"][db] = len(ann["data"][db]) / len(model.reactions)
ann["message"][db] = wrapper.fill(
"""The following {} reactions ({:.2%}) lack annotation for {}:
{}""".format(
len(ann["data"][db]), ann["metric"][db], db, truncate(ann["data"][db])
)
)
assert len(ann["data"][db]) == 0, ann["message"][db]
@pytest.mark.parametrize("db", list(annotation.GENE_PRODUCT_ANNOTATIONS))
@annotate(
title="Gene Annotations Per Database",
format_type="percent",
message=dict(),
data=dict(),
metric=dict(),
)
def test_gene_product_annotation_overview(model, db):
"""
Expect all genes to have annotations from common databases.
Specific database cross-references are paramount to mapping information.
    Providing references to as many databases as possible helps to make the
    metabolic model more accessible to other researchers. This not only
    facilitates the use of a model in a broad array of computational pipelines,
it also promotes the metabolic model itself to become an organism-specific
knowledge base.
For this test to pass, each gene annotation should contain
    cross-references to a number of databases. The current selection is
listed in `annotation.py`, but an ongoing discussion can be found at
https://github.com/opencobra/memote/issues/332. For each database this
test checks for the presence of its corresponding namespace ID to comply
with the MIRIAM guidelines i.e. they have to match those defined on
https://identifiers.org/.
Since each database is quite different and some potentially incomplete, it
may not be feasible to achieve 100% coverage for each of them. Generally
it should be possible, however, to obtain cross-references to at least
one of the databases for all gene products consistently.
Implementation:
Check if the keys of the annotation attribute of each cobra.Gene of
the model match with a selection of common genome databases. The
annotation attribute of cobrapy components is a dictionary of
key:value pairs.
"""
ann = test_gene_product_annotation_overview.annotation
ann["data"][db] = get_ids(
annotation.generate_component_annotation_overview(model.genes, db)
)
ann["metric"][db] = len(ann["data"][db]) / len(model.genes)
ann["message"][db] = wrapper.fill(
"""The following {} genes ({:.2%}) lack annotation for {}:
{}""".format(
len(ann["data"][db]), ann["metric"][db], db, truncate(ann["data"][db])
)
)
assert len(ann["data"][db]) == 0, ann["message"][db]
@pytest.mark.parametrize("db", list(annotation.METABOLITE_ANNOTATIONS))
@annotate(
title="Metabolite Annotation Conformity Per Database",
format_type="percent",
message=dict(),
data=dict(),
metric=dict(),
)
def test_metabolite_annotation_wrong_ids(model, db):
"""
Expect all annotations of metabolites to be in the correct format.
To identify databases and the identifiers belonging to them, computational
tools rely on the presence of specific patterns. Only when these patterns
can be identified consistently is an ID truly machine-readable. This test
checks if the database cross-references in metabolite annotations conform
to patterns defined according to the MIRIAM guidelines, i.e. matching
those that are defined at https://identifiers.org/.
The required formats, i.e., regex patterns are further outlined in
`annotation.py`. This test does not carry out a web query for the composed
    URI, it merely checks that the regex patterns match the identifiers.
Implementation:
For those metabolites whose annotation keys match any of the tested
databases, check if the corresponding values match the identifier pattern
of each database.
"""
ann = test_metabolite_annotation_wrong_ids.annotation
ann["data"][db] = total = get_ids(
set(model.metabolites).difference(
annotation.generate_component_annotation_overview(model.metabolites, db)
)
)
ann["metric"][db] = 1.0
ann["message"][db] = wrapper.fill(
"""There are no metabolite annotations for the {} database.
""".format(
db
)
)
assert len(total) > 0, ann["message"][db]
ann["data"][db] = get_ids(
annotation.generate_component_annotation_miriam_match(
model.metabolites, "metabolites", db
)
)
ann["metric"][db] = len(ann["data"][db]) / len(total)
ann["message"][db] = wrapper.fill(
"""A total of {} metabolite annotations ({:.2%}) do not match the
regular expression patterns defined on identifiers.org for the {}
database: {}""".format(
len(ann["data"][db]), ann["metric"][db], db, truncate(ann["data"][db])
)
)
assert len(ann["data"][db]) == 0, ann["message"][db]
@pytest.mark.parametrize("db", annotation.REACTION_ANNOTATIONS)
@annotate(
title="Reaction Annotation Conformity Per Database",
format_type="percent",
message=dict(),
data=dict(),
metric=dict(),
)
def test_reaction_annotation_wrong_ids(model, db):
"""
Expect all annotations of reactions to be in the correct format.
To identify databases and the identifiers belonging to them, computational
tools rely on the presence of specific patterns. Only when these patterns
can be identified consistently is an ID truly machine-readable. This test
checks if the database cross-references in reaction annotations conform
to patterns defined according to the MIRIAM guidelines, i.e. matching
those that are defined at https://identifiers.org/.
The required formats, i.e., regex patterns are further outlined in
`annotation.py`. This test does not carry out a web query for the composed
    URI, it merely checks that the regex patterns match the identifiers.
Implementation:
For those reaction whose annotation keys match any of the tested
databases, check if the corresponding values match the identifier pattern
of each database.
"""
ann = test_reaction_annotation_wrong_ids.annotation
ann["data"][db] = total = get_ids(
set(model.reactions).difference(
annotation.generate_component_annotation_overview(model.reactions, db)
)
)
ann["metric"][db] = 1.0
ann["message"][db] = wrapper.fill(
"""There are no reaction annotations for the {} database.
""".format(
db
)
)
assert len(total) > 0, ann["message"][db]
ann["data"][db] = get_ids(
annotation.generate_component_annotation_miriam_match(
model.reactions, "reactions", db
)
)
ann["metric"][db] = len(ann["data"][db]) / len(model.reactions)
ann["message"][db] = wrapper.fill(
"""A total of {} reaction annotations ({:.2%}) do not match the
regular expression patterns defined on identifiers.org for the {}
database: {}""".format(
len(ann["data"][db]), ann["metric"][db], db, truncate(ann["data"][db])
)
)
assert len(ann["data"][db]) == 0, ann["message"][db]
@pytest.mark.parametrize("db", annotation.GENE_PRODUCT_ANNOTATIONS)
@annotate(
title="Gene Annotation Conformity Per Database",
format_type="percent",
message=dict(),
data=dict(),
metric=dict(),
)
def test_gene_product_annotation_wrong_ids(model, db):
"""
Expect all annotations of genes/gene-products to be in the correct format.
To identify databases and the identifiers belonging to them, computational
tools rely on the presence of specific patterns. Only when these patterns
can be identified consistently is an ID truly machine-readable. This test
    checks if the database cross-references in gene annotations conform
to patterns defined according to the MIRIAM guidelines, i.e. matching
those that are defined at https://identifiers.org/.
The required formats, i.e., regex patterns are further outlined in
`annotation.py`. This test does not carry out a web query for the composed
    URI, it merely checks that the regex patterns match the identifiers.
Implementation:
For those genes whose annotation keys match any of the tested
databases, check if the corresponding values match the identifier pattern
of each database.
"""
ann = test_gene_product_annotation_wrong_ids.annotation
ann["data"][db] = total = get_ids(
set(model.genes).difference(
annotation.generate_component_annotation_overview(model.genes, db)
)
)
ann["metric"][db] = 1.0
ann["message"][db] = wrapper.fill(
"""There are no gene annotations for the {} database.
""".format(
db
)
)
assert len(total) > 0, ann["message"][db]
ann["data"][db] = get_ids(
annotation.generate_component_annotation_miriam_match(model.genes, "genes", db)
)
ann["metric"][db] = len(ann["data"][db]) / len(model.genes)
ann["message"][db] = wrapper.fill(
"""A total of {} gene annotations ({:.2%}) do not match the
regular expression patterns defined on identifiers.org for the {}
database: {}""".format(
len(ann["data"][db]), ann["metric"][db], db, truncate(ann["data"][db])
)
)
assert len(ann["data"][db]) == 0, ann["message"][db]
@annotate(title="Uniform Metabolite Identifier Namespace", format_type="count")
def test_metabolite_id_namespace_consistency(model):
"""
Expect metabolite identifiers to be from the same namespace.
In well-annotated models it is no problem if the pool of main identifiers
for metabolites consists of identifiers from several databases. However,
in models that lack appropriate annotations, it may hamper the ability of
other researchers to use it. Running the model through a computational
pipeline may be difficult without first consolidating the namespace.
Hence, this test checks if the main metabolite identifiers can be
attributed to one single namespace based on the regex patterns defined at
https://identifiers.org/
Implementation:
Generate a table with each column corresponding to one
database from the selection and each row to a metabolite identifier. A
Boolean entry indicates whether the identifier matches the regular
expression of the corresponding database. Since the Biocyc pattern matches
broadly, we assume that any instance of an identifier matching to Biocyc
AND any other database pattern is a false positive match for Biocyc and
thus set it to ``false``. Sum the positive matches for each database and
assume that the largest set is the 'main' identifier namespace.
"""
ann = test_metabolite_id_namespace_consistency.annotation
overview = annotation.generate_component_id_namespace_overview(model, "metabolites")
distribution = overview.sum()
cols = list(distribution.index)
largest = distribution[cols].idxmax()
# Assume that all identifiers match the largest namespace.
ann["data"] = list(
set(get_ids(model.metabolites)).difference(
overview[overview[largest]].index.tolist()
)
)
ann["metric"] = len(ann["data"]) / len(model.metabolites)
ann["message"] = wrapper.fill(
"""{} metabolite identifiers ({:.2%}) deviate from the largest found
namespace ({}): {}""".format(
len(ann["data"]), ann["metric"], largest, truncate(ann["data"])
)
)
assert len(ann["data"]) == 0, ann["message"]
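# Added illustration (not part of the original test module): a minimal sketch of
# how the 'main' namespace is picked from a Boolean overview table. The column
# and row labels below are hypothetical examples, not memote API values.
def _namespace_choice_sketch():
    import pandas as pd
    overview = pd.DataFrame(
        {"bigg.metabolite": [True, True, False],
         "kegg.compound": [False, True, False]},
        index=["glc__D_c", "h2o_c", "x_c"])
    distribution = overview.sum()    # number of matching identifiers per database
    largest = distribution.idxmax()  # assumed 'main' identifier namespace
    deviating = overview[~overview[largest]].index.tolist()
    return largest, deviating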
@annotate(title="Uniform Reaction Identifier Namespace", format_type="count")
def test_reaction_id_namespace_consistency(model):
"""
Expect reaction identifiers to be from the same namespace.
In well-annotated models it is no problem if the pool of main identifiers
for reactions consists of identifiers from several databases. However,
in models that lack appropriate annotations, it may hamper the ability of
other researchers to use it. Running the model through a computational
pipeline may be difficult without first consolidating the namespace.
Hence, this test checks if the main reaction identifiers can be
attributed to one single namespace based on the regex patterns defined at
https://identifiers.org/
Implementation:
    Generate a pandas.DataFrame with each column corresponding to one
    database from the selection and each row to a reaction identifier. A
    Boolean entry indicates whether the identifier matches the regular
    expression of the corresponding database. Since the Biocyc pattern matches
    broadly, we assume that any instance of an identifier matching to Biocyc
    AND any other database pattern is a false positive match for Biocyc and
    thus set it to ``false``. Sum the positive matches for each database and
    assume that the largest set is the 'main' identifier namespace.
"""
ann = test_reaction_id_namespace_consistency.annotation
overview = annotation.generate_component_id_namespace_overview(model, "reactions")
distribution = overview.sum()
cols = list(distribution.index)
largest = distribution[cols].idxmax()
# Assume that all identifiers match the largest namespace.
ann["data"] = list(
set(get_ids(model.reactions)).difference(
overview[overview[largest]].index.tolist()
)
)
ann["metric"] = len(ann["data"]) / len(model.reactions)
ann["message"] = wrapper.fill(
"""{} reaction identifiers ({:.2%}) deviate from the largest found
namespace ({}): {}""".format(
len(ann["data"]), ann["metric"], largest, truncate(ann["data"])
)
)
assert len(ann["data"]) == 0, ann["message"]
| apache-2.0 |
aisthesis/mfstockmkt | velocity/velo.py | 1 | 1938 | """
.. Copyright (c) 2015 Marshall Farrier
license http://opensource.org/licenses/MIT
Get velocity data
=================
"""
import operator
import numpy as np
import pandas as pd
import pynance as pn
import constants
from mov_ext import MovingExtremumFinder
def rolling_vel(eqdata, window=100, selection='Adj Close'):
"""
Return a dataframe with prices and both upward and downward
velocity data.
Parameters
----------
eqdata : DataFrame
Source data
window : int, optional
The window of prior sessions over which
velocity is calculated. Defaults to 100.
selection : str, optional
Column of `eqdata` in which historical prices
are specified. Defaults to 'Adj Close'.
Returns
-------
mov_vel : DataFrame
"""
if window + 1 >= eqdata.index.shape[0]:
raise ValueError('insufficient data for given window')
outix = eqdata.index[window:]
upcol = constants.UPVEL_COL
downcol = constants.DOWNVEL_COL
mov_vel = pd.DataFrame(index=eqdata.index[window:],
columns=[selection, upcol, downcol], dtype='float64')
mov_vel.loc[:, selection] = eqdata.loc[:, selection].values[window:]
mov_vel.loc[:, upcol] = _velocity(eqdata, window, selection, operator.gt, upcol)
mov_vel.loc[:, downcol] = _velocity(eqdata, window, selection, operator.lt, downcol)
return mov_vel
def _velocity(eqdata, window, selection, compfn, outputcol):
inputdata = eqdata.loc[:, selection].values
vels = np.empty_like(eqdata.index[window:], dtype='float64')
ext_finder = MovingExtremumFinder(inputdata, window, compfn)
win_float = float(window)
# values up to `window` don't have adequate history
for i in range(window):
ext_finder.insert(i)
for i in range(window, inputdata.shape[0]):
vels[i - window] = float(window - ext_finder.insert(i)) / win_float
return vels
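if __name__ == '__main__':
    # Added usage sketch (not in the original module): builds a synthetic price
    # series instead of downloading data; the column name 'Adj Close' matches
    # the default `selection` argument of rolling_vel() above.
    _idx = pd.date_range('2015-01-02', periods=252)
    _prices = pd.DataFrame({'Adj Close': np.linspace(10., 20., 252)}, index=_idx)
    print(rolling_vel(_prices, window=100).head())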
| mit |
saiwing-yeung/scikit-learn | sklearn/svm/tests/test_svm.py | 2 | 34260 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_allclose
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.exceptions import ChangedBehaviorWarning
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import NotFittedError
from sklearn.multiclass import OneVsRestClassifier
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that no other thread is
    # calling this wrapper, and hence `srand`, concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
@ignore_warnings
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
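# Added illustration (not part of the original test suite): minimal use of a
# precomputed linear kernel. Training needs the square (n_train, n_train) Gram
# matrix; prediction needs the rectangular (n_test, n_train) one computed
# against the same training points.
def _precomputed_kernel_sketch():
    X_train = np.array(X, dtype=np.float64)
    X_test = np.array(T, dtype=np.float64)
    clf = svm.SVC(kernel='precomputed')
    clf.fit(np.dot(X_train, X_train.T), Y)          # square Gram matrix
    return clf.predict(np.dot(X_test, X_train.T))   # rectangular Gram matrix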
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert_allclose(np.linalg.norm(lsvr.coef_),
np.linalg.norm(svr.coef_), 1, 0.0001)
assert_almost_equal(score1, score2, 2)
def test_linearsvr_fit_sampleweight():
# check correct result when sample_weight is 1
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
n_samples = len(diabetes.target)
unit_weight = np.ones(n_samples)
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
sample_weight=unit_weight)
score1 = lsvr.score(diabetes.data, diabetes.target)
lsvr_no_weight = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score2 = lsvr_no_weight.score(diabetes.data, diabetes.target)
assert_allclose(np.linalg.norm(lsvr.coef_),
np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001)
assert_almost_equal(score1, score2, 2)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
random_weight = random_state.randint(0, 10, n_samples)
lsvr_unflat = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
sample_weight=random_weight)
score3 = lsvr_unflat.score(diabetes.data, diabetes.target,
sample_weight=random_weight)
X_flat = np.repeat(diabetes.data, random_weight, axis=0)
y_flat = np.repeat(diabetes.target, random_weight, axis=0)
lsvr_flat = svm.LinearSVR(C=1e3).fit(X_flat, y_flat)
score4 = lsvr_flat.score(X_flat, y_flat)
assert_almost_equal(score3, score4, 2)
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check shape with decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
# check deprecation warning
clf = svm.SVC(kernel='linear', C=0.1).fit(X_train, y_train)
msg = "change the shape of the decision function"
dec = assert_warns_message(ChangedBehaviorWarning, msg,
clf.decision_function, X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_predict():
# Test SVR's decision_function
# Sanity check, test that predict implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="balanced"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='macro')
<= metrics.f1_score(y, y_pred_balanced,
average='macro'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_hinge", "loss='l2'", "1.0"),
svm.LinearSVC(loss="l2").fit, X, y)
# LinearSVR
# loss l1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l1", "epsilon_insensitive", "loss='l1'",
"1.0"),
svm.LinearSVR(loss="l1").fit, X, y)
# loss l2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
def test_linear_svx_uppercase_loss_penality_raises_error():
    # Check that uppercase loss/penalty notation raises an error at
    # _fit_liblinear, which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
assert_raise_message(ValueError, "loss='SQuared_hinge' is not supported",
svm.LinearSVC(loss="SQuared_hinge").fit, X, y)
assert_raise_message(ValueError, ("The combination of penalty='L2'"
" and loss='squared_hinge' is not supported"),
svm.LinearSVC(penalty="L2").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_linearsvc_fit_sampleweight():
# check correct result when sample_weight is 1
n_samples = len(X)
unit_weight = np.ones(n_samples)
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf_unitweight = svm.LinearSVC(random_state=0).\
fit(X, Y, sample_weight=unit_weight)
# check if same as sample_weight=None
assert_array_equal(clf_unitweight.predict(T), clf.predict(T))
assert_allclose(clf.coef_, clf_unitweight.coef_, 1, 0.0001)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
random_weight = random_state.randint(0, 10, n_samples)
lsvc_unflat = svm.LinearSVC(random_state=0).\
fit(X, Y, sample_weight=random_weight)
pred1 = lsvc_unflat.predict(T)
X_flat = np.repeat(X, random_weight, axis=0)
y_flat = np.repeat(Y, random_weight, axis=0)
lsvc_flat = svm.LinearSVC(random_state=0).fit(X_flat, y_flat)
pred2 = lsvc_flat.predict(T)
assert_array_equal(pred1, pred2)
assert_allclose(lsvc_unflat.coef_, lsvc_flat.coef_, 1, 0.0001)
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
    # Check that primal coef modifications are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
    # clone to check that cloning works with lambda-function kernels
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
def test_decision_function_shape_two_class():
for n_classes in [2, 3]:
X, y = make_blobs(centers=n_classes, random_state=0)
for estimator in [svm.SVC, svm.NuSVC]:
clf = OneVsRestClassifier(estimator(
decision_function_shape="ovr")).fit(X, y)
assert_equal(len(clf.predict(X)), len(y))
| bsd-3-clause |
flennerhag/mlens | mlens/parallel/tests/test_b4_layer_push_pop.py | 1 | 4392 | """ML-Ensemble
:author: Sebastian Flennerhag
:copyright: 2017-2018
:license: MIT
Test layer push and pop ops.
"""
import os
import numpy as np
from mlens.index import INDEXERS
from mlens.testing.dummy import Data, ECM
from mlens.utils.dummy import Scale, LogisticRegression
from mlens.parallel import make_group, Layer, run
from mlens.externals.sklearn.base import clone
try:
from contextlib import redirect_stdout
except ImportError:
from mlens.externals.fixes import redirect as redirect_stdout
PREPROCESSING_1 = {'no1': [], 'sc1': [('scale', Scale())]}
ESTIMATORS_PROBA_1 = {'sc1': [('offs1', LogisticRegression(offset=2)),
('null1', LogisticRegression())],
'no1': [('offs1', LogisticRegression(offset=2)),
('null1', LogisticRegression())]}
PREPROCESSING_2 = {'no2': [], 'sc2': [('scale', Scale())]}
ESTIMATORS_PROBA_2 = {'sc2': [('offs2', LogisticRegression(offset=2)),
('null2', LogisticRegression())],
'no2': [('offs2', LogisticRegression(offset=2)),
('null2', LogisticRegression())]}
def scorer(p, y): return np.mean(p - y)
data = Data('stack', True, True)
X, y = data.get_data((25, 4), 3)
idx1 = INDEXERS['stack']()
g1 = make_group(
idx1, ESTIMATORS_PROBA_1, PREPROCESSING_1,
learner_kwargs={'proba': True, 'verbose': True},
transformer_kwargs={'verbose': True})
idx2 = INDEXERS['subsemble']()
g2 = make_group(
idx2, ESTIMATORS_PROBA_2, PREPROCESSING_2,
learner_kwargs={'proba': False, 'verbose': True},
transformer_kwargs={'verbose': True})
layer = Layer('layer')
def test_push_1():
"""[Parallel | Layer] Testing single push"""
assert not layer.__stack__
layer.push(g1)
assert layer.stack[0] is g1
assert layer.__stack__
with open(os.devnull, 'w') as f, redirect_stdout(f):
run(layer, 'fit', X, y)
run(layer, 'transform', X)
run(layer, 'predict', X)
assert layer.__fitted__
def test_push_2():
"""[Parallel | Layer] Test double push"""
layer.push(g2)
assert not layer.__fitted__
with open(os.devnull, 'w') as f, redirect_stdout(f):
a = run(layer, 'fit', X, y, refit=False, return_preds=True)
assert layer.__fitted__
with open(os.devnull, 'w') as f, redirect_stdout(f):
b = run(layer, 'fit', X, y, refit=False, return_preds=True)
with open(os.devnull, 'w') as f, redirect_stdout(f):
c = run(layer, 'transform', X, return_preds=True)
with open(os.devnull, 'w') as f, redirect_stdout(f):
d = run(layer, 'fit', X, y, refit=True, return_preds=True)
np.testing.assert_array_equal(a, b)
np.testing.assert_array_equal(a, c)
np.testing.assert_array_equal(a, d)
def test_clone():
"""[Parallel | Layer] Test cloning"""
lyr = clone(layer)
assert lyr.__stack__
assert not lyr.__fitted__
with open(os.devnull, 'w') as f, redirect_stdout(f):
F = run(layer, 'fit', X, y, refit=False, return_preds=True)
H = run(lyr, 'fit', X, y, return_preds=True)
np.testing.assert_array_equal(F, H)
with open(os.devnull, 'w') as f, redirect_stdout(f):
F = run(layer, 'transform', X)
H = run(lyr, 'transform', X)
np.testing.assert_array_equal(F, H)
with open(os.devnull, 'w') as f, redirect_stdout(f):
F = run(layer, 'predict', X)
H = run(lyr, 'predict', X)
np.testing.assert_array_equal(F, H)
def test_data():
"""[Parallel | Learner] Test data"""
idx = INDEXERS['subsemble']()
lyr = Layer('layer-scorer').push(make_group(idx, ECM, None))
for lr in lyr.learners:
lr.scorer = scorer
run(lyr, 'fit', X, y, return_preds=True)
repr = lyr.data.__repr__()
assert lyr.raw_data
assert isinstance(lyr.raw_data, list)
assert isinstance(lyr.data, dict)
assert repr
assert 'score' in repr
def test_pop():
"""[Parallel | Layer] Test pop"""
# Popping one group leaves the layer intact
g = layer.pop(0)
assert layer.__stack__
assert layer.__fitted__
assert g1 is g
    # Popping both leaves it empty
g = layer.pop(0)
assert not layer.__stack__
assert not layer.__fitted__
assert g2 is g
# Pushing fitted groups makes the layer fitted
layer.push(g1, g2)
assert layer.__fitted__
| mit |
evgchz/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 23 | 8317 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
"""Incremental PCA on dense arrays."""
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
"""Test that the projection of data is correct."""
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
"""Test that the projection of data can be inverted."""
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
"""Test that n_components is >=1 and <= n_features."""
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
"""Test that components_ sign is stable over batch sizes."""
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
"""Test that changing n_components will raise an error."""
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
"""Test that components_ sign is stable over batch sizes."""
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
"""Test that components_ values are stable over batch sizes."""
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
"""Test that fit and partial_fit get equivalent results."""
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
"""Test that IncrementalPCA and PCA are approximate (to a sign flip)."""
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
"""Test that IncrementalPCA and PCA are approximate (to a sign flip)."""
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
"""Test that PCA and IncrementalPCA calculations match"""
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
"""Test that PCA and IncrementalPCA transforms match to sign flip."""
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
nelango/ViralityAnalysis | model/lib/sklearn/feature_selection/tests/test_rfe.py | 209 | 11733 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
    Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
with warnings.catch_warnings(record=True):
# estimator_params is deprecated
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe_deprecation_estimator_params():
deprecation_message = ("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.")
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
assert_warns_message(DeprecationWarning, deprecation_message,
RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
assert_warns_message(DeprecationWarning, deprecation_message,
RFECV(estimator=SVC(), step=1, cv=5,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
| mit |
EachenKuang/PythonRepository | RubbishMessage-master/src/predictor.py | 1 | 9667 | # -*- coding: utf-8 -*-
# '''
# Author: Eachen Kuang
# Date: 2017.9.20
# Goal: topic-based sentiment-analysis competition entry (segmentation with jieba)
# Other:
# '''
# import gensim
import numpy as np
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.externals import joblib
from gensim import corpora, models, matutils
import random
import logging
from datetime import datetime
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def iter_readbatch(data_stream, minibatch_size=10000):
"""
    Iterator.
    Given a file stream (e.g. a large text file), yield ``minibatch_size`` lines
    at a time (the signature defaults to 10,000 lines per batch).
    :param data_stream: path of the text file to read, one tokenized document per line
    :param minibatch_size: number of lines per yielded batch
    :return: yields a list of token lists for each batch
"""
cur_line_num = 0
with open(data_stream, 'r') as doc:
text = []
for line in doc:
text.append(line.strip().split())
cur_line_num += 1
if cur_line_num >= minibatch_size:
yield text
text = []
cur_line_num = 0
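# Illustrative usage of iter_readbatch (the data path is an assumption, matching the one used below):
#   for batch in iter_readbatch('../data/trainLeft.txt', minibatch_size=20000):
#       process(batch)  # 'process' is a placeholder; each batch is a list of up to 20000 tokenised lines
# Note that a final batch smaller than minibatch_size is never yielded by this generator.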
def save_dict():
"""
    Save the dictionary and the corpus
:return:
"""
dictionary = corpora.Dictionary()
for index, text in enumerate(iter_readbatch('../data/trainLeft.txt', minibatch_size=20000)):
print("{} time".format(index))
dictionary.add_documents(text)
print("success")
dictionary.save('../dictionary/dict{}.dict'.format(index))
# corpus = [dictionary.doc2bow(t) for t in text]
# corpora.BleiCorpus.serialize("../corpus/corpus_{}.blei".format(index), corpus, id2word=dictionary)
dictionary.save('../dictionary/dict.dict')
dictionary.save_as_text('../dictionary/dict4text', False)
def save_corpus():
"""
    With the Dictionary already built, standardize the corpus (doc2bow) and store it locally, one corpus file per 20000 docs
:return:
"""
dictionary = corpora.Dictionary.load('../dictionary/new_dict_filter.dict')
for index, text in enumerate(iter_readbatch('../data/testLeft.txt', minibatch_size=20000)):
print ("{} time".format(index))
print ("success")
corpus = [dictionary.doc2bow(t) for t in text]
corpora.MmCorpus.serialize("../test_corpus/corpus_{}.mm".format(index), corpus, id2word=dictionary)
def filter_dictionary():
"""
    Filter the dictionary to shrink it
:return:
"""
"""
    ../dictionary/dict.dict             the original dictionary, keeping the full ~2,000,000 tokens
    ../dictionary/dict_filter.dict      dictionary with ~300,000 tokens left after filtering
    ../dictionary/new_dict_filter.dict  dictionary with ~276,000 tokens left after filtering out ~24,000 special-character tokens
"""
dictionary = corpora.Dictionary.load('../dictionary/dict.dict')
# dictionary.filter_extremes(no_below=3, no_above=0.15, keep_n=None, keep_tokens=None)
    dictionary.filter_extremes(5, 0.1, keep_n=300000)  # keep 300000 tokens
dictionary.save('../dictionary/dict_filter.dict')
dictionary.save_as_text('../dictionary/dict_filter4text')
# 28116
# dictionary = corpora.Dictionary.load('../dictionary/dict_filter.dict')
# bad_ids = []
# with open('../dictionary/dict_filter4text') as fr:
# for line in fr.readlines()[0:28116]:
# line = line.strip().split()[0]
# bad_ids.append(int(line))
# dictionary.filter_tokens(bad_ids=bad_ids)
# dictionary.save('../dictionary/new_dict_filter.dict')
# dictionary.save_as_text('../dictionary/new_dict_filter4text')
def read_label():
label_list = []
with open('../data/classLabel.txt') as fr:
for line in fr:
label_list.append(int(line.strip()))
return np.array(label_list)
def totalScore(pred, y_test):
A = 0
C = 0
B = 0
D = 0
for i in range(len(pred)):
if y_test[i] == 0:
if pred[i] == 0:
A += 1
elif pred[i] == 1:
B += 1
elif y_test[i] == 1:
if pred[i] == 0:
C += 1
elif pred[i] == 1:
D += 1
print (A, B, C, D, A + B + C + D)
rb_pr = 1.0 * D / (B + D)
rb_re = 1.0 * D / (C + D)
rt_pr = 1.0 * A / (A + C)
rt_re = 1.0 * A / (A + B)
# Frb = 0.65 * rb_pr + 0.35 * rb_re
# Frt = 0.65 * rt_pr + 0.35 * rt_re
# Ftotal = 0.7 * Frb + 0.3 * Frt
Ftotal = 2*rb_pr*rb_re/(rb_pr+rb_re)
print(Ftotal)
return Ftotal
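# Note: Ftotal above is the F1 score of label 1 (the "rubbish" class), with precision D/(B+D)
# and recall D/(C+D). Tiny made-up illustration: B=5, C=3, D=102 gives precision ~0.953,
# recall ~0.971 and F1 ~0.962.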
def nbc():
choose_1 = random.randint(0, 25)
# choose_2 = random.randint(0, 25)
corpus1 = corpora.BleiCorpus('../corpus/corpus_{}.blei'.format(choose_1))
corpus1 = corpora.BleiCorpus('../corpus/corpus_0.blei')
# corpus_2 = corpora.BleiCorpus('../corpus/corpus_{}.blei'.format(choose_1))
    test_X = matutils.corpus2csc(corpus1).transpose()  # test set
# print test_X.get_shape()
label_list = read_label()
    test_y = label_list[(choose_1*20000):(choose_1+1)*20000]  # test set labels
test_y = label_list[(0 * 20000):(0 + 1) * 20000]
clf = MultinomialNB(alpha=0.01)
for index in range(0, 25):
corpus = corpora.BleiCorpus('../corpus/corpus_{}.blei'.format(index))
csi_matrix = matutils.corpus2csc(corpus).transpose()
if csi_matrix.get_shape() ==(20000, 271884):
print(csi_matrix.get_shape())
clf.partial_fit(csi_matrix,
label_list[(index*20000):(index+1)*20000],
classes=np.array([0, 1]))
            print("iteration {}".format(index))
pre = clf.predict(test_X)
totalScore(pre, test_y)
# sklearn.naive_bayes.MultinomialNB
# sklearn.naive_bayes.BernoulliNB
# sklearn.linear_model.Perceptron
# sklearn.linear_model.SGDClassifier
# sklearn.linear_model.PassiveAggressiveClassifier
def this_is_for_fun():
# clf = MultinomialNB(alpha=1.0)
# clf = SGDClassifier(alpha=0.0001)
# clf = PassiveAggressiveClassifier()
clf = BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
# clf = Perceptron(alpha=0.001)
print('BernoulliNB,a = 1')
label_list = read_label()
choose = random.randint(10, 24)
corpus = corpora.MmCorpus('../corpus_mm/corpus_{}.mm'.format(choose))
    test_X = matutils.corpus2csc(corpus).transpose()  # test set
    test_y = label_list[(choose * 20000):(choose + 1) * 20000]  # test set labels
for index in range(10, 25):
corpus = corpora.MmCorpus('../corpus_mm/corpus_{}.mm'.format(index))
csi_matrix = matutils.corpus2csc(corpus).transpose()
clf.partial_fit(csi_matrix,
label_list[(index * 20000):(index + 1) * 20000],
classes=np.array([0, 1]))
        print("iteration {}".format(index))
pre = clf.predict(test_X)
totalScore(pre, test_y)
joblib.dump(clf, "../model/BernoulliNB_model_{}.m".format(index))
# test_corpus = corpora.MmCorpus('../test_corpus/corpus_{}.mm'.format(index))
# csi_matrix = matutils.corpus2csc(test_corpus).transpose()
# clf.predict(csi_matrix)
def tfidf_train():
dictionary = corpora.Dictionary.load('../dictionary/new_dict_filter.dict')
for index in range(0, 1):
corpus = corpora.MmCorpus('../corpus_mm/corpus_{}.mm'.format(index))
tfidf_model = models.TfidfModel(corpus=corpus, dictionary=dictionary)
corpus_tfidf = np.array([tfidf_model[doc] for doc in corpus])
# lsi_model = models.LsiModel(corpus=corpus, id2word=dictionary, num_topics=50)
# corpus_lsi = [lsi_model[doc] for doc in corpus]
# lsi_model.add_documents(corpus)
# print corpus_tfidf
def analyse_lable():
label = read_label()
# a = label[0:20000]
print(label[label<0.5].__len__())
for index in range(0, 25):
temp = label[index*20000:(index+1)*20000]
zero = temp[temp<0.5].__len__()
one = 20000-zero
print(index, zero, one)
# filter_dictionary()
# read_label()
# corpus = corpora.BleiCorpus('../corpus/corpus_3.blei')
# corpus1 = corpora.BleiCorpus('../corpus/corpus_4.blei')
# print corpus.__len__(), corpus1.__len__()
# csi_matrix = matutils.corpus2csc(corpus)
# csi_matrix1 = matutils.corpus2csc(corpus1)
# print csi_matrix.get_shape, csi_matrix1.get_shape
# # clf = MultinomialNB(alpha=0.001)
# # clf.partial_fit(csi_matrix.transpose(), read_label()[0:20000], classes=np.array([0, 1])) # 需要对矩阵转置
# # clf.partial_fit(csi_matrix1.transpose(), read_label()[20000:40000])
# save_dict()
# save_corpus()
# nbc()
# this_is_for_fun()
# tfidf_train()
# analyse_lable()
def predict():
clf = joblib.load("../model/BernoulliNB_model_18.m")
with open('../data/label2.txt', 'w') as fw:
for index in range(20):
print(str(index)+'times')
test_corpus = corpora.MmCorpus('../test_corpus/corpus_{}.mm'.format(index))
csi_matrix = matutils.corpus2csc(test_corpus).transpose()
label = clf.predict(csi_matrix)
print(label.__len__())
for l in label:
fw.writelines(str(l)+'\n')
print(l)
def write_result():
now = str(datetime.now()).replace(' ','-').replace(':','-')
print(now)
with open('../data/label2.txt', 'r') as fr1,\
open('../data/testMsgNum.txt','r') as fr2,\
open('../out/out_{}.csv'.format(now),'w') as fw:
fr1 = fr1.readlines()
fr2 = fr2.readlines()
for index in range(400000):
line1 = str(fr1[index]).strip()
line2 = str(fr2[index]).strip()
if line1=='1':
line1 = 'POSITIVE'
else:
line1 = 'NEGATIVE'
fw.write("{},{}\n".format(line2,line1))
# this_is_for_fun()
# predict()
write_result()
| apache-2.0 |
lazywei/scikit-learn | benchmarks/bench_sgd_regression.py | 283 | 5569 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
| bsd-3-clause |
jmizell/Filtering-Proxy-Example | data/train.py | 1 | 2507 | import re
import glob
import time
from sklearn import svm
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
data_set_labels = []
data_set_values = []
labels = ['negative', 'positive']
classifier_file = 'svm_linear_classifier.pkl'
vectorizer_file = 'vectorizer.pkl'
# read the training data in
for label in labels:
for dataset_file in glob.glob("%s/*.txt" % label):
with open(dataset_file, 'r') as f:
data_text = f.read().splitlines()
# remove any line that begins with a hash symbol
            data_text = list(filter(None, map(lambda x: None if re.search(r"^#", x) else x, data_text)))  # list() so len() works on Python 3
data_set_values += data_text
data_set_labels += [label] * len(data_text)
for label in labels:
print("Loaded %i %s examples" % (data_set_labels.count(label), label))
# split a third into the test set and the rest into the train set
label_train, label_test, values_train, values_test = train_test_split(
data_set_labels,
data_set_values,
test_size=0.33,
random_state=0)
print("Data split, training: %s, test: %s" % (len(values_train), len(values_test)))
# Create feature vectors
vectorizer = TfidfVectorizer(sublinear_tf=True,
lowercase=True,
use_idf=True)
train_vectors = vectorizer.fit_transform(values_train)
test_vectors = vectorizer.transform(values_test)
# Train the model
t0 = time.time()
classifier_liblinear = svm.LinearSVC(C=1, verbose=True).fit(train_vectors, label_train)
train_time = time.time() - t0
accuracy = classifier_liblinear.score(test_vectors, label_test)
print("Training time: %fs" % train_time)
print("Test Accuracy %s" % accuracy)
if accuracy > 0.95:
print("Warning, this model may be over fit")
# Test a few examples
test_examples = [
'select value1, value2, num_value3 from database',
'hello, world!',
'test message',
'GRANT ALL ON TABLE metadatacollectionschema.altidcollection TO omerouser',
'select * from users where username=%s']
sample_vector = vectorizer.transform(test_examples)
for i, prediction in enumerate(classifier_liblinear.predict(sample_vector)):
print(prediction, test_examples[i])
# write the model and vectorizer to disk
joblib.dump(classifier_liblinear, classifier_file, compress=True)
print("Wrote classifier to %s" % classifier_file)
joblib.dump(vectorizer, vectorizer_file, compress=True)
print("Wrote vectorizer to %s" % vectorizer_file)
| apache-2.0 |
jameskeaveney/ElecSus | elecsus/libs/MLFittingRoutine.py | 1 | 5556 | # Copyright 2014 M. A. Zentile, J. Keaveney, L. Weller, D. Whiting,
# C. S. Adams and I. G. Hughes.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Marquardt-Levenberg fit module
Overhauled October 2016 to use the lmfit module
(https://lmfit.github.io/lmfit-py/index.html; pip install lmfit; also in the enthought package manager index)
instead of curve_fit, which makes it much more elegant and easy
to fit only the selected parameters, and include bounds on parameters
Author: JK
differential evolution needs lmfit version >= 0.9.3
Last updated 2018-07-04 MAZ
"""
# py 2.7 compatibility
from __future__ import (division, print_function, absolute_import)
import matplotlib.pyplot as plt
import numpy as np
import lmfit as lm
from spectra import get_spectra
import time
def fit_function(x,E_x,E_y,E_phase,T,lcell,Bfield,Btheta,Bphi,GammaBuf,shift,
DoppTemp=20,rb85frac=72.17,K40frac=0.01,K41frac=6.73,
Elem='Rb',Dline='D2',Constrain=True,output='S0', verbose=False):
"""
Fit function that is used by lmfit. Essentially just a wrapper for the get_spectra method,
but the arguments are specified explicitly rather than being inside a parameter dictionary.
Also allows for the polarisation components to be fitted
For explanation of parameters, see the documentation for get_spectra (in spectra.py)
"""
if verbose:
print(('Parameters: ', Bfield, T, lcell, E_x, E_y, E_phase, Btheta, Bphi, GammaBuf, shift, DoppTemp, rb85frac))
# Ex / Ey separated to allow for fitting polarisation
E_in = np.array([E_x,E_y*np.exp(1.j*E_phase),0.])
#print E_in
#reconstruct parameter dictionary from arguments
p_dict = {'Elem':Elem,'Dline':Dline,'T':T,'lcell':lcell,
'Bfield':Bfield,'Btheta':Btheta,'Bphi':Bphi,'GammaBuf':GammaBuf,
'shift':shift,'DoppTemp':DoppTemp,'Constrain':Constrain,
'rb85frac':rb85frac,'K40frac':K40frac,'K41frac':K41frac}
#for key in p_dict.keys():
# print key, p_dict[key]
outputs = [output]
y_out = get_spectra(x,E_in,p_dict,outputs)[0].real
return y_out
def ML_fit(data,E_in,p_dict,p_dict_bools,data_type='S0',p_dict_bounds=None,method='leastsq',verbose=False):
"""
Main fitting method.
*** Example use cases can be found in the /tests/fitting_tests.py file ***
data: an Nx2 iterable for the x and y data to be fitted
E_in: the initial electric field input. See docstring for the spectra.py module for details.
p_dict: dictionary containing all the calculation (initial) parameters
p_dict_bools: dictionary with the same keys as p_dict, with Boolean values representing each parameter that is to be varied in the fitting
p_dict_bounds: dictionary with the same keys as p_dict, with values that are pairs of min/max values that each parameter can take.
Optional, except for when using 'differential_evolution' fitting method, when bounds must be provided on fit parameters
data_type: Data type to fit experimental data to. Can be one of:
'S0', 'S1', 'S2', 'S3', 'Ix', 'Iy', ...
verbose: Boolean - more print statements provided as the program progresses
method: passed to lmfit, the fitting algorithm that is used. Can be anything that is supported by lmfit, which is currently (as of 2017-June-01):
'leastsq': Levenberg-Marquardt (default)
'least_squares': Least-Squares minimization, using Trust Region Reflective method by default
'differential_evolution': differential evolution
'brute': brute force method
'nelder': Nelder-Mead
'lbfgsb': L-BFGS-B
'powell': Powell
'cg': Conjugate-Gradient
		'newton': Newton-Conjugate-Gradient
		'cobyla': Cobyla
		'tnc': Truncated Newton
		'trust-ncg': Trust Newton-Conjugate-Gradient
'dogleg': Dogleg
'slsqp': Sequential Linear Squares Programming
see https://lmfit.github.io/lmfit-py/fitting.html for more information
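	Illustrative call (a sketch only -- the parameter values below are assumptions, not defaults;
	see /tests/fitting_tests.py for the real examples):
		p_dict = {'Elem':'Rb', 'Dline':'D2', 'T':80., 'lcell':75e-3, 'Bfield':0., 'Btheta':0.,
		          'Bphi':0., 'GammaBuf':0., 'shift':0.}
		p_dict_bools = {'T':True, 'Bfield':True}
		best_params, result = ML_fit(data, E_in, p_dict, p_dict_bools, data_type='S0')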
"""
x = np.array(data[0])
y = np.array(data[1])
# Non-numeric arguments to pass to fitting function
kwargz = {'Elem':p_dict['Elem'],'Dline':p_dict['Dline']}
if 'Constrain' in list(p_dict.keys()):
kwargz['Constrain'] = p_dict['Constrain']
else:
kwargz['Constrain'] = True
kwargz['output'] = data_type
model = lm.Model(fit_function)
params = model.make_params(**p_dict)
params['E_x'].value = E_in[0]
params['E_y'].value = E_in[1][0]
params['E_phase'].value = E_in[1][1]
params['E_phase'].min = 0
params['E_phase'].max = np.pi-1e-4
# Turn off all parameters varying by default, unless specified in p_dict_bools
allkeys = params.valuesdict()
for key in allkeys:
params[key].vary = False
# Turn on fitting parameters as specified in p_dict_bools
for key in p_dict_bools:
params[key].vary = p_dict_bools[key]
if p_dict_bounds is not None:
if key in p_dict_bounds:
params[key].min = p_dict_bounds[key][0]
params[key].max = p_dict_bounds[key][1]
if verbose: print(params)
result = model.fit(y, x=x, params=params, method=method, **kwargz)
	return result.best_values, result
| apache-2.0 |
ArtRand/signalAlign | scripts/experiments/vis_kmer_distributions.py | 2 | 4198 | #!/usr/bin/env python
"""A collection of functions for visualizing kmer distributions
"""
import os
import sys
sys.path.append("../")
from signalAlignLib import TemplateModel, ComplementModel
import string
import numpy as np
from sklearn.neighbors import KernelDensity
from scipy.stats import gaussian_kde
from scipy.stats import norm
def nucleotideToIndex(base):
if base == 'A':
return 0
if base == 'C':
return 1
if base == 'E':
return 2
if base == 'G':
return 3
if base == 'O':
return 4
if base == 'T':
return 5
if base == 'N':
return 6
def getKmerIndex(kmer):
"""This is the algorithm for finding the rank (index) of a kmer)
"""
alphabet = "ACEGOT"
axisLength = len(alphabet)**len(kmer)
l = axisLength/len(alphabet)
i = 0
index = 0
while l > 1:
index += l*nucleotideToIndex(kmer[i])
i += 1
l = l/len(alphabet)
index += nucleotideToIndex(kmer[-1])
return int(index)
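# Example: with the 6-letter alphabet "ACEGOT" this is the base-6 rank of the kmer, e.g.
# getKmerIndex("AA") == 0, getKmerIndex("AC") == 1 and getKmerIndex("TT") == 35.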
class KmerDistribution(object):
def __init__(self, data_directory):
self.data_directory = data_directory
class KmerAlignmentHistogram(KmerDistribution):
def __init__(self, data_directory, kmer):
super(KmerAlignmentHistogram, self).__init__(data_directory=data_directory)
self.kmer = kmer
self.data_file = data_directory + "{}_hist.txt".format(self.kmer)
assert (os.path.isfile(self.data_file)), "No data for kmer {kmer}".format(kmer=self.kmer)
self.histogram = None
self.x_vals = None
self.kde_pdf = None
self.parse_histogram()
def parse_histogram(self):
self.histogram = np.loadtxt(self.data_file, dtype=np.float64)
self.histogram = [x for x in self.histogram if 0 < x < 100]
def parse_xvals(self, x_vals_file):
self.x_vals = np.loadtxt(x_vals_file, dtype=np.float64)
def make_kde(self, x_vals):
kernel = gaussian_kde(self.histogram)
KDE_PDF = kernel.evaluate(x_vals)
return KDE_PDF
class KmerHdpDistribution(KmerDistribution):
def __init__(self, data_directory, kmer):
super(KmerHdpDistribution, self).__init__(data_directory=data_directory)
self.kmer = kmer
self.data_file = data_directory + "{}_distr.txt".format(self.kmer)
assert (os.path.isfile(self.data_file)), "Didn't find distribution file at {}".format(self.data_file)
self.density = None
self.parse_density_file()
def parse_density_file(self):
self.density = np.loadtxt(self.data_file, dtype=np.float64)
def get_kmer_densities(path, kmer):
mC_trans = string.maketrans("C", "E")
hmC_trans = string.maketrans("C", "O")
c_density = KmerHdpDistribution(path, kmer)
mc_density = KmerHdpDistribution(path, kmer.translate(mC_trans))
hmc_density = KmerHdpDistribution(path, kmer.translate(hmC_trans))
return c_density, mc_density, hmc_density
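# e.g. get_kmer_densities(path, "ATCGCC") loads the HDP densities for "ATCGCC" and for its
# substituted variants "ATEGEE" (C->E) and "ATOGOO" (C->O).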
def plot_ont_distribution(kmer, fast5, x_vals):
def get_model_table(model):
return model.get_model_dict()
template_model = get_model_table(TemplateModel(fast5File=fast5))
complement_model = get_model_table(ComplementModel(fast5File=fast5))
return norm.pdf(x_vals, template_model[kmer][0], template_model[kmer][1]), \
norm.pdf(x_vals, complement_model[kmer][0], complement_model[kmer][1])
def plot_hmm_distribution(kmer, hmm, x_vals):
def get_model_from_hmm(model_file):
fH = open(hmm, 'r')
# line 0, type, stateNumber, etc
line = map(float, fH.readline().split())
assert len(line) == 4, "Bad header line"
# line 1, transitions
line = map(float, fH.readline().split())
assert len(line) == 10, "Bad transitions line"
# line 2, model
line = map(float, fH.readline().split())
assert len(line) == 6**6 * 2 # number of kmers * normal distribution parameters
return line
model = get_model_from_hmm(hmm)
kmer_index = getKmerIndex(kmer)
table_index = kmer_index * 2
print model[table_index], model[table_index + 1]
return norm.pdf(x_vals, model[table_index], model[table_index + 1])
| mit |
HyukjinKwon/spark | python/pyspark/pandas/tests/test_ops_on_diff_frames_groupby_rolling.py | 15 | 3500 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
class OpsOnDiffFramesGroupByRollingTest(PandasOnSparkTestCase, TestUtils):
@classmethod
def setUpClass(cls):
super().setUpClass()
set_option("compute.ops_on_diff_frames", True)
@classmethod
def tearDownClass(cls):
reset_option("compute.ops_on_diff_frames")
super().tearDownClass()
def _test_groupby_rolling_func(self, f):
pser = pd.Series([1, 2, 3], name="a")
pkey = pd.Series([1, 2, 3], name="a")
psser = ps.from_pandas(pser)
kkey = ps.from_pandas(pkey)
self.assert_eq(
getattr(psser.groupby(kkey).rolling(2), f)().sort_index(),
getattr(pser.groupby(pkey).rolling(2), f)().sort_index(),
)
pdf = pd.DataFrame({"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0]})
pkey = pd.Series([1, 2, 3, 2], name="a")
psdf = ps.from_pandas(pdf)
kkey = ps.from_pandas(pkey)
self.assert_eq(
getattr(psdf.groupby(kkey).rolling(2), f)().sort_index(),
getattr(pdf.groupby(pkey).rolling(2), f)().sort_index(),
)
self.assert_eq(
getattr(psdf.groupby(kkey)["b"].rolling(2), f)().sort_index(),
getattr(pdf.groupby(pkey)["b"].rolling(2), f)().sort_index(),
)
self.assert_eq(
getattr(psdf.groupby(kkey)[["b"]].rolling(2), f)().sort_index(),
getattr(pdf.groupby(pkey)[["b"]].rolling(2), f)().sort_index(),
)
def test_groupby_rolling_count(self):
self._test_groupby_rolling_func("count")
def test_groupby_rolling_min(self):
self._test_groupby_rolling_func("min")
def test_groupby_rolling_max(self):
self._test_groupby_rolling_func("max")
def test_groupby_rolling_mean(self):
self._test_groupby_rolling_func("mean")
def test_groupby_rolling_sum(self):
self._test_groupby_rolling_func("sum")
def test_groupby_rolling_std(self):
# TODO: `std` now raise error in pandas 1.0.0
self._test_groupby_rolling_func("std")
def test_groupby_rolling_var(self):
self._test_groupby_rolling_func("var")
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_ops_on_diff_frames_groupby_rolling import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
CooperLuan/airflow | airflow/hooks/presto_hook.py | 37 | 2626 | from builtins import str
from pyhive import presto
from pyhive.exc import DatabaseError
from airflow.hooks.dbapi_hook import DbApiHook
import logging
logging.getLogger("pyhive").setLevel(logging.INFO)
class PrestoException(Exception):
pass
class PrestoHook(DbApiHook):
"""
Interact with Presto through PyHive!
>>> ph = PrestoHook()
>>> sql = "SELECT count(1) AS num FROM airflow.static_babynames"
>>> ph.get_records(sql)
[[340698]]
"""
conn_name_attr = 'presto_conn_id'
default_conn_name = 'presto_default'
def get_conn(self):
"""Returns a connection object"""
db = self.get_connection(self.presto_conn_id)
return presto.connect(
host=db.host,
port=db.port,
username=db.login,
catalog=db.extra_dejson.get('catalog', 'hive'),
schema=db.schema)
@staticmethod
def _strip_sql(sql):
return sql.strip().rstrip(';')
def get_records(self, hql, parameters=None):
"""
Get a set of records from Presto
"""
try:
return super(PrestoHook, self).get_records(
self._strip_sql(hql), parameters)
except DatabaseError as e:
obj = eval(str(e))
raise PrestoException(obj['message'])
def get_first(self, hql, parameters=None):
"""
Returns only the first row, regardless of how many rows the query
returns.
"""
try:
return super(PrestoHook, self).get_first(
self._strip_sql(hql), parameters)
except DatabaseError as e:
obj = eval(str(e))
raise PrestoException(obj['message'])
def get_pandas_df(self, hql, parameters=None):
"""
Get a pandas dataframe from a sql query.
"""
import pandas
cursor = self.get_cursor()
cursor.execute(self._strip_sql(hql), parameters)
try:
data = cursor.fetchall()
except DatabaseError as e:
obj = eval(str(e))
raise PrestoException(obj['message'])
column_descriptions = cursor.description
if data:
df = pandas.DataFrame(data)
df.columns = [c[0] for c in column_descriptions]
else:
df = pandas.DataFrame()
return df
def run(self, hql, parameters=None):
"""
Execute the statement against Presto. Can be used to create views.
"""
return super(PrestoHook, self).run(self._strip_sql(hql), parameters)
def insert_rows(self):
        raise NotImplementedError()
| apache-2.0 |
krez13/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 242 | 5885 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that ``n_clusters`` values of 3, 5
and 6 are bad picks for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However when the ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
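For reference, the per-sample silhouette coefficient is s = (b - a) / max(a, b), where a
is the mean intra-cluster distance and b is the mean distance to the nearest other cluster.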
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distict cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
    # The vertical line for the average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
| bsd-3-clause |
jorik041/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`,
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
durcan/phonon_nrecoil | model.py | 1 | 3004 | import numpy as np
import scipy.optimize as op
import matplotlib.pyplot as pl
from matplotlib.ticker import MaxNLocator
import triangle
class pmodel(object):
def mu_model(self, *args):
# linear model for mean
m, b, x = args
return m * x + b
def sig_model(self, *args):
c, d, e, x = args
return np.true_divide(c, x) + d*x + e
def ln_sig_model(self, *args):
        # sigma has a c/x + d*x + e model (c and e are passed as logs)
lnc, d, lne, x = args
return np.true_divide(np.exp(lnc), x) + d*x + np.exp(lne)
def lnlike(self, theta, x, y):
m, b, lnc, lnd = theta
mu = self.mu_model(m, b, x)
inv_sig_sq = 1.0/self.ln_sig_model(lnc, lnd, x)**2
return -0.5*(np.sum(inv_sig_sq*(y-mu)**2 - np.log(2*np.pi*inv_sig_sq)))
def fit_max_like(self, m_guess, b_guess, c_guess, d_guess, e_guess, x, y):
nll = lambda *args: -self.lnlike(*args)
result = op.minimize(
nll,
[m_guess, b_guess, np.log(c_guess), d_guess],
args=(x, y))
return result
def lnprior(self, theta):
m, b, lnc, lnd = theta
if (
-1.0 < m < 1.0 and
-1.0 < b < 1.0 and
-10.0 < lnc < 10.0 and
-10.0 < lnd < 10.0
):
return 0.0
else:
return -np.inf
def lnprob(self, theta, x, y):
lp = self.lnprior(theta)
if not np.isfinite(lp):
return -np.inf
else:
return lp + self.lnlike(theta, x, y)
def runner_graph(self, sampler):
pl.clf()
fig, axes = pl.subplots(4, 1, sharex=True, figsize=(8, 9))
axes[0].plot(sampler.chain[:, :, 0].T, color="k", alpha=0.4)
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$m$")
axes[1].plot(sampler.chain[:, :, 1].T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$b$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$lnc$")
axes[3].plot(np.exp(sampler.chain[:, :, 3]).T, color="k", alpha=0.4)
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$lnd$")
axes[3].set_xlabel("step number")
pl.show()
def corner_graph(self, sampler, burnin):
samples = sampler.chain[:, burnin:, :].reshape((-1, 4))
fig = triangle.corner(
samples,
labels=["$m$", "$b$", "$\ln\,c$", "$\ln\,d$"])
pl.show()
class pmodel2(pmodel):
def sig_model(self, *args):
# model is e*exp(-x/c) + d
c, d, e, x = args
return e*np.exp(-np.true_divide(x, c)) + d
def ln_sig_model(self, *args):
# sigma has e*exp(-x/c) + d model
lnc, lnd, lne, x = args
        return np.exp(lne)*np.exp(-np.true_divide(x, np.exp(lnc))) + np.exp(lnd)
| mit |
fastats/fastats | tests/maths/test_ewma.py | 2 | 1379 |
import numpy as np
import pandas as pd
import pytest
from fastats.maths.ewma import ewma, ewma_2d
def _validate_results(random_data, fn=ewma):
df = pd.DataFrame(random_data)
pandas_result = df.ewm(halflife=10).mean()
ewma_result = fn(random_data, halflife=10)
fast_result = pd.DataFrame(ewma_result)
pd.testing.assert_frame_equal(pandas_result, fast_result)
def test_ewma_1d_array():
rng = np.random.RandomState(0)
random_data = rng.randn(100)
_validate_results(random_data)
@pytest.mark.parametrize('fn', (ewma, ewma_2d))
def test_ewma_basic_sanity(fn):
rng = np.random.RandomState(0)
random_data = rng.randn(10000).reshape(1000, 10)
_validate_results(random_data, fn)
@pytest.mark.parametrize('fn', (ewma, ewma_2d))
def test_bad_halflifes(fn):
random_data = np.random.random(100)
bad_halflifes = [
np.NaN,
np.inf,
-np.inf,
-100,
0,
]
for halflife in bad_halflifes:
with pytest.raises(AssertionError):
fn(random_data, halflife)
@pytest.mark.xfail(reason='NaN support to be implemented')
def test_nan_compat():
random_data = np.random.random((100, 100))
random_data[0, :] = np.nan
random_data[10, :] = np.nan
random_data[70, :50] = np.nan
_validate_results(random_data)
if __name__ == '__main__':
pytest.main([__file__])
| mit |
hanteng/pyCHNadm1 | pyCHNadm1/_examples/seaborn_pair_grid.py | 1 | 2682 | # -*- coding: utf-8 -*-
# "Discrimination is boundless; turning back is the shore. Keys rise and fall; feelings real and illusory."
## Loading datasets
import pyCHNadm1 as CHN
## Loading seaborn and other scientific analysis and visualization modules
## More: http://stanford.edu/~mwaskom/software/seaborn/tutorial/axis_grids.html
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import stats
import matplotlib as mpl
import matplotlib.pyplot as plt
## Using seaborn style
sns.set_style("whitegrid")
sns.set_context("paper", font_scale=1.6, )
## Checking the existing indicators available
indicators_all=CHN.CHNp.items
print indicators_all
## Slicing the data on the year of 2013 on selected indicators
df_2013=CHN.CHNp[['GDP','IPop','websites'],:,2013]
## Using 3ER to get the categorization of CHN regions
df_2013['3ER'] = [CHN.CHNmapping['3ER'][x] for x in df_2013.index]
g = sns.PairGrid(df_2013.reset_index(), hue="3ER")
g.map_diag(plt.hist)
g.map_offdiag(plt.scatter) # sns.regplot (linear regression)
g.add_legend() # Add legend
plt.show()
################################################
## Focusing on the relationship between the number of websites (y-axis) and GDP (x-axis)
df=df_2013[['GDP','websites']]
df=df.sort([u'websites'], ascending=False)
df.columns=['x:GDP','y: websites']
def labelix(i,x):
if i<8:
return CHN.CHNmapping['name_used'][x]+'\n'+CHN.CHNmapping['name_zhs_one'][x]
else:
return '\n'+CHN.CHNmapping['name_zhs_one'][x]
# Changeing indexes for labelling
#df.index=[CHN.CHNmapping['name_zhs_short'][x] for x in df.index] #short Chinese names
#df.index=[CHN.CHNmapping['name_zhs_one'][x]+'\n'+CHN.CHNmapping['name_used'][x] for x in df.index] #English names + name_zhs_one
df.index=[labelix(i,x) for i,x in enumerate(df.index)] #English names + name_zhs_one
from matplotlib.font_manager import FontProperties
ChineseFont = FontProperties('SimHei')
from matplotlib import cm
cmap = cm.get_cmap('Spectral')
fig, ax = plt.subplots()
df.plot('x:GDP', 'y: websites', kind='scatter', ax=ax, s=560, linewidth=0,
c=range(len(df)), colormap=cmap, alpha=0.25)
ax.set_xlabel("GDP (100 Million RMB) "+CHN.CHNmeta['indicator']['GDP'],fontproperties = ChineseFont,fontsize=18,)
ax.set_ylabel("Number of Websites (10 Thousand)"+CHN.CHNmeta['indicator']['websites'],fontproperties = ChineseFont,fontsize=18,)
for k, v in df.iterrows():
ax.annotate(k, v,
xytext=(-3,10), textcoords='offset points',
horizontalalignment='center', verticalalignment='center',
fontproperties = ChineseFont, fontsize=16, color='darkslategrey')
fig.delaxes(fig.axes[1]) #remove color bar
plt.show()
| gpl-3.0 |
MichaelXin/fast-rcnn | tools/train_svms.py | 42 | 13247 | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Train post-hoc SVMs using the algorithm and hyper-parameters from
traditional R-CNN.
"""
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file
from datasets.factory import get_imdb
from fast_rcnn.test import im_detect
from utils.timer import Timer
import caffe
import argparse
import pprint
import numpy as np
import numpy.random as npr
import cv2
from sklearn import svm
import os, sys
class SVMTrainer(object):
"""
Trains post-hoc detection SVMs for all classes using the algorithm
and hyper-parameters of traditional R-CNN.
"""
def __init__(self, net, imdb):
self.imdb = imdb
self.net = net
self.layer = 'fc7'
self.hard_thresh = -1.0001
self.neg_iou_thresh = 0.3
dim = net.params['cls_score'][0].data.shape[1]
scale = self._get_feature_scale()
print('Feature dim: {}'.format(dim))
print('Feature scale: {:.3f}'.format(scale))
self.trainers = [SVMClassTrainer(cls, dim, feature_scale=scale)
for cls in imdb.classes]
def _get_feature_scale(self, num_images=100):
TARGET_NORM = 20.0 # Magic value from traditional R-CNN
_t = Timer()
roidb = self.imdb.roidb
total_norm = 0.0
count = 0.0
inds = npr.choice(xrange(self.imdb.num_images), size=num_images,
replace=False)
for i_, i in enumerate(inds):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
total_norm += np.sqrt((feat ** 2).sum(axis=1)).sum()
count += feat.shape[0]
print('{}/{}: avg feature norm: {:.3f}'.format(i_ + 1, num_images,
total_norm / count))
return TARGET_NORM * 1.0 / (total_norm / count)
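    # The returned multiplier rescales the fc7 features so that their average L2 norm over the
    # sampled images equals TARGET_NORM (20.0), matching traditional R-CNN's feature scaling.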
def _get_pos_counts(self):
counts = np.zeros((len(self.imdb.classes)), dtype=np.int)
roidb = self.imdb.roidb
for i in xrange(len(roidb)):
for j in xrange(1, self.imdb.num_classes):
I = np.where(roidb[i]['gt_classes'] == j)[0]
counts[j] += len(I)
for j in xrange(1, self.imdb.num_classes):
print('class {:s} has {:d} positives'.
format(self.imdb.classes[j], counts[j]))
return counts
def get_pos_examples(self):
counts = self._get_pos_counts()
for i in xrange(len(counts)):
self.trainers[i].alloc_pos(counts[i])
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
gt_inds = np.where(roidb[i]['gt_classes'] > 0)[0]
gt_boxes = roidb[i]['boxes'][gt_inds]
_t.tic()
scores, boxes = im_detect(self.net, im, gt_boxes)
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
cls_inds = np.where(roidb[i]['gt_classes'][gt_inds] == j)[0]
if len(cls_inds) > 0:
cls_feat = feat[cls_inds, :]
self.trainers[j].append_pos(cls_feat)
print 'get_pos_examples: {:d}/{:d} {:.3f}s' \
.format(i + 1, len(roidb), _t.average_time)
def initialize_net(self):
# Start all SVM parameters at zero
self.net.params['cls_score'][0].data[...] = 0
self.net.params['cls_score'][1].data[...] = 0
        # Initialize SVMs in a smart way. Not doing this because it's such
# a good initialization that we might not learn something close to
# the SVM solution.
# # subtract background weights and biases for the foreground classes
# w_bg = self.net.params['cls_score'][0].data[0, :]
# b_bg = self.net.params['cls_score'][1].data[0]
# self.net.params['cls_score'][0].data[1:, :] -= w_bg
# self.net.params['cls_score'][1].data[1:] -= b_bg
# # set the background weights and biases to 0 (where they shall remain)
# self.net.params['cls_score'][0].data[0, :] = 0
# self.net.params['cls_score'][1].data[0] = 0
def update_net(self, cls_ind, w, b):
self.net.params['cls_score'][0].data[cls_ind, :] = w
self.net.params['cls_score'][1].data[cls_ind] = b
def train_with_hard_negatives(self):
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
hard_inds = \
np.where((scores[:, j] > self.hard_thresh) &
(roidb[i]['gt_overlaps'][:, j].toarray().ravel() <
self.neg_iou_thresh))[0]
if len(hard_inds) > 0:
hard_feat = feat[hard_inds, :].copy()
new_w_b = \
self.trainers[j].append_neg_and_retrain(feat=hard_feat)
if new_w_b is not None:
self.update_net(j, new_w_b[0], new_w_b[1])
print(('train_with_hard_negatives: '
'{:d}/{:d} {:.3f}s').format(i + 1, len(roidb),
_t.average_time))
def train(self):
# Initialize SVMs using
# a. w_i = fc8_w_i - fc8_w_0
# b. b_i = fc8_b_i - fc8_b_0
# c. Install SVMs into net
self.initialize_net()
# Pass over roidb to count num positives for each class
# a. Pre-allocate arrays for positive feature vectors
# Pass over roidb, computing features for positives only
self.get_pos_examples()
# Pass over roidb
# a. Compute cls_score with forward pass
# b. For each class
# i. Select hard negatives
# ii. Add them to cache
# c. For each class
# i. If SVM retrain criteria met, update SVM
# ii. Install new SVM into net
self.train_with_hard_negatives()
# One final SVM retraining for each class
# Install SVMs into net
for j in xrange(1, self.imdb.num_classes):
new_w_b = self.trainers[j].append_neg_and_retrain(force=True)
self.update_net(j, new_w_b[0], new_w_b[1])
class SVMClassTrainer(object):
"""Manages post-hoc SVM training for a single object class."""
def __init__(self, cls, dim, feature_scale=1.0,
C=0.001, B=10.0, pos_weight=2.0):
self.pos = np.zeros((0, dim), dtype=np.float32)
self.neg = np.zeros((0, dim), dtype=np.float32)
self.B = B
self.C = C
self.cls = cls
self.pos_weight = pos_weight
self.dim = dim
self.feature_scale = feature_scale
self.svm = svm.LinearSVC(C=C, class_weight={1: 2, -1: 1},
intercept_scaling=B, verbose=1,
penalty='l2', loss='l1',
random_state=cfg.RNG_SEED, dual=True)
self.pos_cur = 0
self.num_neg_added = 0
self.retrain_limit = 2000
self.evict_thresh = -1.1
self.loss_history = []
def alloc_pos(self, count):
self.pos_cur = 0
self.pos = np.zeros((count, self.dim), dtype=np.float32)
def append_pos(self, feat):
num = feat.shape[0]
self.pos[self.pos_cur:self.pos_cur + num, :] = feat
self.pos_cur += num
def train(self):
print('>>> Updating {} detector <<<'.format(self.cls))
num_pos = self.pos.shape[0]
num_neg = self.neg.shape[0]
print('Cache holds {} pos examples and {} neg examples'.
format(num_pos, num_neg))
X = np.vstack((self.pos, self.neg)) * self.feature_scale
y = np.hstack((np.ones(num_pos),
-np.ones(num_neg)))
self.svm.fit(X, y)
w = self.svm.coef_
b = self.svm.intercept_[0]
scores = self.svm.decision_function(X)
pos_scores = scores[:num_pos]
neg_scores = scores[num_pos:]
pos_loss = (self.C * self.pos_weight *
np.maximum(0, 1 - pos_scores).sum())
neg_loss = self.C * np.maximum(0, 1 + neg_scores).sum()
reg_loss = 0.5 * np.dot(w.ravel(), w.ravel()) + 0.5 * b ** 2
tot_loss = pos_loss + neg_loss + reg_loss
self.loss_history.append((tot_loss, pos_loss, neg_loss, reg_loss))
for i, losses in enumerate(self.loss_history):
print((' {:d}: obj val: {:.3f} = {:.3f} '
'(pos) + {:.3f} (neg) + {:.3f} (reg)').format(i, *losses))
return ((w * self.feature_scale, b * self.feature_scale),
pos_scores, neg_scores)
def append_neg_and_retrain(self, feat=None, force=False):
if feat is not None:
num = feat.shape[0]
self.neg = np.vstack((self.neg, feat))
self.num_neg_added += num
if self.num_neg_added > self.retrain_limit or force:
self.num_neg_added = 0
new_w_b, pos_scores, neg_scores = self.train()
# scores = np.dot(self.neg, new_w_b[0].T) + new_w_b[1]
# easy_inds = np.where(neg_scores < self.evict_thresh)[0]
not_easy_inds = np.where(neg_scores >= self.evict_thresh)[0]
if len(not_easy_inds) > 0:
self.neg = self.neg[not_easy_inds, :]
# self.neg = np.delete(self.neg, easy_inds)
print(' Pruning easy negatives')
print(' Cache holds {} pos examples and {} neg examples'.
format(self.pos.shape[0], self.neg.shape[0]))
print(' {} pos support vectors'.format((pos_scores <= 1).sum()))
print(' {} neg support vectors'.format((neg_scores >= -1).sum()))
return new_w_b
else:
return None
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train SVMs (old skool)')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
# Must turn this off to prevent issues when digging into the net blobs to
# pull out features (tricky!)
cfg.DEDUP_BOXES = 0
# Must turn this on because we use the test im_detect() method to harvest
# hard negatives
cfg.TEST.SVM = True
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
print('Using config:')
pprint.pprint(cfg)
# fix the random seed for reproducibility
np.random.seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
if args.gpu_id is not None:
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
out = os.path.splitext(os.path.basename(args.caffemodel))[0] + '_svm'
out_dir = os.path.dirname(args.caffemodel)
imdb = get_imdb(args.imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
# enhance roidb to contain flipped examples
if cfg.TRAIN.USE_FLIPPED:
print 'Appending horizontally-flipped training examples...'
imdb.append_flipped_roidb()
print 'done'
SVMTrainer(net, imdb).train()
filename = '{}/{}.caffemodel'.format(out_dir, out)
net.save(filename)
print 'Wrote svm model to: {:s}'.format(filename)
| mit |
spallavolu/scikit-learn | sklearn/neighbors/base.py | 71 | 31147 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array, _get_n_jobs, gen_even_slices
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..utils.validation import NotFittedError
from ..externals import six
from ..externals.joblib import Parallel, delayed
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
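    For example, ``_get_weights(np.array([[1., 2.]]), 'distance')`` returns
    ``array([[1., 0.5]])``, while ``weights='uniform'`` returns None.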
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
self.n_jobs = n_jobs
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
if metric == 'precomputed':
alg_check = 'brute'
else:
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
            # A tree approach is better for a small number of neighbors,
# and KDTree is generally faster when available
if ((self.n_neighbors is None or
self.n_neighbors < self._fit_X.shape[0] // 2) and
self.metric != 'precomputed'):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
if self.n_neighbors is not None:
if self.n_neighbors <= 0:
raise ValueError(
"Expected n_neighbors > 0. Got %d" %
self.n_neighbors
)
return self
@property
def _pairwise(self):
# For cross-validation routines to split data correctly
return self.metric == 'precomputed'
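# Illustrative sketch (assumed data) of how algorithm='auto' is resolved by
# _fit above for dense input with the default metric (minkowski, p=2, i.e.
# euclidean, which KDTree supports):
#
#   >>> import numpy as np
#   >>> from sklearn.neighbors import NearestNeighbors
#   >>> nn = NearestNeighbors(n_neighbors=2).fit(np.random.rand(100, 3))
#   >>> nn._fit_method
#   'kd_tree'
#
# Sparse input, metric='precomputed', or n_neighbors of at least half the
# number of samples fall back to 'brute' instead.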
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([[1., 1., 1.]])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
n_jobs = _get_n_jobs(self.n_jobs)
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
n_jobs=n_jobs, squared=True)
else:
dist = pairwise_distances(
X, self._fit_X, self.effective_metric_, n_jobs=n_jobs,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = Parallel(n_jobs, backend='threading')(
delayed(self._tree.query, check_pickle=False)(
X[s], n_neighbors, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
if return_distance:
dist, neigh_ind = tuple(zip(*result))
result = np.vstack(dist), np.vstack(neigh_ind)
else:
result = np.vstack(result)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
            # Corner case: when the number of duplicates is greater
            # than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
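    # Illustrative example of the self-exclusion handled above, reusing the
    # samples assumed in the kneighbors docstring: when X is None each point
    # is removed from its own neighbor list.
    #
    #   >>> from sklearn.neighbors import NearestNeighbors
    #   >>> neigh = NearestNeighbors(n_neighbors=1)
    #   >>> neigh.fit([[0., 0., 0.], [0., .5, 0.], [1., 1., .5]])  # doctest: +ELLIPSIS
    #   NearestNeighbors(algorithm='auto', leaf_size=30, ...)
    #   >>> neigh.kneighbors(return_distance=False)
    #   array([[1],
    #          [0],
    #          [1]])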
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
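# Illustrative example for mode='distance', continuing the kneighbors_graph
# docstring above (X = [[0], [3], [1]], neigh fitted with n_neighbors=2):
#
#   >>> neigh.kneighbors_graph(X, mode='distance').toarray()
#   array([[ 0.,  0.,  1.],
#          [ 0.,  0.,  2.],
#          [ 1.,  0.,  0.]])
#
# Each row holds the distances to the two nearest fitted points: the query
# point itself, at distance zero, and its closest other neighbor.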
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([[1., 1., 1.]])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
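    # Illustrative example, continuing the radius_neighbors docstring above
    # (same samples and neigh): shrinking the radius to 0.6 leaves only the
    # closest training point within the ball.
    #
    #   >>> rng = neigh.radius_neighbors([[1., 1., 1.]], radius=0.6)
    #   >>> np.asarray(rng[1][0])
    #   array([2])
    #
    # On the brute-force euclidean path the comparison is done on squared
    # distances (0.25 <= 0.36 here); the square root is applied only to the
    # distances that are actually returned.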
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
        Neighborhoods are restricted to the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
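# Illustrative example (assumed labels) of the per-output encoding performed in
# SupervisedIntegerMixin.fit above via np.unique(..., return_inverse=True):
#
#   >>> classes, encoded = np.unique(['b', 'a', 'b', 'c'], return_inverse=True)
#   >>> list(classes), list(encoded)
#   (['a', 'b', 'c'], [1, 0, 1, 2])
#
# classes_ keeps the original labels while _y stores their integer codes, one
# column per output.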
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
"""
return self._fit(X)
| bsd-3-clause |
liangz0707/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with sampling algorithms
        # that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
    # Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
rueckstiess/dopamine | scripts/test_fqi.py | 1 | 1755 | from dopamine.environments import TestEnvironment
from dopamine.agents import FQIAgent
from dopamine.experiments import Experiment
from dopamine.adapters import EpsilonGreedyExplorer, BoltzmannExplorer
from matplotlib import pyplot as plt
from numpy import *
def plotPolicy(agent):
plt.clf()
for s in arange(-1, 1, 0.01):
s = array([s])
q0 = agent.estimator.getValue(s, array([0]))
q1 = agent.estimator.getValue(s, array([1]))
plt.plot(s, q0, '.r')
plt.plot(s, q1, '.b')
# inps = agent.estimator.dataset['input']
# tgts = agent.estimator.dataset['target'].flatten()
#
# red = where(inps[:,1])[0]
# blue = where(inps[:,2])[0]
#
# plt.plot(inps[red,0].flatten(), tgts[red], 'sr', alpha=0.5)
# plt.plot(inps[blue,0].flatten(), tgts[blue], 'sb', alpha=0.5)
plt.gcf().canvas.draw()
# create agent, environment, renderer, experiment
agent = FQIAgent()
environment = TestEnvironment()
experiment = Experiment(environment, agent)
# add normalization adapter
# normalizer = NormalizingAdapter()
# experiment.addAdapter(normalizer)
# add e-greedy exploration
# explorer = BoltzmannExplorer(2.0, episodeCount=1000)
explorer = EpsilonGreedyExplorer(0.5, episodeCount=1000)
experiment.addAdapter(explorer)
# run 10 episodes to initialize the normalizing adapter
for i in range(10):
experiment.runEpisode(reset=True)
# print "normalizing:", normalizer.minStates, normalizer.maxStates
agent.forget()
plt.ion()
# run experiment
for i in range(1000):
for i in range(1):
experiment.runEpisode(reset=True)
agent.learn()
print "mean rewards:", mean(agent.episode.rewards)
print "exploration:", explorer.epsilon
plotPolicy(agent)
| gpl-3.0 |
ptitjano/bokeh | bokeh/charts/attributes.py | 6 | 14622 | from __future__ import absolute_import
from copy import copy
from itertools import cycle
import pandas as pd
from bokeh.charts import DEFAULT_PALETTE
from bokeh.charts.properties import ColumnLabel
from bokeh.charts.utils import marker_types
from bokeh.charts.data_source import ChartDataSource
from bokeh.charts.stats import Bins
from bokeh.core.enums import DashPattern
from bokeh.models.sources import ColumnDataSource
from bokeh.core.properties import (HasProps, String, List, Instance, Either, Any, Dict,
Bool, Override)
class AttrSpec(HasProps):
"""A container for assigning attributes to values and retrieving them as needed.
    One convenience this provides is automatically handling the case where the
    provided iterable is too short for the number of distinct values, by
    cycling through it.
Once created as attr_spec, you can do attr_spec[data_label], where data_label must
be a one dimensional tuple of values, representing the unique group in the data.
See the :meth:`AttrSpec.setup` method for the primary way to provide an existing
AttrSpec with data and column values and update all derived property values.
"""
data = Instance(ColumnDataSource)
iterable = List(Any, default=None)
attrname = String(help='Name of the attribute the spec provides.')
columns = Either(ColumnLabel, List(ColumnLabel), help="""
The label or list of column labels that correspond to the columns that will be
used to find all distinct values (single column) or combination of values (
multiple columns) to then assign a unique attribute to. If not enough unique
attribute values are found, then the attribute values will be cycled.
""")
default = Any(default=None, help="""
The default value for the attribute, which is used if no column is assigned to
the attribute for plotting. If the default value is not provided, the first
value in the `iterable` property is used.
""")
attr_map = Dict(Any, Any, help="""
Created by the attribute specification when `iterable` and `data` are
available. The `attr_map` will include a mapping between the distinct value(s)
found in `columns` and the attribute value that has been assigned.
""")
items = Any(default=None, help="""
The attribute specification calculates this list of distinct values that are
found in `columns` of `data`.
""")
sort = Bool(default=True, help="""
A boolean flag to tell the attribute specification to sort `items`, when it is
calculated. This affects which value of `iterable` is assigned to each distinct
value in `items`.
""")
ascending = Bool(default=True, help="""
A boolean flag to tell the attribute specification how to sort `items` if the
`sort` property is set to `True`. The default setting for `ascending` is `True`.
""")
bins = Instance(Bins, help="""
If an attribute spec is binning data, so that we can map one value in the
`iterable` to one value in `items`, then this attribute will contain an instance
of the Bins stat. This is used to create unique labels for each bin, which is
then used for `items` instead of the actual unique values in `columns`.
""")
def __init__(self, columns=None, df=None, iterable=None, default=None,
items=None, **properties):
"""Create a lazy evaluated attribute specification.
Args:
columns: a list of column labels
df(:class:`~pandas.DataFrame`): the data source for the attribute spec.
iterable: an iterable of distinct attribute values
default: a value to use as the default attribute when no columns are passed
items: the distinct values in columns. If items is provided as input,
then the values provided are used instead of being calculated. This can
be used to force a specific order for assignment.
**properties: other properties to pass to parent :class:`HasProps`
"""
properties['columns'] = self._ensure_list(columns)
if df is not None:
properties['data'] = ColumnDataSource(df)
if default is None and iterable is not None:
default_iter = copy(iterable)
properties['default'] = next(iter(default_iter))
elif default is not None:
properties['default'] = default
if iterable is not None:
properties['iterable'] = iterable
if items is not None:
properties['items'] = items
super(AttrSpec, self).__init__(**properties)
if self.default is None and self.iterable is not None:
self.default = next(iter(copy(self.iterable)))
if self.data is not None and self.columns is not None:
if df is None:
df = self.data.to_df()
self._generate_items(df, columns=self.columns)
if self.items is not None and self.iterable is not None:
self.attr_map = self._create_attr_map()
@staticmethod
def _ensure_list(attr):
"""Always returns a list with the provided value. Returns the value if a list."""
if isinstance(attr, str):
return [attr]
elif isinstance(attr, tuple):
return list(attr)
else:
return attr
@staticmethod
def _ensure_tuple(attr):
"""Return tuple with the provided value. Returns the value if a tuple."""
if not isinstance(attr, tuple):
return (attr,)
else:
return attr
def _setup_default(self):
"""Stores the first value of iterable into `default` property."""
self.default = next(self._setup_iterable())
def _setup_iterable(self):
"""Default behavior is to copy and cycle the provided iterable."""
return cycle(copy(self.iterable))
def _generate_items(self, df, columns):
"""Produce list of unique tuples that identify each item."""
if self.sort:
# TODO (fpliger): this handles pandas API change so users do not experience
# the related annoying deprecation warning. This is probably worth
# removing when pandas deprecated version (0.16) is "old" enough
try:
df = df.sort_values(by=columns, ascending=self.ascending)
except AttributeError:
df = df.sort(columns=columns, ascending=self.ascending)
items = df[columns].drop_duplicates()
self.items = [tuple(x) for x in items.to_records(index=False)]
def _create_attr_map(self, df=None, columns=None):
"""Creates map between unique values and available attributes."""
if df is not None and columns is not None:
self._generate_items(df, columns)
iterable = self._setup_iterable()
return {item: next(iterable) for item in self._item_tuples()}
def _item_tuples(self):
return [self._ensure_tuple(item) for item in self.items]
def set_columns(self, columns):
"""Set columns property and update derived properties as needed."""
columns = self._ensure_list(columns)
if all([col in self.data.column_names for col in columns]):
self.columns = columns
else:
# we have input values other than columns
# assume this is now the iterable at this point
self.iterable = columns
self._setup_default()
def setup(self, data=None, columns=None):
"""Set the data and update derived properties as needed."""
if data is not None:
self.data = data
if columns is not None and self.data is not None:
self.set_columns(columns)
if self.columns is not None and self.data is not None:
self.attr_map = self._create_attr_map(self.data.to_df(), self.columns)
def update_data(self, data):
self.setup(data=data, columns=self.columns)
def __getitem__(self, item):
"""Lookup the attribute to use for the given unique group label."""
if not self.attr_map:
return self.default
elif self._ensure_tuple(item) not in self.attr_map.keys():
# make sure we have attr map
self.setup()
return self.attr_map[self._ensure_tuple(item)]
@property
def series(self):
if not self.attr_map:
return pd.Series()
else:
index = pd.MultiIndex.from_tuples(self._item_tuples(), names=self.columns)
return pd.Series(list(self.attr_map.values()), index=index)
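# Illustrative sketch (assumed toy data) of the lazy assignment described in
# the AttrSpec docstring above:
#
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({'fruit': ['apple', 'pear', 'apple']})
#   >>> spec = AttrSpec(columns='fruit', df=df, iterable=['red', 'green'])
#   >>> spec['apple'], spec[('pear',)]
#   ('red', 'green')
#
# With more distinct values than attribute values the iterable is cycled, so
# later groups reuse earlier attribute values.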
class ColorAttr(AttrSpec):
"""An attribute specification for mapping unique data values to colors.
.. note::
Should be expanded to support more complex coloring options.
"""
attrname = Override(default='color')
iterable = Override(default=DEFAULT_PALETTE)
bin = Bool(default=False)
def __init__(self, **kwargs):
iterable = kwargs.pop('palette', None)
if iterable is not None:
kwargs['iterable'] = iterable
super(ColorAttr, self).__init__(**kwargs)
def _generate_items(self, df, columns):
"""Produce list of unique tuples that identify each item."""
if not self.bin:
super(ColorAttr, self)._generate_items(df, columns)
else:
if len(columns) == 1 and ChartDataSource.is_number(df[columns[0]]):
self.bins = Bins(source=ColumnDataSource(df), column=columns[0],
bins=len(self.iterable), aggregate=False)
if self.sort:
self.bins.sort(ascending=self.ascending)
self.items = [bin.label[0] for bin in self.bins]
else:
raise ValueError('Binned colors can only be created for one column of \
numerical data.')
def add_bin_labels(self, data):
col = self.columns[0]
# save original values into new column
data._data[col + '_values'] = data._data[col]
for bin in self.bins:
# set all rows associated to each bin to the bin label being mapped to colors
data._data.ix[data._data[col + '_values'].isin(bin.values),
col] = bin.label[0]
data._data[col] = pd.Categorical(data._data[col], categories=list(self.items),
ordered=self.sort)
class MarkerAttr(AttrSpec):
"""An attribute specification for mapping unique data values to markers."""
attrname = Override(default='marker')
iterable = Override(default=list(marker_types.keys()))
def __init__(self, **kwargs):
iterable = kwargs.pop('markers', None)
if iterable is not None:
kwargs['iterable'] = iterable
super(MarkerAttr, self).__init__(**kwargs)
dashes = DashPattern._values
class DashAttr(AttrSpec):
"""An attribute specification for mapping unique data values to line dashes."""
attrname = Override(default='dash')
iterable = Override(default=dashes)
def __init__(self, **kwargs):
iterable = kwargs.pop('dash', None)
if iterable is not None:
kwargs['iterable'] = iterable
super(DashAttr, self).__init__(**kwargs)
class IdAttr(AttrSpec):
"""An attribute specification for mapping unique data values to line dashes."""
attrname = Override(default='id')
def _setup_iterable(self):
return iter(range(0, len(self.items)))
class CatAttr(AttrSpec):
"""An attribute specification for mapping unique data values to labels.
.. note::
this is a special attribute specification, which is used for defining which
labels are used for one aspect of a chart (grouping) vs another (stacking or
legend)
"""
attrname = Override(default='nest')
def __init__(self, **kwargs):
super(CatAttr, self).__init__(**kwargs)
def _setup_iterable(self):
return iter(self.items)
def get_levels(self, columns):
"""Provides a list of levels the attribute represents."""
if self.columns is not None:
levels = [columns.index(col) for col in self.columns]
return levels
else:
return []
""" Attribute Spec Functions
Convenient functions for producing attribute specifications. These would be
the interface used by end users when providing attribute specs as inputs
to the Chart.
"""
def color(columns=None, palette=None, bin=False, **kwargs):
"""Produces a ColorAttr specification for coloring groups of data based on columns.
Args:
columns (str or list(str), optional): a column or list of columns for coloring
palette (list(str), optional): a list of colors to use for assigning to unique
values in `columns`.
**kwargs: any keyword, arg supported by :class:`AttrSpec`
Returns:
a `ColorAttr` object
"""
if palette is not None:
kwargs['palette'] = palette
kwargs['columns'] = columns
kwargs['bin'] = bin
return ColorAttr(**kwargs)
def marker(columns=None, markers=None, **kwargs):
""" Specifies detailed configuration for a marker attribute.
Args:
columns (list or str):
markers (list(str) or str): a custom list of markers. Must exist within
:data:`marker_types`.
**kwargs: any keyword, arg supported by :class:`AttrSpec`
Returns:
a `MarkerAttr` object
"""
if markers is not None:
kwargs['markers'] = markers
kwargs['columns'] = columns
return MarkerAttr(**kwargs)
def cat(columns=None, cats=None, sort=True, ascending=True, **kwargs):
""" Specifies detailed configuration for a chart attribute that uses categoricals.
Args:
columns (list or str): the columns used to generate the categorical variable
cats (list, optional): overrides the values derived from columns
sort (bool, optional): whether to sort the categorical values (default=True)
ascending (bool, optional): whether to sort the categorical values (default=True)
**kwargs: any keyword, arg supported by :class:`AttrSpec`
Returns:
a `CatAttr` object
"""
if cats is not None:
kwargs['cats'] = cats
kwargs['columns'] = columns
kwargs['sort'] = sort
kwargs['ascending'] = ascending
return CatAttr(**kwargs)
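# Illustrative usage (assumed column names and palette) of the helper functions
# above; the resulting attribute specs are what a Chart consumes:
#
#   color(columns='species', palette=['red', 'green', 'blue'])
#   marker(columns='species', markers=['circle', 'square', 'triangle'])
#   cat(columns='month', sort=False)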
| bsd-3-clause |
kdebrab/pandas | pandas/tests/indexes/period/test_period_range.py | 11 | 3646 | import pytest
import pandas.util.testing as tm
from pandas import date_range, NaT, period_range, Period, PeriodIndex
class TestPeriodRange(object):
@pytest.mark.parametrize('freq', ['D', 'W', 'M', 'Q', 'A'])
def test_construction_from_string(self, freq):
# non-empty
expected = date_range(start='2017-01-01', periods=5,
freq=freq, name='foo').to_period()
start, end = str(expected[0]), str(expected[-1])
result = period_range(start=start, end=end, freq=freq, name='foo')
tm.assert_index_equal(result, expected)
result = period_range(start=start, periods=5, freq=freq, name='foo')
tm.assert_index_equal(result, expected)
result = period_range(end=end, periods=5, freq=freq, name='foo')
tm.assert_index_equal(result, expected)
# empty
expected = PeriodIndex([], freq=freq, name='foo')
result = period_range(start=start, periods=0, freq=freq, name='foo')
tm.assert_index_equal(result, expected)
result = period_range(end=end, periods=0, freq=freq, name='foo')
tm.assert_index_equal(result, expected)
result = period_range(start=end, end=start, freq=freq, name='foo')
tm.assert_index_equal(result, expected)
def test_construction_from_period(self):
# upsampling
start, end = Period('2017Q1', freq='Q'), Period('2018Q1', freq='Q')
expected = date_range(start='2017-03-31', end='2018-03-31', freq='M',
name='foo').to_period()
result = period_range(start=start, end=end, freq='M', name='foo')
tm.assert_index_equal(result, expected)
# downsampling
start, end = Period('2017-1', freq='M'), Period('2019-12', freq='M')
expected = date_range(start='2017-01-31', end='2019-12-31', freq='Q',
name='foo').to_period()
result = period_range(start=start, end=end, freq='Q', name='foo')
tm.assert_index_equal(result, expected)
# empty
expected = PeriodIndex([], freq='W', name='foo')
result = period_range(start=start, periods=0, freq='W', name='foo')
tm.assert_index_equal(result, expected)
result = period_range(end=end, periods=0, freq='W', name='foo')
tm.assert_index_equal(result, expected)
result = period_range(start=end, end=start, freq='W', name='foo')
tm.assert_index_equal(result, expected)
def test_errors(self):
# not enough params
msg = ('Of the three parameters: start, end, and periods, '
'exactly two must be specified')
with tm.assert_raises_regex(ValueError, msg):
period_range(start='2017Q1')
with tm.assert_raises_regex(ValueError, msg):
period_range(end='2017Q1')
with tm.assert_raises_regex(ValueError, msg):
period_range(periods=5)
with tm.assert_raises_regex(ValueError, msg):
period_range()
# too many params
with tm.assert_raises_regex(ValueError, msg):
period_range(start='2017Q1', end='2018Q1', periods=8, freq='Q')
# start/end NaT
msg = 'start and end must not be NaT'
with tm.assert_raises_regex(ValueError, msg):
period_range(start=NaT, end='2018Q1')
with tm.assert_raises_regex(ValueError, msg):
period_range(start='2017Q1', end=NaT)
# invalid periods param
msg = 'periods must be a number, got foo'
with tm.assert_raises_regex(TypeError, msg):
period_range(start='2017Q1', periods='foo')
| bsd-3-clause |
evgchz/scikit-learn | sklearn/decomposition/tests/test_pca.py | 25 | 11108 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
"""PCA on dense arrays"""
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_whitening():
"""Check that PCA output has unit-variance"""
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
    # mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
"""Check that PCA output has unit-variance"""
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 3)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, axis=0))
def test_pca_check_projection():
"""Test that the projection of data is correct"""
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
"""Test that the projection of data can be inverted"""
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
"""Test that the projection by RandomizedPCA on dense data is correct"""
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
"""Test that the projection by RandomizedPCA on list data is correct"""
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
"""Test that RandomizedPCA is inversible on dense data"""
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
"""Check automated dimensionality setting"""
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
"""TODO: explain what this is testing
Or at least use explicit variable names...
"""
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
"""TODO: explain what this is testing
Or at least use explicit variable names...
"""
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
"""
"""
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
"""Test that probabilistic PCA scoring yields a reasonable score"""
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
"""Test that probabilistic PCA correctly separated different datasets"""
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
"""Check that probabilistic PCA selects the right model"""
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
rrubino/B6-SFB1102 | infodens/classifier/svr_linear.py | 1 | 1858 | '''
Created on Aug 23, 2016
@author: admin
'''
from infodens.classifier.classifier import Classifier
from sklearn.svm import LinearSVR
from sklearn.model_selection import GridSearchCV
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error
from scipy.stats import pearsonr
import time
class SVR_linear(Classifier):
'''
classdocs
'''
classifierName = 'Support Vector Regressor'
C = np.logspace(-5.0, 5.0, num=10, endpoint=True, base=2)
def evaluate(self):
""" Overriding default evaluate"""
y_pred = self.predict()
mse = mean_squared_error(self.ytest, y_pred)
mae = mean_absolute_error(self.ytest, y_pred)
        # pearsonr returns (correlation, p-value); keep only the correlation
        r = pearsonr(self.ytest, y_pred)[0]
return mae, mse, r
def train(self):
tuned_parameters = [{'C': self.C}]
print ('SVR Optimizing. This will take a while')
start_time = time.time()
clf = GridSearchCV(LinearSVR(), tuned_parameters,
n_jobs=self.threadCount, cv=5)
clf.fit(self.Xtrain, self.ytrain)
        print('Done with optimizing. It took ', time.time() -
start_time, ' seconds')
self.model = clf.best_estimator_
def runClassifier(self):
""" Overriding default running"""
all_mse = []
all_mae = []
all_r = []
#pre = []; rec = []; fsc = []
for i in range(self.n_foldCV):
self.shuffle()
self.splitTrainTest()
self.train()
mae, mse, r = self.evaluate()
all_mse.append( mse )
all_mae.append( mae )
all_r.append( r )
classifReport = "Average MAE: {0}".format( np.mean( all_mae ) )
classifReport += "\nAverage MSE: {0}".format( np.mean( all_mse ) )
classifReport += "\nAverage Pearson's r: {0}".format( np.mean( all_r ) )
#classifReport += '\nAverage Precision: ' + str(np.mean(pre))
#classifReport += '\nAverage Recall: ' + str(np.mean(rec))
#classifReport += '\nAverage F-score: ' + str(np.mean(fsc))
return classifReport
| gpl-3.0 |
iemejia/beam | sdks/python/setup.py | 1 | 11060 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Apache Beam SDK for Python setup file."""
import os
import sys
import warnings
from distutils.errors import DistutilsError
from distutils.version import StrictVersion
# Pylint and isort disagree here.
# pylint: disable=ungrouped-imports
import setuptools
from pkg_resources import DistributionNotFound
from pkg_resources import get_distribution
from pkg_resources import normalize_path
from pkg_resources import to_filename
from setuptools import Command
from setuptools.command.build_py import build_py
from setuptools.command.develop import develop
from setuptools.command.egg_info import egg_info
from setuptools.command.test import test
class mypy(Command):
user_options = []
def initialize_options(self):
"""Abstract method that is required to be overwritten"""
def finalize_options(self):
"""Abstract method that is required to be overwritten"""
def get_project_path(self):
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
ei_cmd = self.get_finalized_command("egg_info")
project_path = normalize_path(ei_cmd.egg_base)
return os.path.join(project_path, to_filename(ei_cmd.egg_name))
def run(self):
import subprocess
args = ['mypy', self.get_project_path()]
result = subprocess.call(args)
if result != 0:
raise DistutilsError("mypy exited with status %d" % result)
def get_version():
global_names = {}
exec( # pylint: disable=exec-used
open(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'apache_beam/version.py')
).read(),
global_names
)
return global_names['__version__']
PACKAGE_NAME = 'apache-beam'
PACKAGE_VERSION = get_version()
PACKAGE_DESCRIPTION = 'Apache Beam SDK for Python'
PACKAGE_URL = 'https://beam.apache.org'
PACKAGE_DOWNLOAD_URL = 'https://pypi.python.org/pypi/apache-beam'
PACKAGE_AUTHOR = 'Apache Software Foundation'
PACKAGE_EMAIL = '[email protected]'
PACKAGE_KEYWORDS = 'apache beam'
PACKAGE_LONG_DESCRIPTION = '''
Apache Beam is a unified programming model for both batch and streaming
data processing, enabling efficient execution across diverse distributed
execution engines and providing extensibility points for connecting to
different technologies and user communities.
'''
REQUIRED_PIP_VERSION = '7.0.0'
_PIP_VERSION = get_distribution('pip').version
if StrictVersion(_PIP_VERSION) < StrictVersion(REQUIRED_PIP_VERSION):
warnings.warn(
"You are using version {0} of pip. " \
"However, version {1} is recommended.".format(
_PIP_VERSION, REQUIRED_PIP_VERSION
)
)
REQUIRED_CYTHON_VERSION = '0.28.1'
try:
_CYTHON_VERSION = get_distribution('cython').version
if StrictVersion(_CYTHON_VERSION) < StrictVersion(REQUIRED_CYTHON_VERSION):
warnings.warn(
"You are using version {0} of cython. " \
"However, version {1} is recommended.".format(
_CYTHON_VERSION, REQUIRED_CYTHON_VERSION
)
)
except DistributionNotFound:
# do nothing if Cython is not installed
pass
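# Editor's illustration (an added sketch, not called anywhere in this file): the
# soft version-check pattern used for pip and Cython above, restated as a generic
# helper; the distribution name passed in would be arbitrary.
def _warn_if_older_than(dist_name, recommended_version):
  try:
    installed = get_distribution(dist_name).version
    if StrictVersion(installed) < StrictVersion(recommended_version):
      warnings.warn(
          "You are using version {0} of {1}. "
          "However, version {2} is recommended.".format(
              installed, dist_name, recommended_version))
  except DistributionNotFound:
    pass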
try:
# pylint: disable=wrong-import-position
from Cython.Build import cythonize
except ImportError:
cythonize = lambda *args, **kwargs: []
REQUIRED_PACKAGES = [
# Avro 1.9.2 for python3 was broken. The issue was fixed in version 1.9.2.1
'avro-python3>=1.8.1,!=1.9.2,<1.10.0',
'crcmod>=1.7,<2.0',
# dataclasses backport for python_version<3.7. No version bound because this
# is Python standard since Python 3.7 and each Python version is compatible
# with a specific dataclasses version.
'dataclasses;python_version<"3.7"',
# orjson, only available on Python 3.6 and above
'orjson<4.0;python_version>="3.6"',
# Dill doesn't have forwards-compatibility guarantees within minor version.
# Pickles created with a new version of dill may not unpickle using older
# version of dill. It is best to use the same version of dill on client and
# server, therefore list of allowed versions is very narrow.
# See: https://github.com/uqfoundation/dill/issues/341.
'dill>=0.3.1.1,<0.3.2',
'fastavro>=0.21.4,<2',
'future>=0.18.2,<1.0.0',
'grpcio>=1.29.0,<2',
'hdfs>=2.1.0,<3.0.0',
'httplib2>=0.8,<0.20.0',
'numpy>=1.14.3,<1.21.0',
'pymongo>=3.8.0,<4.0.0',
'oauth2client>=2.0.1,<5',
'protobuf>=3.12.2,<4',
'pyarrow>=0.15.1,<5.0.0',
'pydot>=1.2.0,<2',
'python-dateutil>=2.8.0,<3',
'pytz>=2018.3',
'requests>=2.24.0,<3.0.0',
'typing-extensions>=3.7.0,<3.8.0',
]
# [BEAM-8181] pyarrow cannot be installed on 32-bit Windows platforms.
if sys.platform == 'win32' and sys.maxsize <= 2**32:
REQUIRED_PACKAGES = [
p for p in REQUIRED_PACKAGES if not p.startswith('pyarrow')
]
REQUIRED_TEST_PACKAGES = [
'freezegun>=0.3.12',
'mock>=1.0.1,<3.0.0',
'nose>=1.3.7',
'nose_xunitmp>=0.4.1',
'pandas>=1.0,<1.3.0',
'parameterized>=0.7.1,<0.8.0',
'pyhamcrest>=1.9,!=1.10.0,<2.0.0',
'pyyaml>=3.12,<6.0.0',
'requests_mock>=1.7,<2.0',
'tenacity>=5.0.2,<6.0',
'pytest>=4.4.0,<5.0',
'pytest-xdist>=1.29.0,<2',
'pytest-timeout>=1.3.3,<2',
'sqlalchemy>=1.3,<2.0',
'psycopg2-binary>=2.8.5,<3.0.0',
'testcontainers>=3.0.3,<4.0.0',
]
GCP_REQUIREMENTS = [
'cachetools>=3.1.0,<5',
'google-apitools>=0.5.31,<0.5.32',
'google-auth>=1.18.0,<2',
'google-cloud-datastore>=1.8.0,<2',
'google-cloud-pubsub>=0.39.0,<2',
# GCP packages required by tests
'google-cloud-bigquery>=1.6.0,<3',
'google-cloud-core>=0.28.1,<2',
'google-cloud-bigtable>=0.31.1,<2',
'google-cloud-spanner>=1.13.0,<2',
'grpcio-gcp>=0.2.2,<1',
# GCP Packages required by ML functionality
'google-cloud-dlp>=0.12.0,<2',
'google-cloud-language>=1.3.0,<2',
'google-cloud-videointelligence>=1.8.0,<2',
'google-cloud-vision>=0.38.0,<2',
# GCP Package required by Google Cloud Profiler.
'google-cloud-profiler>=3.0.4,<4'
]
INTERACTIVE_BEAM = [
'facets-overview>=1.0.0,<2',
'ipython>=5.8.0,<8',
'ipykernel>=5.2.0,<6',
# Skip version 6.1.13 due to
# https://github.com/jupyter/jupyter_client/issues/637
'jupyter-client>=6.1.11,<6.1.13',
'timeloop>=1.0.2,<2',
]
INTERACTIVE_BEAM_TEST = [
    # notebook utils
'nbformat>=5.0.5,<6',
'nbconvert>=5.6.1,<6',
# headless chrome based integration tests
'selenium>=3.141.0,<4',
'needle>=0.5.0,<1',
'chromedriver-binary>=91,<92',
# use a fixed major version of PIL for different python versions
'pillow>=7.1.1,<8',
]
AWS_REQUIREMENTS = [
'boto3 >=1.9'
]
AZURE_REQUIREMENTS = [
'azure-storage-blob >=12.3.2',
'azure-core >=1.7.0',
]
# We must generate protos after setup_requires are installed.
def generate_protos_first(original_cmd):
try:
# See https://issues.apache.org/jira/browse/BEAM-2366
# pylint: disable=wrong-import-position
import gen_protos
class cmd(original_cmd, object):
def run(self):
gen_protos.generate_proto_files()
super(cmd, self).run()
return cmd
except ImportError:
warnings.warn("Could not import gen_protos, skipping proto generation.")
return original_cmd
python_requires = '>=3.6'
if sys.version_info.major == 3 and sys.version_info.minor >= 9:
warnings.warn(
'This version of Apache Beam has not been sufficiently tested on '
'Python %s.%s. You may encounter bugs or missing features.' % (
sys.version_info.major, sys.version_info.minor))
setuptools.setup(
name=PACKAGE_NAME,
version=PACKAGE_VERSION,
description=PACKAGE_DESCRIPTION,
long_description=PACKAGE_LONG_DESCRIPTION,
url=PACKAGE_URL,
download_url=PACKAGE_DOWNLOAD_URL,
author=PACKAGE_AUTHOR,
author_email=PACKAGE_EMAIL,
packages=setuptools.find_packages(),
package_data={'apache_beam': [
'*/*.pyx', '*/*/*.pyx', '*/*.pxd', '*/*/*.pxd', '*/*.h', '*/*/*.h',
'testing/data/*.yaml', 'portability/api/*.yaml']},
ext_modules=cythonize([
# Make sure to use language_level=3 cython directive in files below.
'apache_beam/**/*.pyx',
'apache_beam/coders/coder_impl.py',
'apache_beam/metrics/cells.py',
'apache_beam/metrics/execution.py',
'apache_beam/runners/common.py',
'apache_beam/runners/worker/logger.py',
'apache_beam/runners/worker/opcounters.py',
'apache_beam/runners/worker/operations.py',
'apache_beam/transforms/cy_combiners.py',
'apache_beam/transforms/stats.py',
'apache_beam/utils/counters.py',
'apache_beam/utils/windowed_value.py',
]),
install_requires=REQUIRED_PACKAGES,
python_requires=python_requires,
test_suite='nose.collector',
# BEAM-8840: Do NOT use tests_require or setup_requires.
extras_require={
'docs': ['Sphinx>=1.5.2,<2.0'],
'test': REQUIRED_TEST_PACKAGES,
'gcp': GCP_REQUIREMENTS,
'interactive': INTERACTIVE_BEAM,
'interactive_test': INTERACTIVE_BEAM_TEST,
'aws': AWS_REQUIREMENTS,
'azure': AZURE_REQUIREMENTS
},
zip_safe=False,
# PyPI package information.
classifiers=[
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
        # When updating version classifiers, also update version warnings
# above and in apache_beam/__init__.py.
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache License, Version 2.0',
keywords=PACKAGE_KEYWORDS,
entry_points={
'nose.plugins.0.10': [
'beam_test_plugin = test_config:BeamTestPlugin',
]},
cmdclass={
'build_py': generate_protos_first(build_py),
'develop': generate_protos_first(develop),
'egg_info': generate_protos_first(egg_info),
'test': generate_protos_first(test),
'mypy': generate_protos_first(mypy),
},
)
| apache-2.0 |
steinam/teacher | jup_notebooks/data-science-ipython-notebooks-master/data/titanic/myfirstforest.py | 26 | 4081 | """ Writing my first randomforest code.
Author : AstroDave
Date : 23rd September 2012
Revised: 15 April 2014
Please see packages.python.org/milk/randomforests.html for more information.
"""
import pandas as pd
import numpy as np
import csv as csv
from sklearn.ensemble import RandomForestClassifier
# Data cleanup
# TRAIN DATA
train_df = pd.read_csv('train.csv', header=0) # Load the train file into a dataframe
# I need to convert all strings to integer classifiers.
# I need to fill in the missing values of the data and make it complete.
# female = 0, Male = 1
train_df['Gender'] = train_df['Sex'].map( {'female': 0, 'male': 1} ).astype(int)
# Embarked from 'C', 'Q', 'S'
# Note this is not ideal: in translating categories to numbers, Port "2" is not 2 times greater than Port "1", etc.
# All missing Embarked -> just make them embark from most common place
if len(train_df.Embarked[ train_df.Embarked.isnull() ]) > 0:
train_df.Embarked[ train_df.Embarked.isnull() ] = train_df.Embarked.dropna().mode().values
Ports = list(enumerate(np.unique(train_df['Embarked']))) # determine all values of Embarked,
Ports_dict = { name : i for i, name in Ports } # set up a dictionary in the form Ports : index
train_df.Embarked = train_df.Embarked.map( lambda x: Ports_dict[x]).astype(int) # Convert all Embark strings to int
# All the ages with no data -> make the median of all Ages
median_age = train_df['Age'].dropna().median()
if len(train_df.Age[ train_df.Age.isnull() ]) > 0:
train_df.loc[ (train_df.Age.isnull()), 'Age'] = median_age
# Remove the Name column, Cabin, Ticket, and Sex (since I copied and filled it to Gender)
train_df = train_df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'PassengerId'], axis=1)
# TEST DATA
test_df = pd.read_csv('test.csv', header=0) # Load the test file into a dataframe
# I need to do the same with the test data now, so that the columns are the same as the training data
# I need to convert all strings to integer classifiers:
# female = 0, Male = 1
test_df['Gender'] = test_df['Sex'].map( {'female': 0, 'male': 1} ).astype(int)
# Embarked from 'C', 'Q', 'S'
# All missing Embarked -> just make them embark from most common place
if len(test_df.Embarked[ test_df.Embarked.isnull() ]) > 0:
test_df.Embarked[ test_df.Embarked.isnull() ] = test_df.Embarked.dropna().mode().values
# Again convert all Embarked strings to int
test_df.Embarked = test_df.Embarked.map( lambda x: Ports_dict[x]).astype(int)
# All the ages with no data -> make the median of all Ages
median_age = test_df['Age'].dropna().median()
if len(test_df.Age[ test_df.Age.isnull() ]) > 0:
test_df.loc[ (test_df.Age.isnull()), 'Age'] = median_age
# All the missing Fares -> assume median of their respective class
if len(test_df.Fare[ test_df.Fare.isnull() ]) > 0:
median_fare = np.zeros(3)
for f in range(0,3): # loop 0 to 2
median_fare[f] = test_df[ test_df.Pclass == f+1 ]['Fare'].dropna().median()
for f in range(0,3): # loop 0 to 2
test_df.loc[ (test_df.Fare.isnull()) & (test_df.Pclass == f+1 ), 'Fare'] = median_fare[f]
# Collect the test data's PassengerIds before dropping it
ids = test_df['PassengerId'].values
# Remove the Name column, Cabin, Ticket, and Sex (since I copied and filled it to Gender)
test_df = test_df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'PassengerId'], axis=1)
# The data is now ready to go. So lets fit to the train, then predict to the test!
# Convert back to a numpy array
train_data = train_df.values
test_data = test_df.values
print 'Training...'
forest = RandomForestClassifier(n_estimators=100)
forest = forest.fit( train_data[0::,1::], train_data[0::,0] )
print 'Predicting...'
output = forest.predict(test_data).astype(int)
predictions_file = open("myfirstforest.csv", "wb")
open_file_object = csv.writer(predictions_file)
open_file_object.writerow(["PassengerId","Survived"])
open_file_object.writerows(zip(ids, output))
predictions_file.close()
print 'Done.'
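# Editor's illustration (an added sketch on a tiny stand-alone frame rather than
# the Kaggle data): the same clean-up pattern used above -- map categories to
# integers and fill missing ages with the median.
def _cleanup_sketch():
    import pandas as pd
    df = pd.DataFrame({'Sex': ['male', 'female', 'male'], 'Age': [22.0, None, 30.0]})
    df['Gender'] = df['Sex'].map({'female': 0, 'male': 1}).astype(int)  # female = 0, male = 1
    df.loc[df.Age.isnull(), 'Age'] = df['Age'].dropna().median()        # impute missing ages
    return df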
| mit |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sklearn/linear_model/tests/test_bayes.py | 7 | 1577 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD Style.
import numpy as np
from numpy.testing import assert_array_equal
import nose
from ..bayes import BayesianRidge, ARDRegression
from sklearn import datasets
def test_bayesian_on_diabetes():
"""
Test BayesianRidge on diabetes
"""
raise nose.SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
"""
Test BayesianRidge on toy
"""
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
X_test = [[1], [3], [4]]
assert(np.abs(clf.predict(X_test) - [1, 3, 4]).sum() < 1.e-2) # identity
def test_toy_ard_object():
"""
Test BayesianRegression ARD classifier
"""
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
test = [[1], [3], [4]]
assert(np.abs(clf.predict(test) - [1, 3, 4]).sum() < 1.e-3) # identity
| agpl-3.0 |
dflowers7/kroneckerbio | External/sundials-2.6.2/examples/arkode/C_serial/plot_heat1D.py | 1 | 1822 | #!/usr/bin/env python
# ----------------------------------------------------------------
# Programmer(s): Daniel R. Reynolds @ SMU
# ----------------------------------------------------------------
# LLNS/SMU Copyright Start
# Copyright (c) 2015, Southern Methodist University and
# Lawrence Livermore National Security
#
# This work was performed under the auspices of the U.S. Department
# of Energy by Southern Methodist University and Lawrence Livermore
# National Laboratory under Contract DE-AC52-07NA27344.
# Produced at Southern Methodist University and the Lawrence
# Livermore National Laboratory.
#
# All rights reserved.
# For details, see the LICENSE file.
# LLNS/SMU Copyright End
# Copyright (c) 2013, Southern Methodist University.
# All rights reserved.
# For details, see the LICENSE file.
# ----------------------------------------------------------------
# matplotlib-based plotting script for heat1D.c example
# imports
import sys
import pylab as plt
import numpy as np
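# Editor's sketch (an added helper; the file layout is an assumption inferred from
# the loadtxt calls below): 'heat_mesh.txt' holds the 1-D mesh and 'heat1D.txt'
# holds one row of solution values per output time.
def _write_synthetic_inputs(nx=21, nt=5):
    x = np.linspace(0.0, 1.0, nx)
    u = np.array([np.sin(np.pi * x) * np.exp(-k) for k in range(nt)])  # decaying sine profile
    np.savetxt('heat_mesh.txt', x)
    np.savetxt('heat1D.txt', u)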
# load mesh data file
mesh = np.loadtxt('heat_mesh.txt', dtype=np.double)
# load solution data file
data = np.loadtxt('heat1D.txt', dtype=np.double)
# determine number of time steps, mesh size
nt,nx = np.shape(data)
# determine maximum temperature
maxtemp = 1.1*data.max()
# generate plots of results
for tstep in range(nt):
# set string constants for output plots, current time, mesh size
pname = 'heat1d.' + repr(tstep).zfill(3) + '.png'
tstr = repr(tstep)
nxstr = repr(nx)
# plot current solution and save to disk
plt.figure(1)
plt.plot(mesh,data[tstep,:])
plt.xlabel('x')
plt.ylabel('solution')
plt.title('u(x) at output ' + tstr + ', mesh = ' + nxstr)
plt.axis((0.0, 1.0, 0.0, maxtemp))
plt.grid()
plt.savefig(pname)
plt.close()
##### end of script #####
| mit |
varenius/salsa | Developer_notes/FreqResponse/SALSA_freqresponse.py | 1 | 6327 | import matplotlib.pyplot as plt
import numpy as np
freqs = [ 1. , 1.01010101 ,1.02020202 ,1.03030303 ,1.04040404, 1.05050505 ,
1.06060606, 1.07070707 ,1.08080808 ,1.09090909 ,1.1010101 , 1.11111111 ,
1.12121212, 1.13131313 ,1.14141414 ,1.15151515 ,1.16161616, 1.17171717 ,
1.18181818, 1.19191919 ,1.2020202 ,1.21212121 ,1.22222222, 1.23232323 ,
1.24242424, 1.25252525 ,1.26262626 ,1.27272727 ,1.28282828, 1.29292929 ,
1.3030303 , 1.31313131 ,1.32323232 ,1.33333333 ,1.34343434, 1.35353535 ,
1.36363636, 1.37373737 ,1.38383838 ,1.39393939 ,1.4040404 , 1.41414141 ,
1.42424242, 1.43434343 ,1.44444444 ,1.45454545 ,1.46464646, 1.47474747 ,
1.48484848, 1.49494949 ,1.50505051 ,1.51515152 ,1.52525253, 1.53535354 ,
1.54545455, 1.55555556 ,1.56565657 ,1.57575758 ,1.58585859, 1.5959596 ,
1.60606061, 1.61616162 ,1.62626263 ,1.63636364 ,1.64646465, 1.65656566 ,
1.66666667, 1.67676768 ,1.68686869 ,1.6969697 ,1.70707071, 1.71717172 ,
1.72727273, 1.73737374 ,1.74747475 ,1.75757576 ,1.76767677, 1.77777778 ,
1.78787879, 1.7979798 ,1.80808081 ,1.81818182 ,1.82828283, 1.83838384 ,
1.84848485, 1.85858586 ,1.86868687 ,1.87878788 ,1.88888889, 1.8989899 ,
1.90909091, 1.91919192 ,1.92929293 ,1.93939394 ,1.94949495, 1.95959596 ,
1.96969697, 1.97979798 ,1.98989899 ,2. ]
# Sun measured at approx 18 deg elevation
suntotpows = [2785.6330876946449, 1977.1466615200043, 1589.494863897562, 1542.7959198653698, 1557.8172065913677, 1735.6908074915409, 2129.2754965126514, 2353.7702390253544, 1972.3789359033108, 2520.4937805235386, 1282.1320186555386, 1237.6904610097408, 1405.4985212385654, 1943.0715529024601, 2858.5671704411507, 3274.8328270316124, 3564.6992583870888, 3669.2628258466721, 4586.2290814518929, 5375.4824217557907, 6455.6713807582855, 7866.3508347272873, 10485.52419924736, 10127.730884552002, 8344.336953997612, 6466.1049537658691, 5280.8985623121262, 5250.1298352479935, 5314.5343601703644, 6038.8015081882477, 6214.5116136074066, 5930.2830632925034, 4645.7253813147545, 4158.2600346207619, 3803.7233927249908, 3759.6277379989624, 4292.0511005520821, 4624.2467453479767, 5030.3273301124573, 4295.354663848877, 3536.6770436167717, 2730.3309785723686, 2416.423579454422, 2336.9050691723824, 2094.2399448156357, 2495.4593307971954, 2395.4762416183949, 1968.5652746260166, 1370.5805124044418, 1260.2253520190716, 1202.7231127619743, 1206.297066539526, 1361.7773389518261, 1552.519161939621, 1828.438550055027, 1775.8439630866051, 1582.4709193408489, 1556.387759834528, 832.66799978911877, 588.73582801222801, 651.42778930813074, 699.11164103448391, 619.32082325220108, 433.08832363039255, 322.50797021389008, 314.95811840891838, 374.19519168138504, 432.44684907793999, 443.08514755964279, 460.69489664584398, 455.19357943534851, 397.53214629739523, 351.27304589003325, 273.39031756296754, 232.59308376535773, 228.33922612667084, 204.66613719984889, 166.71212916448712, 138.45455680601299, 111.94994743354619, 739.86359599977732, 405.20963317155838, 72.816294245421886, 74.506069138646126, 68.830861670896411, 82.954384192824364, 83.197337875142694, 63.829409923404455, 58.214999719522893, 53.334227626211941, 47.608955097384751, 42.91682996135205, 45.085554989986122, 48.646714971400797, 51.167791061103344, 49.396051414310932, 47.978826837614179, 41.228333038277924, 38.292552176862955, 41.856090572662652]
# Measured at zenith, should be far enough from the sun to be a good zero level.
zenithtotpows = [1943.6411018371582, 1482.5802601575851, 1151.7467851042747, 1244.6229656487703, 1302.7019702196121, 2602.2718475461006, 3785.8905768990517, 5145.2179720401764, 4183.0708646774292, 2542.1989408433437, 1144.5393799543381, 854.80862618982792, 837.97461956739426, 995.92484059929848, 1162.1792143434286, 1322.1115420460701, 1325.1730933338404, 1100.4929715096951, 1139.4452214539051, 1075.5127658247948, 1099.5105756223202, 1168.342494815588, 1375.2484279870987, 1434.9314380884171, 1360.0316359996796, 1077.9956717938185, 908.95068320631981, 896.39201503992081, 983.52154520153999, 1140.434398189187, 1265.9182561039925, 1214.5917905569077, 924.46561880409718, 853.2023981064558, 744.55698086321354, 686.73946462571621, 789.24938863515854, 847.71881730854511, 855.3574857711792, 757.26768906414509, 606.07812813669443, 465.58327329158783, 437.5682440251112, 412.08593615144491, 364.77455784380436, 472.19709119945765, 516.77393741905689, 485.4437377974391, 460.91582728177309, 457.36334832012653, 479.17593301087618, 347.65261828154325, 294.29305405914783, 258.27139937877655, 265.72725412622094, 356.57809749990702, 215.72405383735895, 226.11679937317967, 162.22952026873827, 164.5044366940856, 219.81728319451213, 340.53004756569862, 547.15641456842422, 518.41886118799448, 265.53626054897904, 182.17695025354624, 127.92363779060543, 125.63371352106333, 123.76056421920657, 118.65346656739712, 117.87782165594399, 115.08676435798407, 146.01153200492263, 175.86969291046262, 295.32032546401024, 405.0511381700635, 727.21525095403194, 154.49599220976233, 104.65012673847377, 82.725409341976047, 324.13157342374325, 382.79943937063217, 73.37178741581738, 134.08102109283209, 90.209561819210649, 206.97617258131504, 422.87319120764732, 454.66064346581697, 356.65183033794165, 165.66637360677123, 109.50364381447434, 57.247826880775392, 63.019179145805538, 66.039286904036999, 72.274781942367554, 74.631841246038675, 102.46883722022176, 156.93706633150578, 220.66997799277306, 193.30993343517184]
suntotpows=np.array(suntotpows)
zenithtotpows = np.array(zenithtotpows)
plt.plot(freqs, suntotpows)
plt.plot(freqs, zenithtotpows)
plt.legend(['The Sun at 18 deg el.', 'Same on Zenith (background)'])
plt.title('SALSA Vale, 2 second measurements for 100 freq. points')
plt.xlabel('Frequency[GHz]')
plt.ylabel('Amp. (average over 2MHz) [arbitrary scale]')
plt.xlim([1, 2])
plt.savefig('both.png', dpi=300, bbox_inches='tight')
plt.figure()
plt.plot(freqs, suntotpows/zenithtotpows)
plt.legend(['The Sun / Zenith'])
plt.title('SALSA Vale, 2 second measurements for 100 freq. points')
plt.xlabel('Frequency[GHz]')
plt.ylabel('The Sun / Zenith, i.e. approx SNR')
plt.xlim([1.3, 2])
plt.savefig('snr.png', dpi=300, bbox_inches='tight')
plt.show()
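# Editor's sketch (an added helper, not part of the original measurement script):
# report the frequency bin where the Sun/zenith ratio -- the approximate SNR
# plotted above -- peaks.
def _peak_snr_frequency(freq_axis, sun, zenith):
    ratio = np.asarray(sun) / np.asarray(zenith)
    i = int(np.argmax(ratio))
    return freq_axis[i], float(ratio[i])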
| mit |
iagapov/ocelot | demos/sr/phase_tune.py | 2 | 1506 | __author__ = 'Sergey Tomin'
from ocelot.rad import *
from ocelot import *
from ocelot.gui import *
font = {'size' : 14}
matplotlib.rc('font', **font)
beam = Beam()
beam.E = 17.5
beam.I = 0.1
beam.beta_x = 12.84
beam.beta_y = 6.11
beam.Dx = 0.526
und = Undulator(Kx = 4., nperiods=125, lperiod=0.04, eid= "und")
D = Drift(l=0.5, eid="D")
b1 = Hcor(l=0.1, angle = 5*-0.00001, eid="b1")
b2 = Hcor(l=0.2, angle = 5*0.00002, eid="b2")
b3 = Hcor(l=0.1, angle = 5*-0.00001, eid="b3")
phase_shift = (b1, b2, b3)
cell = (und, D, phase_shift, D, und)
lat = MagneticLattice(cell)
screen = Screen()
screen.z = 100.0
screen.size_x = 0.0
screen.size_y = 0.0
screen.nx = 1
screen.ny = 1
screen.start_energy = 7900 #eV
screen.end_energy = 8200 #eV
screen.num_energy = 1000
print_rad_props(beam, K=und.Kx, lu=und.lperiod, L=und.l, distance=screen.z)
screen = calculate_radiation(lat, screen, beam)
# trajectory
for u in screen.motion:
plt.plot(u[4::9], u[0::9], "r")
plt.show()
show_flux(screen, unit="mrad")
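# Editor's sketch (an added helper expressing the pattern used above and repeated
# below): the same three-corrector chicane, scaled by a single kick factor
# (5 above, 10 below).
def _phase_shifter(kick):
    b1 = Hcor(l=0.1, angle=kick * -0.00001, eid="b1")
    b2 = Hcor(l=0.2, angle=kick * 0.00002, eid="b2")
    b3 = Hcor(l=0.1, angle=kick * -0.00001, eid="b3")
    return (b1, b2, b3)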
und = Undulator(Kx = 4., nperiods=125, lperiod=0.04, eid= "und")
D = Drift(l=0.5, eid="D")
b1 = Hcor(l=0.1, angle = 10*-0.00001, eid="b1")
b2 = Hcor(l=0.2, angle = 10*0.00002, eid="b2")
b3 = Hcor(l=0.1, angle = 10*-0.00001, eid="b3")
phase_shift = (b1, b2, b3)
cell = (und, D, phase_shift, D, und)
lat = MagneticLattice(cell)
screen = calculate_radiation(lat, screen, beam)
# trajectory
for u in screen.motion:
plt.plot(u[4::9], u[0::9], "r")
plt.show()
show_flux(screen, unit="mrad") | gpl-3.0 |
wzbozon/scikit-learn | examples/linear_model/plot_iris_logistic.py | 283 | 1678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on
the `iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
data points are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of the Logistic Regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
caperren/Archives | OSU Coursework/ROB 456 - Intelligent Robotics/Homework 4 - A Star Pathfinding/hw4.py | 1 | 7720 | import csv
from matplotlib import pyplot, patches
from math import sqrt
from heapq import *
CSV_PATH = "world.csv"
VAL_TO_COLOR = {
0: "green",
1: "red",
-1: "blue"
}
EDGE_COST = 1
START_POSITION = (0, 0)
END_POSITION = (19, 19)
def import_csv_as_array(csv_path):
csv_file = open(csv_path, "rU") # Open the file
csv_reader = csv.reader(csv_file) # Put it through the csv reader
# Loop through the csv lines and append them to an array
output_array = []
for line in csv_reader:
output_array.append([int(col_val) for col_val in line])
# Delete the csv reader and close the file
del csv_reader
csv_file.close()
# Return our world map array
return output_array
def plot_grid_map(grid_map, fig_save_path=None):
# Make the plot
figure_object, axes_object = pyplot.subplots()
# Plot appropriately colored rectangles for each point on the map
for y, row in enumerate(grid_map):
for x, col in enumerate(row):
axes_object.add_patch(patches.Rectangle((x, y), 1, 1, fill=True, color=VAL_TO_COLOR[col]))
# Plot some x and y dotted lines to make it nicer to view the underlying grid
for y in range(len(grid_map)):
axes_object.plot([0, len(grid_map[0])], [y, y], color="black", alpha=0.75, linestyle=":")
for x in range(len(grid_map[0])):
axes_object.plot([x, x], [0, len(grid_map)], color="black", alpha=0.75, linestyle=":")
# Set the y limit from len(grid_map) to 0 so it matches how the file looks in terms of the map
axes_object.set_ylim([len(grid_map), 0])
axes_object.autoscale(enable=True, tight=True)
# If the optional argument to save to a file is added, output that file
if fig_save_path:
figure_object.savefig(fig_save_path, bbox_inches="tight")
# Show the plot
pyplot.show()
class AStarSolver(object):
# Directions to be used for children
VALID_DIRECTIONS = \
[
[1, 0], # E
[0, 1], # N
[-1, 0], # W
[0, -1], # S
]
def __init__(self, world, start_position, end_position):
# Initialize all the class variables
self.world_map = world
self.world_limit_x = len(self.world_map[0])
self.world_limit_y = len(self.world_map)
self.start_position = start_position
self.end_position = end_position
self.open_set = []
self.closed_set = []
self.g_scores = {}
self.f_scores = {}
self.travel_path = {}
self.final_path = []
self.solution_map = list(self.world_map)
@staticmethod
def heuristic(start_point, end_point):
        # Calculate the heuristic from point a to point b using the Pythagorean theorem
delta_x = abs(end_point[0] - start_point[0])
delta_y = abs(end_point[1] - start_point[1])
return sqrt(pow(delta_x, 2) + pow(delta_y, 2))
def solve_path(self):
# Add the starting node, plus it's initial f_cost
self.g_scores[self.start_position] = 0
self.f_scores[self.start_position] = self.heuristic(self.start_position, self.end_position)
# Put the starting node into the open set as (f_score, position)
# It needs to be in this form for heap sorting by f_score
heappush(self.open_set, (self.f_scores[self.start_position], self.start_position))
while self.open_set:
# Pop off the most recent node in open set with the lowest f_score
current_node = heappop(self.open_set)
# Extract the current position from the node
current_position = current_node[1]
# If we've reached the end, break so we can compute the final path
if current_position == self.end_position:
break
# Now that we've reached this node, add it to the closed set
self.closed_set.append(current_position)
# Loop through the cardinal directions we can move to
for delta_x, delta_y in self.VALID_DIRECTIONS:
                # Compute the child position based on the cardinal direction and the current position
child_position = (current_position[0] + delta_x, current_position[1] + delta_y)
# Compute the child's g_score with an edge cost of 1
child_g_score = self.g_scores[current_position] + EDGE_COST
# Check if location is in the world
valid_x_limit = 0 <= child_position[0] < self.world_limit_x
valid_y_limit = 0 <= child_position[1] < self.world_limit_y
# If it's in the world, make sure the child location is not an obstacle
valid_not_obstacle = None
if valid_x_limit and valid_y_limit:
valid_not_obstacle = self.world_map[child_position[1]][child_position[0]] != 1
# If the child is in a valid location and not an obstacle:
if valid_x_limit and valid_y_limit and valid_not_obstacle:
# Skip to the next child if we've already seen this node and the current path is more costly than
# what we've seen previously
if child_position in self.closed_set and child_g_score >= self.g_scores.get(child_position, 0):
continue
# Get a list of all positions in our open set
open_set_positions = [x[1] for x in self.open_set]
# If the score is better than what we've seen, or if we've never seen this node before, add the node
# to our open set and add this as a potential path
if child_g_score < self.g_scores.get(child_position, 0) or child_position not in open_set_positions:
self.travel_path[child_position] = current_position # Add this jump to the travel path
self.g_scores[child_position] = child_g_score # Sets the new g_score
self.f_scores[child_position] = \
child_g_score + self.heuristic(child_position, self.end_position) # Sets the new f_score
heappush(self.open_set, (self.f_scores[child_position], child_position)) # Add to open set
# Work our way backwards from the end to find the proper path
final_path = [self.end_position] # Add our last hop manually so the loop below can include our start position
current_position = self.end_position # Set the current position to the end
while current_position != self.start_position: # Keep looping until we've reached the beginning of the path
current_position = self.travel_path[current_position] # Update the current to the last path location
final_path.append(current_position) # Append this location to our final array
self.final_path = final_path[::-1] # Now that we've found the path, reverse it so it's in order
# This applies modifications to the world map with the solution so you can see the path when plotting
for x, y in self.final_path:
self.solution_map[y][x] = -1
def get_solution_map(self):
# Gives us the solution map once we've found a solution
return self.solution_map
if __name__ == '__main__':
world_map = import_csv_as_array(CSV_PATH) # Import the map
solver = AStarSolver(world_map, START_POSITION, END_POSITION) # Initialize the solver
solver.solve_path() # Solve the path
solution_map = solver.get_solution_map() # Retrieve the solution map
plot_grid_map(solution_map, "final_path.pdf") # Plot and save the solution
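# Editor's usage sketch (an added, self-contained toy map so the solver can be
# exercised without world.csv; same 0 = free / 1 = obstacle convention as above).
def _toy_astar_demo():
    toy_map = [[0, 0, 0],
               [1, 1, 0],
               [0, 0, 0]]
    solver = AStarSolver(toy_map, (0, 0), (0, 2))  # positions are (x, y)
    solver.solve_path()
    return solver.final_path  # routes right along the top, down the free column, then back left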
| gpl-3.0 |
giorgiop/scikit-learn | sklearn/metrics/base.py | 46 | 4627 | """
Common code for all metrics
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils import check_array, check_consistent_length
from ..utils.multiclass import type_of_target
from ..exceptions import UndefinedMetricWarning as _UndefinedMetricWarning
from ..utils import deprecated
@deprecated("UndefinedMetricWarning has been moved into the sklearn.exceptions"
" module. It will not be available here from version 0.19")
class UndefinedMetricWarning(_UndefinedMetricWarning):
pass
def _average_binary_score(binary_metric, y_true, y_score, average,
sample_weight=None):
"""Average a binary metric for multilabel classification
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
binary_metric : callable, returns shape [n_classes]
The binary metric function to use.
Returns
-------
score : float or array of shape [n_classes]
If not ``None``, average the score, else return the score for each
classes.
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options:
raise ValueError('average has to be one of {0}'
''.format(average_options))
y_type = type_of_target(y_true)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if y_type == "binary":
return binary_metric(y_true, y_score, sample_weight=sample_weight)
check_consistent_length(y_true, y_score, sample_weight)
y_true = check_array(y_true)
y_score = check_array(y_score)
not_average_axis = 1
score_weight = sample_weight
average_weight = None
if average == "micro":
if score_weight is not None:
score_weight = np.repeat(score_weight, y_true.shape[1])
y_true = y_true.ravel()
y_score = y_score.ravel()
elif average == 'weighted':
if score_weight is not None:
average_weight = np.sum(np.multiply(
y_true, np.reshape(score_weight, (-1, 1))), axis=0)
else:
average_weight = np.sum(y_true, axis=0)
if average_weight.sum() == 0:
return 0
elif average == 'samples':
# swap average_weight <-> score_weight
average_weight = score_weight
score_weight = None
not_average_axis = 0
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_score.ndim == 1:
y_score = y_score.reshape((-1, 1))
n_classes = y_score.shape[not_average_axis]
score = np.zeros((n_classes,))
for c in range(n_classes):
y_true_c = y_true.take([c], axis=not_average_axis).ravel()
y_score_c = y_score.take([c], axis=not_average_axis).ravel()
score[c] = binary_metric(y_true_c, y_score_c,
sample_weight=score_weight)
# Average the results
if average is not None:
return np.average(score, weights=average_weight)
else:
return score
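# Editor's illustration (an added, standalone numpy sketch with a toy stand-in
# metric; it does not call the private helper above): 'macro' averages the
# per-column metric values, while 'micro' pools every column into one vector and
# computes the metric once.
def _demo_macro_vs_micro():
    y_true = np.array([[1, 0], [0, 1], [1, 1]])
    y_score = np.array([[0.9, 0.2], [0.1, 0.8], [0.7, 0.6]])

    def toy_metric(yt, ys):
        # toy stand-in for a binary metric: mean score of the positive labels
        return float(ys[yt == 1].mean())

    macro = np.mean([toy_metric(y_true[:, c], y_score[:, c])
                     for c in range(y_true.shape[1])])
    micro = toy_metric(y_true.ravel(), y_score.ravel())
    return macro, micro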
| bsd-3-clause |
ClimbsRocks/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 127 | 1732 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants biclusters with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(
shape=(300, 300), n_clusters=5, noise=5,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
| bsd-3-clause |
meppe/semslam | src/frcnn/scripts/run_detect.py | 1 | 9843 | #!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
print ("starting detection")
import rospkg
from rospkg import rospack
import struct
import pickle
import time
import errno
import sys
import rospy
from std_msgs.msg import String
from lsd_slam_viewer.msg import keyframeMsg
from lsd_slam_viewer.msg import keyframeGraphMsg
from semslam_msgs.msg import objectBBMsg
from PIL import Image
import os,sys
ros_slam_path = "/home/meppe/Coding/semslam"
sys.path.insert(0, ros_slam_path+"/src/frcnn/src/py-faster-rcnn")
sys.path.insert(0, ros_slam_path+"/src/frcnn/src/py-faster-rcnn/caffe-fast-rcnn/python")
sys.path.insert(0, ros_slam_path+"/src/frcnn/src/py-faster-rcnn/lib")
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
# from numpy import uint8
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
print("imports done!")
CLASSES = ('__background__',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
#
NETS = {'vgg16': ('VGG16',
'VGG16_faster_rcnn_final.caffemodel'),
'zf': ('ZF',
'ZF_faster_rcnn_final.caffemodel')}
#
# DETECTION_RUNNING = False
NEW_DETECTION = False
VIS_RUNNING = False
# DETECT_RUNNING = False
current_scores = []
current_boxes = []
current_kf = None
current_kf_id = None
min_c = 255
max_c = 0
CONF_THRESH = 0.2
NMS_THRESH = 0.1
def pub_detections(pub, class_name, dets, thresh=0.5):
global current_kf_id
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
print("Publishing detection of class " + str(class_name))
frameId = 0
isKeyFrame = True
class_name = str(class_name)
highscorebb = None
highscore = 0
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
if score > highscore:
highscorebb = bbox
highscore = score
bbMsg = objectBBMsg(frameId, isKeyFrame, highscorebb, class_name, highscore)
print("publishing bb" + str(bbMsg))
pub.publish(bbMsg)
def vis_detections(im, class_name, dets, thresh=0.5):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
print("Visualizing detection of class " + str(class_name))
# switch red and blue
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
ax.set_title(('{} detections with '
'p({} | box) >= {:.1f}').format(class_name, class_name,
thresh),
fontsize=14)
plt.axis('off')
plt.tight_layout()
# plt.draw()
plt.savefig("kf_"+str(current_kf_id) + "_" + str(class_name) + ".png")
print("image drawn")
def frame_detect(net, im=None):
"""Detect object classes in an image using pre-computed object proposals."""
global NEW_DETECTION, VIS_RUNNING, current_scores, current_boxes, current_kf
if im is None:
im = current_kf
# DETECT_RUNNING = True
print("starting object detection")
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
current_scores, current_boxes = im_detect(net, im)
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, current_boxes.shape[0])
NEW_DETECTION = True
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Faster R-CNN demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16]',
choices=NETS.keys(), default='vgg16')
args = parser.parse_args()
return args
def cb_keyframe_received(msg, net=None):
global current_kf, current_kf_id, min_c, max_c, VIS_RUNNING, NEW_DETECTION
print("Keyframe {} received".format(msg.id))
if VIS_RUNNING:
print ("Visualization of last KF still running, can not detect any more objects and will ignore this keyframe")
return
if NEW_DETECTION:
print ("Object detection for last KF still running, can not detect any more objects and will ignore this keyframe")
return
# print("time: " + str(msg.time))
# print("isKeyframe: " + str(msg.isKeyframe))
# print("camToWorld: " + str(msg.camToWorld))
# print("fx, fy, cx, cy: " + str(msg.fx) + ", " + str(msg.fy) + ", " + str(msg.cx) + ", " + str(msg.cy))
# print("height, width: " + str(msg.height) + ", " + str(msg.width))
# print("pointcloud: ...")
structSizeInputPointDense = 12
height = 480
width = 640
channels = 3 # 4 channels should mean that the data comes in CMYK format from LSD_SLAM
im_shape = (height, width, channels)
num_points = width * height
timer = Timer()
timer.tic()
im = np.zeros(shape=im_shape, dtype=np.uint8)
fmt = "<ffBBBB"
for p in range(num_points):
(d, v, c, m, y, k) = struct.unpack_from(fmt, msg.pointcloud, p*structSizeInputPointDense)
row = int(p % width)
line = int(p / width)
# r,g,b = cmyk_to_rgb(c,m,y,255)
if c != m or c != k:
print "c != m or c != k"
# blue
im[line, row, 0] = int(c)
# green
im[line, row, 1] = int(c)
# red
im[line, row, 2] = int(c)
timer.toc()
print("It took {} sec. to deserialize binary data from lsd_slam_keyframe_msg.".format(timer.total_time))
current_kf = im
current_kf_id = msg.id
if net is not None:
frame_detect(net)
# plt.show()
cv2.imwrite("kf_"+str(current_kf_id)+".png", current_kf)
print("Waiting for next KF.")
def fake_detect(fname="pics/kf_20542.png",net=None):
global NEW_DETECTION, current_kf, current_kf_id
NEW_DETECTION = True
try:
open(fname)
except OSError as e:
if e.errno == errno.ENOENT:
print("File not found")
exit(1)
else:
raise
current_kf = cv2.imread(fname)
#
current_kf_id = "fake_detect_frame"
if net is not None:
frame_detect(net)
if __name__ == '__main__':
rospy.init_node("frcnn")
print("node initialized")
cfg.TEST.HAS_RPN = True # Use RPN for proposals
args = parse_args()
prototxt = os.path.join(cfg.MODELS_DIR, NETS[args.demo_net][0],
'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models',
NETS[args.demo_net][1])
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\nDid you run ./data/script/'
'fetch_faster_rcnn_models.sh?').format(caffemodel))
if args.cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
cfg.GPU_ID = args.gpu_id
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
print '\n\nLoaded network {:s}'.format(caffemodel)
    # Not sure if this is necessary. Leaving it for now, but should test later what the effect of this warmup is...
# Warmup on a dummy image
im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
for i in xrange(2):
_, _= im_detect(net, im)
sub_keyframes = rospy.Subscriber("/lsd_slam/keyframes", keyframeMsg , cb_keyframe_received,
queue_size=1,callback_args=net)
bb_pub = rospy.Publisher('frcnn/bb', objectBBMsg, queue_size=1)
# fake_detect(net=net)
ctr = 0
# if True:
while True:
# Visualize detections for each class
time.sleep(0.5)
if NEW_DETECTION:
VIS_RUNNING = True
for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1 # because we skipped background
cls_boxes = current_boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
cls_scores = current_scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
pub_detections(bb_pub, cls, dets, thresh=CONF_THRESH)
vis_detections(current_kf, cls, dets, thresh=CONF_THRESH)
# break
ctr += 1
NEW_DETECTION = False
VIS_RUNNING = False
# plt.show()
| gpl-3.0 |
adiyoss/DeepVOT | visualization/measurement/display_features.py | 1 | 3977 | import argparse
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
__author__ = 'adiyoss'
graphIndex = 0
graphTitle = 0
predict_line = 1
label_line = 1
def ontype(event):
global graphIndex
global graphTitle
global predict_line
global label_line
if event.key == 'q':
sys.exit(0)
elif event.key == 'right':
graphIndex -= 1
graphTitle -= 1
elif event.key == 'left':
graphIndex += 1
graphTitle += 1
elif event.key == 'p':
if predict_line == 1:
predict_line = 0
else:
predict_line = 1
elif event.key == 'l':
if label_line == 1:
label_line = 0
else:
label_line = 1
else:
return
plt.close()
def display_features(filename, frame_begin_and_end_real, frame_begin_and_end_predict):
global graphIndex, predict_plot, labels_plot
if os.path.isfile(filename) is False:
sys.stderr.write("WARNING: file not found, " + str(filename))
labels = frame_begin_and_end_real.split('-')
predict = frame_begin_and_end_predict.split('-')
m = np.loadtxt(filename)
feature_names = ['Short Term Energy', 'Total Energy', 'Low Energy', 'High Energy', 'Wiener Entropy',
'Auto Correlation', 'Pitch', 'Voicing', 'Zero Crossing',
'1', '2', '3', '4', '5', '6', '7', '8', '9', '10',
'11', '12', '13', '14', '15', '16', '17', '18', '19', '20',
'21', '22', '23', '24', '25', '26', '27', '28', '29', '30',
'31', '32', '33', '34', '35', '36', '37', '38', '39', '40',
'41', '42', '43', '44', '45', '46', '47', '48', '49', '50',
'51', '52', '53', '54']
while True:
index = graphIndex % len(m[0])
title_index = graphTitle % len(feature_names)
fig = plt.figure(1, figsize=(20, 10))
fig.canvas.mpl_connect('key_press_event', ontype)
fig.suptitle(feature_names[title_index], fontsize='x-large', style='italic', fontweight='bold')
max_m = np.max(m[:, index])
min_m = np.min(m[:, index])
width = float(0.6)
plt.plot((m[:, index]), linestyle='-', linewidth=width, color='#006699')
if label_line == 1:
labels_plot, = plt.plot([labels[0], labels[0]], [min_m, max_m], linestyle='-.', color="#730A0A", lw=2)
plt.plot([labels[1], labels[1]], [min_m, max_m], linestyle='-.', color="#730A0A", lw=2)
if predict_line == 1:
predict_plot, = plt.plot([predict[0], predict[0]], [min_m, max_m], linestyle='-.', color='#335C09', lw=2)
plt.plot([predict[1], predict[1]], [min_m, max_m], linestyle='-.', color='#335C09', lw=2)
plt.xlim(xmin=0, xmax=len(m))
# plot the legend
plt.figtext(0.13, 0.05, 'Q: quit', style='italic')
plt.figtext(0.2, 0.05, 'P: Enable/disable prediction marks', style='italic')
plt.figtext(0.38, 0.05, "L: Enable/disable label marks", style='italic')
plt.figtext(0.13, 0.02, 'Left arrow: Next figure', style='italic')
plt.figtext(0.38, 0.02, 'Right arrow: Previous figure', style='italic')
l2 = plt.legend([labels_plot, predict_plot], ["Real Label", "Predict Label"])
        plt.gca().add_artist(l2)  # add l2 as a separate artist to the axes
plt.show()
# ------------- MENU -------------- #
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Features Visualization")
parser.add_argument("features_file", help="The path to features file")
parser.add_argument("labels", help="The vot onset and offset, separated by dash, e.g. 100-108")
parser.add_argument("prediction", help="The vot onset and offset prediction, separated by dash, e.g. 100-108")
args = parser.parse_args()
# run the script
display_features(args.features_file, args.labels, args.prediction)
| mit |
DR08/mxnet | example/autoencoder/data.py | 27 | 1272 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import numpy as np
from sklearn.datasets import fetch_mldata
def get_mnist():
np.random.seed(1234) # set seed for deterministic ordering
data_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
data_path = os.path.join(data_path, '../../data')
mnist = fetch_mldata('MNIST original', data_home=data_path)
p = np.random.permutation(mnist.data.shape[0])
X = mnist.data[p].astype(np.float32)*0.02
Y = mnist.target[p]
return X, Y
| apache-2.0 |
cpcloud/dask | dask/array/percentile.py | 1 | 6350 | from __future__ import absolute_import, division, print_function
from itertools import count
from functools import wraps
from collections import Iterator
import numpy as np
from toolz import merge, merge_sorted
from .core import Array
from ..base import tokenize
from .. import sharedict
@wraps(np.percentile)
def _percentile(a, q, interpolation='linear'):
if not len(a):
return None
if isinstance(q, Iterator):
q = list(q)
if str(a.dtype) == 'category':
result = np.percentile(a.codes, q, interpolation=interpolation)
import pandas as pd
return pd.Categorical.from_codes(result, a.categories, a.ordered)
if np.issubdtype(a.dtype, np.datetime64):
a2 = a.astype('i8')
result = np.percentile(a2, q, interpolation=interpolation)
return result.astype(a.dtype)
if not np.issubdtype(a.dtype, np.number):
interpolation = 'nearest'
return np.percentile(a, q, interpolation=interpolation)
names = ('percentile-%d' % i for i in count(1))
def percentile(a, q, interpolation='linear'):
""" Approximate percentile of 1-D array
See numpy.percentile for more information
"""
if not a.ndim == 1:
raise NotImplementedError(
"Percentiles only implemented for 1-d arrays")
q = np.array(q)
token = tokenize(a, list(q), interpolation)
name = 'percentile_chunk-' + token
dsk = dict(((name, i), (_percentile, (key), q, interpolation))
for i, key in enumerate(a._keys()))
name2 = 'percentile-' + token
dsk2 = {(name2, 0): (merge_percentiles, q, [q] * len(a.chunks[0]),
sorted(dsk), a.chunks[0], interpolation)}
dtype = a.dtype
if np.issubdtype(dtype, np.integer):
dtype = (np.array([], dtype=dtype) / 0.5).dtype
dsk = merge(dsk, dsk2)
dsk = sharedict.merge(a.dask, (name2, dsk))
return Array(dsk, name2, chunks=((len(q),),), dtype=dtype)
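# Editor's usage sketch (an added example; the array contents and chunking are
# arbitrary): compare the approximate chunked percentiles from the function above
# with numpy's exact result.
def _percentile_usage_sketch():
    import numpy as np
    import dask.array as da
    data = np.random.RandomState(0).normal(size=10000)
    x = da.from_array(data, chunks=1000)
    approx = percentile(x, [25, 50, 75]).compute()  # per-chunk percentiles, then merged
    exact = np.percentile(data, [25, 50, 75])
    return approx, exact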
def merge_percentiles(finalq, qs, vals, Ns, interpolation='lower'):
""" Combine several percentile calculations of different data.
Parameters
----------
finalq : numpy.array
Percentiles to compute (must use same scale as ``qs``).
qs : sequence of numpy.arrays
Percentiles calculated on different sets of data.
vals : sequence of numpy.arrays
Resulting values associated with percentiles ``qs``.
Ns : sequence of integers
The number of data elements associated with each data set.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
Specify the type of interpolation to use to calculate final
percentiles. For more information, see numpy.percentile.
Examples
--------
>>> finalq = [10, 20, 30, 40, 50, 60, 70, 80]
>>> qs = [[20, 40, 60, 80], [20, 40, 60, 80]]
>>> vals = [np.array([1, 2, 3, 4]), np.array([10, 11, 12, 13])]
>>> Ns = [100, 100] # Both original arrays had 100 elements
>>> merge_percentiles(finalq, qs, vals, Ns)
array([ 1, 2, 3, 4, 10, 11, 12, 13])
"""
if isinstance(finalq, Iterator):
finalq = list(finalq)
finalq = np.array(finalq)
qs = list(map(list, qs))
vals = list(vals)
Ns = list(Ns)
L = list(zip(*[(q, val, N) for q, val, N in zip(qs, vals, Ns) if N]))
if not L:
raise ValueError("No non-trivial arrays found")
qs, vals, Ns = L
# TODO: Perform this check above in percentile once dtype checking is easy
# Here we silently change meaning
if str(vals[0].dtype) == 'category':
result = merge_percentiles(finalq, qs, [v.codes for v in vals], Ns, interpolation)
import pandas as pd
return pd.Categorical.from_codes(result, vals[0].categories, vals[0].ordered)
if not np.issubdtype(vals[0].dtype, np.number):
interpolation = 'nearest'
if len(vals) != len(qs) or len(Ns) != len(qs):
raise ValueError('qs, vals, and Ns parameters must be the same length')
# transform qs and Ns into number of observations between percentiles
counts = []
for q, N in zip(qs, Ns):
count = np.empty(len(q))
count[1:] = np.diff(q)
count[0] = q[0]
count *= N
counts.append(count)
# Sort by calculated percentile values, then number of observations.
# >95% of the time in this function is spent in `merge_sorted` below.
# An alternative that uses numpy sort is shown. It is sometimes
# comparable to, but typically slower than, `merge_sorted`.
#
# >>> A = np.concatenate(map(np.array, map(zip, vals, counts)))
# >>> A.sort(0, kind='mergesort')
combined_vals_counts = merge_sorted(*map(zip, vals, counts))
combined_vals, combined_counts = zip(*combined_vals_counts)
combined_vals = np.array(combined_vals)
combined_counts = np.array(combined_counts)
# percentile-like, but scaled by total number of observations
combined_q = np.cumsum(combined_counts)
# rescale finalq percentiles to match combined_q
desired_q = finalq * sum(Ns)
# the behavior of different interpolation methods should be
# investigated further.
if interpolation == 'linear':
rv = np.interp(desired_q, combined_q, combined_vals)
else:
left = np.searchsorted(combined_q, desired_q, side='left')
right = np.searchsorted(combined_q, desired_q, side='right') - 1
np.minimum(left, len(combined_vals) - 1, left) # don't exceed max index
lower = np.minimum(left, right)
upper = np.maximum(left, right)
if interpolation == 'lower':
rv = combined_vals[lower]
elif interpolation == 'higher':
rv = combined_vals[upper]
elif interpolation == 'midpoint':
rv = 0.5 * (combined_vals[lower] + combined_vals[upper])
elif interpolation == 'nearest':
lower_residual = np.abs(combined_q[lower] - desired_q)
upper_residual = np.abs(combined_q[upper] - desired_q)
mask = lower_residual > upper_residual
index = lower # alias; we no longer need lower
index[mask] = upper[mask]
rv = combined_vals[index]
else:
raise ValueError("interpolation can only be 'linear', 'lower', "
"'higher', 'midpoint', or 'nearest'")
return rv
| bsd-3-clause |
ppinard/matplotlib-scalebar | matplotlib_scalebar/test_scalebar.py | 1 | 9441 | #!/usr/bin/env python
""" """
# Standard library modules.
# Third party modules.
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import cleanup
from matplotlib.font_manager import FontProperties
import numpy as np
import pytest
# Local modules.
from matplotlib_scalebar.scalebar import ScaleBar
# Globals and constants variables.
@pytest.fixture
@cleanup
def scalebar():
fig = plt.figure()
ax = fig.add_subplot(111)
data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
ax.imshow(data)
scalebar = ScaleBar(0.5)
ax.add_artist(scalebar)
yield scalebar
plt.draw()
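# Editor's usage sketch (an added, minimal standalone example mirroring the
# fixture above; 2e-6 assumes two micrometres per pixel with the default units).
def _scalebar_usage_sketch():
    fig, ax = plt.subplots()
    ax.imshow(np.arange(9).reshape(3, 3))
    ax.add_artist(ScaleBar(2e-6))  # dx: physical size of one pixel
    return fig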
def test_mpl_rcParams_update():
"""
Test if scalebar params are updated accurately in matplotlib rcParams
"""
params = {
"scalebar.length_fraction": 0.2,
"scalebar.width_fraction": 0.01,
"scalebar.location": "upper right",
"scalebar.pad": 0.2,
"scalebar.border_pad": 0.1,
"scalebar.sep": 5,
"scalebar.frameon": True,
"scalebar.color": "k",
"scalebar.box_color": "w",
"scalebar.box_alpha": 1.0,
"scalebar.scale_loc": "bottom",
"scalebar.label_loc": "top",
"scalebar.rotation": "horizontal",
}
matplotlib.rcParams.update(params)
for key, value in params.items():
assert matplotlib.rcParams[key] == value
def test_scalebar_dx_m(scalebar):
assert scalebar.get_dx() == pytest.approx(0.5, abs=1e-2)
assert scalebar.dx == pytest.approx(0.5, abs=1e-2)
scalebar.set_dx(0.2)
assert scalebar.get_dx() == pytest.approx(0.2, abs=1e-2)
assert scalebar.dx == pytest.approx(0.2, abs=1e-2)
scalebar.dx = 0.1
assert scalebar.get_dx() == pytest.approx(0.1, abs=1e-2)
assert scalebar.dx == pytest.approx(0.1, abs=1e-2)
def test_scalebar_length_fraction(scalebar):
assert scalebar.get_length_fraction() is None
assert scalebar.length_fraction is None
scalebar.set_length_fraction(0.2)
assert scalebar.get_length_fraction() == pytest.approx(0.2, abs=1e-2)
assert scalebar.length_fraction == pytest.approx(0.2, abs=1e-2)
scalebar.length_fraction = 0.1
assert scalebar.get_length_fraction() == pytest.approx(0.1, abs=1e-2)
assert scalebar.length_fraction == pytest.approx(0.1, abs=1e-2)
with pytest.raises(ValueError):
scalebar.set_length_fraction(0.0)
with pytest.raises(ValueError):
scalebar.set_length_fraction(1.1)
@pytest.mark.filterwarnings("ignore")
def test_scalebar_height_fraction(scalebar):
with pytest.deprecated_call():
assert scalebar.get_height_fraction() is None
with pytest.deprecated_call():
assert scalebar.height_fraction is None
with pytest.deprecated_call():
scalebar.set_height_fraction(0.2)
assert scalebar.get_height_fraction() == pytest.approx(0.2, abs=1e-2)
assert scalebar.height_fraction == pytest.approx(0.2, abs=1e-2)
with pytest.deprecated_call():
scalebar.height_fraction = 0.1
assert scalebar.get_height_fraction() == pytest.approx(0.1, abs=1e-2)
assert scalebar.height_fraction == pytest.approx(0.1, abs=1e-2)
with pytest.raises(ValueError), pytest.deprecated_call():
scalebar.set_height_fraction(0.0)
with pytest.raises(ValueError), pytest.deprecated_call():
scalebar.set_height_fraction(1.1)
def test_scalebar_location(scalebar):
assert scalebar.get_location() is None
assert scalebar.location is None
scalebar.set_location("upper right")
assert scalebar.get_location() == 1
assert scalebar.location == 1
scalebar.location = "lower left"
assert scalebar.get_location() == 3
assert scalebar.location == 3
def test_scalebar_loc(scalebar):
assert scalebar.get_loc() is None
assert scalebar.loc is None
scalebar.set_location("upper right")
assert scalebar.get_loc() == 1
assert scalebar.loc == 1
scalebar.location = "lower left"
assert scalebar.get_loc() == 3
assert scalebar.loc == 3
scalebar.set_loc("lower right")
assert scalebar.get_loc() == 4
assert scalebar.loc == 4
scalebar.location = "upper left"
assert scalebar.get_loc() == 2
assert scalebar.loc == 2
with pytest.raises(ValueError):
ScaleBar(1.0, loc="upper right", location="upper left")
with pytest.raises(ValueError):
ScaleBar(1.0, loc="upper right", location=2)
def test_scalebar_pad(scalebar):
assert scalebar.get_pad() is None
assert scalebar.pad is None
scalebar.set_pad(4.0)
assert scalebar.get_pad() == pytest.approx(4.0, abs=1e-2)
assert scalebar.pad == pytest.approx(4.0, abs=1e-2)
scalebar.pad = 5.0
assert scalebar.get_pad() == pytest.approx(5.0, abs=1e-2)
assert scalebar.pad == pytest.approx(5.0, abs=1e-2)
def test_scalebar_border_pad(scalebar):
assert scalebar.get_border_pad() is None
assert scalebar.border_pad is None
scalebar.set_border_pad(4)
assert scalebar.get_border_pad() == pytest.approx(4.0, abs=1e-2)
assert scalebar.border_pad == pytest.approx(4.0, abs=1e-2)
scalebar.border_pad = 5
assert scalebar.get_border_pad() == pytest.approx(5.0, abs=1e-2)
assert scalebar.border_pad == pytest.approx(5.0, abs=1e-2)
def test_scalebar_sep(scalebar):
assert scalebar.get_sep() is None
assert scalebar.sep is None
scalebar.set_sep(4)
assert scalebar.get_sep() == pytest.approx(4.0, abs=1e-2)
assert scalebar.sep == pytest.approx(4.0, abs=1e-2)
scalebar.sep = 5
assert scalebar.get_sep() == pytest.approx(5.0, abs=1e-2)
assert scalebar.sep == pytest.approx(5.0, abs=1e-2)
def test_scalebar_frameon(scalebar):
assert scalebar.get_frameon() is None
assert scalebar.frameon is None
scalebar.set_frameon(True)
assert scalebar.get_frameon()
assert scalebar.frameon
scalebar.frameon = False
assert not scalebar.get_frameon()
assert not scalebar.frameon
def test_scalebar_font_properties(scalebar):
assert isinstance(scalebar.get_font_properties(), FontProperties)
assert isinstance(scalebar.font_properties, FontProperties)
scalebar.set_font_properties(dict(family="serif", size=9))
assert scalebar.font_properties.get_family() == ["serif"]
assert scalebar.font_properties.get_size() == 9
scalebar.font_properties = dict(family="sans serif", size=12)
assert scalebar.font_properties.get_family() == ["sans serif"]
assert scalebar.font_properties.get_size() == 12
with pytest.raises(ValueError):
scalebar.set_font_properties(2.0)
with pytest.raises(ValueError):
scalebar.font_properties = 2.0
def test_matplotlibrc(scalebar):
matplotlib.rcParams["scalebar.box_color"] = "r"
def test_scalebar_fixed_value(scalebar):
assert scalebar.get_fixed_value() is None
assert scalebar.fixed_value is None
scalebar.set_fixed_value(0.2)
assert scalebar.get_fixed_value() == pytest.approx(0.2, abs=1e-2)
assert scalebar.fixed_value == pytest.approx(0.2, abs=1e-2)
scalebar.fixed_value = 0.1
assert scalebar.get_fixed_value() == pytest.approx(0.1, abs=1e-2)
assert scalebar.fixed_value == pytest.approx(0.1, abs=1e-2)
def test_scalebar_fixed_units(scalebar):
assert scalebar.get_fixed_units() is None
assert scalebar.fixed_units is None
scalebar.set_fixed_units("m")
assert scalebar.get_fixed_units() == "m"
assert scalebar.fixed_units == "m"
scalebar.fixed_units = "um"
assert scalebar.get_fixed_units() == "um"
assert scalebar.fixed_units == "um"
def test_scale_formatter(scalebar):
scalebar.dx = 1
scalebar.units = "m"
_length, value, units = scalebar._calculate_best_length(10)
assert scalebar.scale_formatter(value, units) == "5 m"
scalebar.scale_formatter = lambda *_: "test"
assert scalebar.scale_formatter(value, units) == "test"
scalebar.scale_formatter = lambda value, unit: "{} {}".format(unit, value)
assert scalebar.scale_formatter(value, units) == "m 5"
def test_label_formatter(scalebar):
scalebar.dx = 1
scalebar.units = "m"
_length, value, units = scalebar._calculate_best_length(10)
with pytest.deprecated_call():
assert scalebar.label_formatter(value, units) == "5 m"
with pytest.deprecated_call():
scalebar.label_formatter = lambda *_: "test"
assert scalebar.label_formatter(value, units) == "test"
with pytest.deprecated_call():
scalebar.label_formatter = lambda value, unit: "{} {}".format(unit, value)
assert scalebar.label_formatter(value, units) == "m 5"
@pytest.mark.parametrize("rotation", ["horizontal", "vertical"])
def test_rotation(scalebar, rotation):
assert scalebar.get_rotation() is None
assert scalebar.rotation is None
scalebar.set_rotation(rotation)
assert scalebar.get_rotation() == rotation
assert scalebar.rotation == rotation
with pytest.raises(ValueError):
scalebar.set_rotation("h")
def test_warnings():
with pytest.warns(None) as record:
fig = plt.figure()
ax = fig.add_subplot(111)
data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
ax.imshow(data)
scalebar = ScaleBar(0.5)
ax.add_artist(scalebar)
plt.draw()
assert len(record) == 0, "Warnings: " + ",".join(
f"{repr(w.message)}" for w in record
)
| bsd-2-clause |
jzt5132/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
kelseynlucas/Pressure-based-force-calculation-for-foils | queen2_fulltimetrace_tail.py | 1 | 4270 | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 17 15:39:16 2015
@author: Kelsey
"""
#Load some built-in file-reading functions
from os import listdir
from os.path import isfile, join
def queen2_results_folders(directory):
"""
Reads a file path (directory) - for a test type - and finds the folders
    within the directory with queen2 results. Returns a dictionary of the data folders.
Input
-directory - file path for a test type containing folders with queen2
data. Note that all '\'s in the default file path must be
changed to '/'. No ending slash
"""
#If have a subfolder (not a file), get name and add to list of subfolders
parentVids = [f for f in listdir(directory) if not isfile(join(directory,f))]
#Initialize storage for data folders with queen2 results
queen2folders = {}
#for each parent vid, get the queen2 results folder
for vid in parentVids:
#set filepath to the video's queen2 results folder
path = directory + '/' + vid + '/results_3cyc'
#add to the dictionary
queen2folders[vid] = path
#return the dictionary
return queen2folders
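#Illustrative usage of the helper above (added example; the path and video
#names are hypothetical, not from the real analysis):
# >>> queen2_results_folders('G:/example_test_type')
# {'vid01': 'G:/example_test_type/vid01/results_3cyc',
# 'vid02': 'G:/example_test_type/vid02/results_3cyc'}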
#import the dataframe package
import pandas as pd
#note this function was copied from the queen2_phaseavg.py script, since it is
#also needed in this script
def queen2_results(folder):
"""
Reads in a folder path where queen2 data are stored. Retrieves the queen2
data files and combines to a dataframe. Applies converstion from N/m and
Nm/m to N and Nmm.
Input
-folder - folder where queen2 data are stored. All '\'s in the default
file path must be changed to '/'.
"""
#set file name and path for Fx, Fy, and Tz data
PFx_file = folder + '/queen2_rodsim_dT10ms_xforce_fixed.xlsx'
PFy_file = folder + '/queen2_rodsim_dT10ms_yforce_fixed.xlsx'
PTz_file = folder + '/queen2_rodsim_dT10ms_torque_fixed.xlsx'
#Read in Fx data
PFx = pd.read_excel(PFx_file, sheetname = 'Sheet1', header = None)
#Data is natively in one row. Need it in one column, so transpose.
PFx = PFx.T
#Label the column something informative so it can be retrieved later
PFx.columns = ['PFx']
    #Scale the pressure code data from N/m to N by multiplying by foil depth.
#Sign is assigned to match the definition of +/- given by the Flapper.
PFx['PFx'] = PFx['PFx']*-1.
#repeat above for Fy
PFy = pd.read_excel(PFy_file, sheetname = 'Sheet1', header = None)
PFy = PFy.T
PFy.columns = ['PFy']
PFy['PFy'] = PFy['PFy']*1.
#repeat above for Tz
PTz = pd.read_excel(PTz_file, sheetname = 'Sheet1', header = None)
PTz = PTz.T
PTz.columns = ['PTz']
PTz['PTz'] = PTz['PTz']*-1000.
#Create a time sequence
time = range(0, len(PFx['PFx']))
    #scale the time to seconds (data are at 10 ms steps, so index/100 = s)
time = [t/100. for t in time]
#Store time sequence in final dataframe
combo = pd.DataFrame(time, columns=['time'])
#Add Fx, Fy, and Tz to the final dataframe
combo['PFx'] = PFx['PFx']
combo['PFy'] = PFy['PFy']
combo['PTz'] = PTz['PTz']
#return the Fx, Fy, Tz, and time data for the test
return combo
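#Illustrative note (added): the dataframe returned above has one row per 10 ms
#time step and the columns ['time', 'PFx', 'PFy', 'PTz'], e.g. (hypothetical
#folder path):
# >>> combo = queen2_results('G:/example_test_type/vid01/results_3cyc')
# >>> combo.columns.tolist()
# ['time', 'PFx', 'PFy', 'PTz']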
#make a list of all the test types
tests = ['tail_gap','tail_mid']
#ID the directory containing the results for ALL the tests
directory = 'G:/Nonunif_performance_PIV/Pressure code analysis/'
#for each test,
for t in tests:
#make the full path for the test's data folder
path = directory + t
#ID all the folders containing queen2 results
folders = queen2_results_folders(path)
#for each folder with queen2 data,
for f in folders.keys():
#retrieve the queen2 data and store it in one dataframe
data = queen2_results(folders[f])
#save out the data combined into one file
finalpath = folders[f] + '/queen2_results_fulltimetrace_fixed_axisfix.xlsx'
data.to_excel(finalpath)
#confirm done
print 'Saved ' + str(f) + ' file as ' + finalpath.split('/')[-1] | gpl-3.0 |
darribas/pysal | pysal/esda/mapclassify.py | 3 | 50683 | """
A module of classification schemes for choropleth mapping.
"""
__author__ = "Sergio J. Rey"
__all__ = ['Map_Classifier', 'quantile', 'Box_Plot', 'Equal_Interval',
'Fisher_Jenks', 'Fisher_Jenks_Sampled', 'Jenks_Caspall',
'Jenks_Caspall_Forced', 'Jenks_Caspall_Sampled',
'Max_P_Classifier', 'Maximum_Breaks', 'Natural_Breaks',
'Quantiles', 'Percentiles', 'Std_Mean', 'User_Defined',
'gadf', 'K_classifiers']
from pysal.common import *
K = 5 # default number of classes in any map scheme with this as an argument
def quantile(y, k=4):
"""
Calculates the quantiles for an array
Parameters
----------
y : array
(n,1), values to classify
k : int
number of quantiles
Returns
-------
implicit : array
(n,1), quantile values
Examples
--------
>>> x = np.arange(1000)
>>> quantile(x)
array([ 249.75, 499.5 , 749.25, 999. ])
>>> quantile(x, k = 3)
array([ 333., 666., 999.])
>>>
Note that if there are enough ties that the quantile values repeat, we
collapse to pseudo quantiles in which case the number of classes will be less than k
>>> x = [1.0] * 100
>>> x.extend([3.0] * 40)
>>> len(x)
140
>>> y = np.array(x)
>>> quantile(y)
array([ 1., 3.])
"""
w = 100. / k
p = np.arange(w, 100 + w, w)
if p[-1] > 100.0:
p[-1] = 100.0
q = np.array([stats.scoreatpercentile(y, pct) for pct in p])
return np.unique(q)
def binC(y, bins):
"""
Bin categorical/qualitative data
Parameters
----------
y : array
(n,q), categorical values
bins : array
(k,1), unique values associated with each bin
    Returns
    -------
b : array
(n,q), bin membership, values between 0 and k-1
Examples
--------
>>> np.random.seed(1)
>>> x = np.random.randint(2, 8, (10, 3))
>>> bins = range(2, 8)
>>> x
array([[7, 5, 6],
[2, 3, 5],
[7, 2, 2],
[3, 6, 7],
[6, 3, 4],
[6, 7, 4],
[6, 5, 6],
[4, 6, 7],
[4, 6, 3],
[3, 2, 7]])
>>> y = binC(x, bins)
>>> y
array([[5, 3, 4],
[0, 1, 3],
[5, 0, 0],
[1, 4, 5],
[4, 1, 2],
[4, 5, 2],
[4, 3, 4],
[2, 4, 5],
[2, 4, 1],
[1, 0, 5]])
>>>
"""
if np.rank(y) == 1:
k = 1
n = np.shape(y)[0]
else:
n, k = np.shape(y)
b = np.zeros((n, k), dtype='int')
for i, bin in enumerate(bins):
b[np.nonzero(y == bin)] = i
# check for non-binned items and print a warning if needed
vals = set(y.flatten())
for val in vals:
if val not in bins:
print 'warning: value not in bin: ', val
print 'bins: ', bins
return b
def bin(y, bins):
"""
bin interval/ratio data
Parameters
----------
y : array
(n,q), values to bin
bins : array
(k,1), upper bounds of each bin (monotonic)
Returns
-------
b : array
           (n,q), bin membership, values between 0 and k-1
Examples
--------
>>> np.random.seed(1)
>>> x = np.random.randint(2, 20, (10, 3))
>>> bins = [10, 15, 20]
>>> b = bin(x, bins)
>>> x
array([[ 7, 13, 14],
[10, 11, 13],
[ 7, 17, 2],
[18, 3, 14],
[ 9, 15, 8],
[ 7, 13, 12],
[16, 6, 11],
[19, 2, 15],
[11, 11, 9],
[ 3, 2, 19]])
>>> b
array([[0, 1, 1],
[0, 1, 1],
[0, 2, 0],
[2, 0, 1],
[0, 1, 0],
[0, 1, 1],
[2, 0, 1],
[2, 0, 1],
[1, 1, 0],
[0, 0, 2]])
>>>
"""
if np.rank(y) == 1:
k = 1
n = np.shape(y)[0]
else:
n, k = np.shape(y)
b = np.zeros((n, k), dtype='int')
i = len(bins)
if type(bins) != list:
bins = bins.tolist()
binsc = copy.copy(bins)
while binsc:
i -= 1
c = binsc.pop(-1)
b[np.nonzero(y <= c)] = i
return b
def bin1d(x, bins):
"""
place values of a 1-d array into bins and determine counts of values in
each bin
Parameters
----------
x : array
(n, 1), values to bin
bins : array
(k,1), upper bounds of each bin (monotonic)
Returns
-------
binIds : array
1-d array of integer bin Ids
counts: int
number of elements of x falling in each bin
Examples
--------
>>> x = np.arange(100, dtype = 'float')
>>> bins = [25, 74, 100]
>>> binIds, counts = bin1d(x, bins)
>>> binIds
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2])
>>> counts
array([26, 49, 25])
"""
left = [-sys.maxint]
left.extend(bins[0:-1])
right = bins
cuts = zip(left, right)
k = len(bins)
binIds = np.zeros(x.shape, dtype='int')
while cuts:
k -= 1
l, r = cuts.pop(-1)
binIds += (x > l) * (x <= r) * k
counts = np.bincount(binIds)
return (binIds, counts)
def load_example():
"""
Helper function for doc tests
"""
import pysal
np.random.seed(10)
dat = pysal.open(pysal.examples.get_path('calempdensity.csv'))
cal = np.array([record[-1] for record in dat])
return cal
def natural_breaks(values, k=5, itmax=100):
"""
natural breaks helper function
"""
values = np.array(values)
n = len(values)
uv = np.unique(values)
uvk = len(uv)
if uvk < k:
print 'Warning: Not enough unique values in array to form k classes'
print "Warning: setting k to %d" % uvk
k = uvk
sids = np.random.permutation(range(len(uv)))[0:k]
seeds = uv[sids]
seeds.sort()
diffs = abs(np.matrix([values - seed for seed in seeds]))
c0 = diffs.argmin(axis=0)
c0 = np.array(c0)[0]
solving = True
solved = False
rk = range(k)
it = 0
while solving:
# get centroids of clusters
seeds = [np.median(values[c0 == c]) for c in rk]
seeds.sort()
# for each value find closest centroid
diffs = abs(np.matrix([values - seed for seed in seeds]))
# assign value to that centroid
c1 = diffs.argmin(axis=0)
c1 = np.array(c1)[0]
#compare new classids to previous
d = abs(c1 - c0)
if d.sum() == 0:
solving = False
solved = True
else:
c0 = c1
it += 1
if it == itmax:
solving = False
class_ids = c1
cuts = [max(values[c1 == c]) for c in rk]
return sids, seeds, diffs, class_ids, solved, it, cuts
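# Added illustrative sketch of how the helper above is typically driven; this
# mirrors the Natural_Breaks classifier defined below and is an example, not
# part of the original API:
# >>> cal = load_example()
# >>> sids, seeds, diffs, class_ids, solved, it, cuts = natural_breaks(cal, k=5)
# cuts holds the upper bound of each class; Natural_Breaks re-runs this with
# several random starts and keeps the solution scoring best on the distance
# criterion (diffs.sum()).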
def _fisher_jenks_means(values, classes=5, sort=True):
"""
Jenks Optimal (Natural Breaks) algorithm implemented in Python.
The original Python code comes from here:
http://danieljlewis.org/2010/06/07/jenks-natural-breaks-algorithm-in-python/
and is based on a JAVA and Fortran code available here:
https://stat.ethz.ch/pipermail/r-sig-geo/2006-March/000811.html
Returns class breaks such that classes are internally homogeneous while
assuring heterogeneity among classes.
"""
if sort:
values.sort()
mat1 = []
for i in range(0, len(values) + 1):
temp = []
for j in range(0, classes + 1):
temp.append(0)
mat1.append(temp)
mat2 = []
for i in range(0, len(values) + 1):
temp = []
for j in range(0, classes + 1):
temp.append(0)
mat2.append(temp)
for i in range(1, classes + 1):
mat1[1][i] = 1
mat2[1][i] = 0
for j in range(2, len(values) + 1):
mat2[j][i] = float('inf')
v = 0.0
for l in range(2, len(values) + 1):
s1 = 0.0
s2 = 0.0
w = 0.0
for m in range(1, l + 1):
i3 = l - m + 1
val = float(values[i3 - 1])
s2 += val * val
s1 += val
w += 1
v = s2 - (s1 * s1) / w
i4 = i3 - 1
if i4 != 0:
for j in range(2, classes + 1):
if mat2[l][j] >= (v + mat2[i4][j - 1]):
mat1[l][j] = i3
mat2[l][j] = v + mat2[i4][j - 1]
mat1[l][1] = 1
mat2[l][1] = v
k = len(values)
kclass = []
for i in range(0, classes + 1):
kclass.append(0)
kclass[classes] = float(values[len(values) - 1])
kclass[0] = float(values[0])
countNum = classes
while countNum >= 2:
pivot = mat1[k][countNum]
id = int(pivot - 2)
kclass[countNum - 1] = values[id]
k = int(pivot - 1)
countNum -= 1
return kclass
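# Added illustrative note on the helper above: for `classes` groups it returns
# classes + 1 numbers [min(values), break_1, ..., max(values)].  For instance
# (toy values chosen for demonstration):
# >>> _fisher_jenks_means([1, 2, 3, 12, 13, 14], classes=2)
# would split the data into the groups {1, 2, 3} and {12, 13, 14}, since that
# partition minimizes the within-class sum of squared deviations.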
class Map_Classifier:
"""
Abstract class for all map classifications
For an array :math:`y` of :math:`n` values, a map classifier places each value
:math:`y_i` into one of :math:`k` mutually exclusive and exhaustive classes.
    Each classifier defines the classes based on different criteria, but in all
cases the following hold for the classifiers in PySAL:
.. math::
        C_j^l < y_i \le C_j^u \quad \\forall i \in C_j
where :math:`C_j` denotes class :math:`j` which has lower bound :math:`C_j^l` and upper bound :math:`C_j^u`.
Map Classifiers Supported
* :class:`~pysal.esda.mapclassify.Box_Plot`
* :class:`~pysal.esda.mapclassify.Equal_Interval`
* :class:`~pysal.esda.mapclassify.Fisher_Jenks`
* :class:`~pysal.esda.mapclassify.Fisher_Jenks_Sampled`
* :class:`~pysal.esda.mapclassify.Jenks_Caspall`
* :class:`~pysal.esda.mapclassify.Jenks_Caspall_Forced`
* :class:`~pysal.esda.mapclassify.Jenks_Caspall_Sampled`
* :class:`~pysal.esda.mapclassify.Max_P_Classifier`
* :class:`~pysal.esda.mapclassify.Maximum_Breaks`
* :class:`~pysal.esda.mapclassify.Natural_Breaks`
* :class:`~pysal.esda.mapclassify.Quantiles`
* :class:`~pysal.esda.mapclassify.Percentiles`
* :class:`~pysal.esda.mapclassify.Std_Mean`
* :class:`~pysal.esda.mapclassify.User_Defined`
Utilities:
In addition to the classifiers, there are several utility functions that can be used to evaluate the properties of a specific classifier for different parameter values, or for automatic selection of a classifier and number of classes.
* :func:`~pysal.esda.mapclassify.gadf`
* :class:`~pysal.esda.mapclassify.K_classifiers`
References
----------
Slocum, T.A., R.B. McMaster, F.C. Kessler and H.H. Howard (2009) *Thematic Cartography and Geovisualization*. Pearson Prentice Hall, Upper Saddle River.
"""
def __init__(self, y):
self.name = 'Map Classifier'
if hasattr(y, 'values'):
y = y.values # fix for pandas
self.y = y
self._classify()
self._summary()
def _summary(self):
yb = self.yb
self.classes = [np.nonzero(yb == c)[0].tolist() for c in range(self.k)]
self.tss = self.get_tss()
self.adcm = self.get_adcm()
self.gadf = self.get_gadf()
def _classify(self):
self._set_bins()
self.yb, self.counts = bin1d(self.y, self.bins)
def __str__(self):
st = self._table_string()
return st
def __repr__(self):
return self._table_string()
def get_tss(self):
"""
Total sum of squares around class means
Returns sum of squares over all class means
"""
tss = 0
for class_def in self.classes:
if len(class_def) > 0:
yc = self.y[class_def]
css = yc - yc.mean()
css *= css
tss += sum(css)
return tss
def _set_bins(self):
pass
def get_adcm(self):
"""
Absolute deviation around class median (ADCM).
Calculates the absolute deviations of each observation about its class
median as a measure of fit for the classification method.
Returns sum of ADCM over all classes
"""
adcm = 0
for class_def in self.classes:
if len(class_def) > 0:
yc = self.y[class_def]
yc_med = np.median(yc)
ycd = np.abs(yc - yc_med)
adcm += sum(ycd)
return adcm
def get_gadf(self):
"""
Goodness of absolute deviation of fit
"""
adam = (np.abs(self.y - np.median(self.y))).sum()
gadf = 1 - self.adcm / adam
return gadf
def _table_string(self, width=12, decimal=3):
fmt = ".%df" % decimal
fmt = "%" + fmt
largest = max([len(fmt % i) for i in self.bins])
width = largest
fmt = "%d.%df" % (width, decimal)
fmt = "%" + fmt
k1 = self.k - 1
h1 = "Lower"
h1 = h1.center(largest)
h2 = " "
h2 = h2.center(10)
h3 = "Upper"
h3 = h3.center(largest + 1)
largest = "%d" % max(self.counts)
largest = len(largest) + 15
h4 = "Count"
h4 = h4.rjust(largest)
table = []
header = h1 + h2 + h3 + h4
table.append(header)
table.append("=" * len(header))
rows = []
for i, up in enumerate(self.bins):
if i == 0:
left = " " * width
left += " x[i] <= "
else:
left = fmt % self.bins[i - 1]
left += " < x[i] <= "
right = fmt % self.bins[i]
row = left + right
cnt = "%d" % self.counts[i]
cnt = cnt.rjust(largest)
row += cnt
table.append(row)
name = self.name
top = name.center(len(row))
table.insert(0, top)
table.insert(1, " ")
table = "\n".join(table)
return table
class Equal_Interval(Map_Classifier):
"""
Equal Interval Classification
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
Attributes
----------
yb : array
(n,1), bin ids for observations,
each value is the id of the class the observation belongs to
yb[i] = j for j>=1 if bins[j-1] < y[i] <= bins[j], yb[i] = 0 otherwise
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> ei = Equal_Interval(cal, k = 5)
>>> ei.k
5
>>> ei.counts
array([57, 0, 0, 0, 1])
>>> ei.bins
array([ 822.394, 1644.658, 2466.922, 3289.186, 4111.45 ])
>>>
Notes
-----
Intervals defined to have equal width:
.. math::
bins_j = min(y)+w*(j+1)
    with :math:`w=\\frac{max(y)-min(y)}{k}`
"""
def __init__(self, y, k=K):
"""
see class docstring
"""
self.k = k
Map_Classifier.__init__(self, y)
self.name = 'Equal Interval'
def _set_bins(self):
y = self.y
k = self.k
max_y = max(y)
min_y = min(y)
rg = max_y - min_y
width = rg * 1. / k
cuts = np.arange(min_y + width, max_y + width, width)
if len(cuts) > self.k: # handle overshooting
cuts = cuts[0:k]
cuts[-1] = max_y
bins = cuts.copy()
self.bins = bins
class Percentiles(Map_Classifier):
"""
Percentiles Map Classification
Parameters
----------
y : array
attribute to classify
pct : array
percentiles default=[1,10,50,90,99,100]
Attributes
----------
yb : array
bin ids for observations (numpy array n x 1)
bins : array
the upper bounds of each class (numpy array k x 1)
k : int
the number of classes
counts : int
the number of observations falling in each class (numpy array k x 1)
Examples
--------
>>> cal = load_example()
>>> p = Percentiles(cal)
>>> p.bins
array([ 1.35700000e-01, 5.53000000e-01, 9.36500000e+00,
2.13914000e+02, 2.17994800e+03, 4.11145000e+03])
>>> p.counts
array([ 1, 5, 23, 23, 5, 1])
>>> p2 = Percentiles(cal, pct = [50, 100])
>>> p2.bins
array([ 9.365, 4111.45 ])
>>> p2.counts
array([29, 29])
>>> p2.k
2
"""
def __init__(self, y, pct=[1, 10, 50, 90, 99, 100]):
self.pct = pct
Map_Classifier.__init__(self, y)
self.name = 'Percentiles'
def _set_bins(self):
y = self.y
pct = self.pct
self.bins = np.array([stats.scoreatpercentile(y, p) for p in pct])
self.k = len(self.bins)
class Box_Plot(Map_Classifier):
"""
Box_Plot Map Classification
Parameters
----------
y : array
attribute to classify
hinge : float
multiplier for IQR
Attributes
----------
yb : array
(n,1), bin ids for observations
bins : array
(n,1), the upper bounds of each class (monotonic)
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
low_outlier_ids : array
indices of observations that are low outliers
high_outlier_ids : array
indices of observations that are high outliers
Notes
-----
The bins are set as follows::
bins[0] = q[0]-hinge*IQR
bins[1] = q[0]
bins[2] = q[1]
bins[3] = q[2]
bins[4] = q[2]+hinge*IQR
bins[5] = inf (see Notes)
where q is an array of the first three quartiles of y and
IQR=q[2]-q[0]
If q[2]+hinge*IQR > max(y) there will only be 5 classes and no high outliers,
otherwise, there will be 6 classes and at least one high outlier.
Examples
--------
>>> cal = load_example()
>>> bp = Box_Plot(cal)
>>> bp.bins
array([ -5.28762500e+01, 2.56750000e+00, 9.36500000e+00,
3.95300000e+01, 9.49737500e+01, 4.11145000e+03])
>>> bp.counts
array([ 0, 15, 14, 14, 6, 9])
>>> bp.high_outlier_ids
array([ 0, 6, 18, 29, 33, 36, 37, 40, 42])
>>> cal[bp.high_outlier_ids]
array([ 329.92, 181.27, 370.5 , 722.85, 192.05, 110.74,
4111.45, 317.11, 264.93])
>>> bx = Box_Plot(np.arange(100))
>>> bx.bins
array([ -49.5 , 24.75, 49.5 , 74.25, 148.5 ])
"""
def __init__(self, y, hinge=1.5):
"""
Parameters
----------
y : array (n,1)
attribute to classify
hinge : float
multiple of inter-quartile range (default=1.5)
"""
self.hinge = hinge
Map_Classifier.__init__(self, y)
self.name = 'Box Plot'
def _set_bins(self):
y = self.y
pct = [25, 50, 75, 100]
bins = [stats.scoreatpercentile(y, p) for p in pct]
iqr = bins[-2] - bins[0]
self.iqr = iqr
pivot = self.hinge * iqr
left_fence = bins[0] - pivot
right_fence = bins[-2] + pivot
if right_fence < bins[-1]:
bins.insert(-1, right_fence)
else:
bins[-1] = right_fence
bins.insert(0, left_fence)
self.bins = np.array(bins)
self.k = len(pct)
def _classify(self):
Map_Classifier._classify(self)
self.low_outlier_ids = np.nonzero(self.yb == 0)[0]
self.high_outlier_ids = np.nonzero(self.yb == 5)[0]
class Quantiles(Map_Classifier):
"""
Quantile Map Classification
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
Attributes
----------
yb : array
(n,1), bin ids for observations,
each value is the id of the class the observation belongs to
yb[i] = j for j>=1 if bins[j-1] < y[i] <= bins[j], yb[i] = 0 otherwise
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> q = Quantiles(cal, k = 5)
>>> q.bins
array([ 1.46400000e+00, 5.79800000e+00, 1.32780000e+01,
5.46160000e+01, 4.11145000e+03])
>>> q.counts
array([12, 11, 12, 11, 12])
>>>
"""
def __init__(self, y, k=K):
self.k = k
Map_Classifier.__init__(self, y)
self.name = 'Quantiles'
def _set_bins(self):
y = self.y
k = self.k
self.bins = quantile(y, k=k)
class Std_Mean(Map_Classifier):
"""
Standard Deviation and Mean Map Classification
Parameters
----------
y : array
(n,1), values to classify
multiples : array
the multiples of the standard deviation to add/subtract from
the sample mean to define the bins, default=[-2,-1,1,2]
Attributes
----------
yb : array
(n,1), bin ids for observations,
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> st = Std_Mean(cal)
>>> st.k
5
>>> st.bins
array([ -967.36235382, -420.71712519, 672.57333208, 1219.21856072,
4111.45 ])
>>> st.counts
array([ 0, 0, 56, 1, 1])
>>>
>>> st3 = Std_Mean(cal, multiples = [-3, -1.5, 1.5, 3])
>>> st3.bins
array([-1514.00758246, -694.03973951, 945.8959464 , 1765.86378936,
4111.45 ])
>>> st3.counts
array([ 0, 0, 57, 0, 1])
>>>
"""
def __init__(self, y, multiples=[-2, -1, 1, 2]):
self.multiples = multiples
Map_Classifier.__init__(self, y)
self.name = 'Std_Mean'
def _set_bins(self):
y = self.y
s = y.std(ddof=1)
m = y.mean()
cuts = [m + s * w for w in self.multiples]
y_max = y.max()
if cuts[-1] < y_max:
cuts.append(y_max)
self.bins = np.array(cuts)
self.k = len(cuts)
class Maximum_Breaks(Map_Classifier):
"""
Maximum Breaks Map Classification
Parameters
----------
y : array
(n, 1), values to classify
k : int
number of classes required
mindiff : float
The minimum difference between class breaks
Attributes
----------
yb : array
(n, 1), bin ids for observations
bins : array
(k, 1), the upper bounds of each class
k : int
the number of classes
counts : array
(k, 1), the number of observations falling in each class (numpy array k x 1)
Examples
--------
>>> cal = load_example()
>>> mb = Maximum_Breaks(cal, k = 5)
>>> mb.k
5
>>> mb.bins
array([ 146.005, 228.49 , 546.675, 2417.15 , 4111.45 ])
>>> mb.counts
array([50, 2, 4, 1, 1])
>>>
"""
def __init__(self, y, k=5, mindiff=0):
self.k = k
self.mindiff = mindiff
Map_Classifier.__init__(self, y)
self.name = 'Maximum_Breaks'
def _set_bins(self):
xs = self.y.copy()
y = self.y.copy()
k = self.k
xs.sort()
min_diff = self.mindiff
d = xs[1:] - xs[:-1]
diffs = d[np.nonzero(d > min_diff)]
diffs = sp.unique(diffs)
k1 = k - 1
if len(diffs) > k1:
diffs = diffs[-k1:]
mp = []
self.cids = []
for diff in diffs:
ids = np.nonzero(d == diff)
for id in ids:
self.cids.append(id[0])
cp = ((xs[id] + xs[id + 1]) / 2.)
mp.append(cp[0])
mp.append(xs[-1])
mp.sort()
self.bins = np.array(mp)
class Natural_Breaks(Map_Classifier):
"""
Natural Breaks Map Classification
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
initial : int
number of initial solutions to generate, (default=100)
Attributes
----------
yb : array
(n,1), bin ids for observations,
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> import numpy as np
>>> np.random.seed(10)
>>> cal = load_example()
>>> nb = Natural_Breaks(cal, k = 5)
>>> nb.k
5
>>> nb.counts
array([14, 13, 14, 10, 7])
>>> nb.bins
array([ 1.81000000e+00, 7.60000000e+00, 2.98200000e+01,
1.81270000e+02, 4.11145000e+03])
>>> x = np.array([1] * 50)
>>> x[-1] = 20
>>> nb = Natural_Breaks(x, k = 5, initial = 0)
Warning: Not enough unique values in array to form k classes
Warning: setting k to 2
>>> nb.bins
array([ 1, 20])
>>> nb.counts
array([49, 1])
Notes
-----
    There is a tradeoff here between speed and consistency of the
    classification. If you want more speed, set initial to a smaller value (0
    would result in the best speed). If you want more consistent classes in
    multiple runs of Natural_Breaks on the same data, set initial to a
    higher value.
"""
def __init__(self, y, k=K, initial=100):
self.k = k
self.initial = initial
Map_Classifier.__init__(self, y)
self.name = 'Natural_Breaks'
def _set_bins(self):
x = self.y.copy()
k = self.k
res0 = natural_breaks(x, k)
fit = res0[2].sum()
for i in xrange(self.initial):
res = natural_breaks(x, k)
fit_i = res[2].sum()
if fit_i < fit:
res0 = res
self.bins = np.array(res0[-1])
self.k = len(self.bins)
self.iterations = res0[-2]
class Fisher_Jenks(Map_Classifier):
"""
Fisher Jenks optimal classifier - mean based
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
Attributes
----------
yb : array
(n,1), bin ids for observations
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> fj = Fisher_Jenks(cal)
>>> fj.adcm
799.24000000000001
>>> fj.bins
array([ 75.29, 192.05, 370.5 , 722.85, 4111.45])
>>> fj.counts
array([49, 3, 4, 1, 1])
>>>
"""
def __init__(self, y, k=K):
nu = len(np.unique(y))
if nu < k:
raise ValueError("Fewer unique values than specified classes.")
self.k = k
Map_Classifier.__init__(self, y)
self.name = "Fisher_Jenks"
def _set_bins(self):
x = self.y.copy()
self.bins = np.array(_fisher_jenks_means(x, classes=self.k)[1:])
class Fisher_Jenks_Sampled(Map_Classifier):
"""
Fisher Jenks optimal classifier - mean based using random sample
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
pct : float
The percentage of n that should form the sample
If pct is specified such that n*pct > 1000, then
pct = 1000./n, unless truncate is False
truncate : boolean
truncate pct in cases where pct * n > 1000., (Default True)
Attributes
----------
yb : array
(n,1), bin ids for observations
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
(Turned off due to timing being different across hardware)
"""
def __init__(self, y, k=K, pct=0.10, truncate=True):
self.k = k
n = y.size
if (pct * n > 1000) and truncate:
pct = 1000. / n
ids = np.random.random_integers(0, n - 1, n * pct)
yr = y[ids]
yr[-1] = max(y) # make sure we have the upper bound
yr[0] = min(y) # make sure we have the min
self.original_y = y
self.pct = pct
self.yr = yr
self.yr_n = yr.size
Map_Classifier.__init__(self, yr)
self.yb, self.counts = bin1d(y, self.bins)
self.name = "Fisher_Jenks_Sampled"
self.y = y
self._summary() # have to recalculate summary stats
def _set_bins(self):
fj = Fisher_Jenks(self.y, self.k)
self.bins = fj.bins
class Jenks_Caspall(Map_Classifier):
"""
Jenks Caspall Map Classification
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
Attributes
----------
yb : array
(n,1), bin ids for observations,
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> jc = Jenks_Caspall(cal, k = 5)
>>> jc.bins
array([ 1.81000000e+00, 7.60000000e+00, 2.98200000e+01,
1.81270000e+02, 4.11145000e+03])
>>> jc.counts
array([14, 13, 14, 10, 7])
"""
def __init__(self, y, k=K):
self.k = k
Map_Classifier.__init__(self, y)
self.name = "Jenks_Caspall"
def _set_bins(self):
x = self.y.copy()
k = self.k
# start with quantiles
q = quantile(x, k)
solving = True
xb, cnts = bin1d(x, q)
#class means
if x.ndim == 1:
x.shape = (x.size, 1)
n, k = x.shape
xm = [np.median(x[xb == i]) for i in np.unique(xb)]
xb0 = xb.copy()
q = xm
it = 0
rk = range(self.k)
while solving:
xb = np.zeros(xb0.shape, int)
d = abs(x - q)
xb = d.argmin(axis=1)
if (xb0 == xb).all():
solving = False
else:
xb0 = xb
it += 1
q = np.array([np.median(x[xb == i]) for i in rk])
cuts = np.array([max(x[xb == i]) for i in sp.unique(xb)])
cuts.shape = (len(cuts),)
self.bins = cuts
self.iterations = it
class Jenks_Caspall_Sampled(Map_Classifier):
"""
Jenks Caspall Map Classification using a random sample
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
pct : float
The percentage of n that should form the sample
If pct is specified such that n*pct > 1000, then pct = 1000./n
Attributes
----------
yb : array
(n,1), bin ids for observations,
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> x = np.random.random(100000)
>>> jc = Jenks_Caspall(x)
>>> jcs = Jenks_Caspall_Sampled(x)
>>> jc.bins
array([ 0.19770952, 0.39695769, 0.59588617, 0.79716865, 0.99999425])
>>> jcs.bins
array([ 0.18877882, 0.39341638, 0.6028286 , 0.80070925, 0.99999425])
>>> jc.counts
array([19804, 20005, 19925, 20178, 20088])
>>> jcs.counts
array([18922, 20521, 20980, 19826, 19751])
>>>
# not for testing since we get different times on different hardware
# just included for documentation of likely speed gains
#>>> t1 = time.time(); jc = Jenks_Caspall(x); t2 = time.time()
#>>> t1s = time.time(); jcs = Jenks_Caspall_Sampled(x); t2s = time.time()
#>>> t2 - t1; t2s - t1s
#1.8292930126190186
#0.061631917953491211
Notes
-----
This is intended for large n problems. The logic is to apply
Jenks_Caspall to a random subset of the y space and then bin the
complete vector y on the bins obtained from the subset. This would
trade off some "accuracy" for a gain in speed.
"""
def __init__(self, y, k=K, pct=0.10):
self.k = k
n = y.size
if pct * n > 1000:
pct = 1000. / n
ids = np.random.random_integers(0, n - 1, n * pct)
yr = y[ids]
yr[0] = max(y) # make sure we have the upper bound
self.original_y = y
self.pct = pct
self.yr = yr
self.yr_n = yr.size
Map_Classifier.__init__(self, yr)
self.yb, self.counts = bin1d(y, self.bins)
self.name = "Jenks_Caspall_Sampled"
self.y = y
self._summary() # have to recalculate summary stats
def _set_bins(self):
jc = Jenks_Caspall(self.y, self.k)
self.bins = jc.bins
self.iterations = jc.iterations
class Jenks_Caspall_Forced(Map_Classifier):
"""
Jenks Caspall Map Classification with forced movements
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
Attributes
----------
yb : array
(n,1), bin ids for observations
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> jcf = Jenks_Caspall_Forced(cal, k = 5)
>>> jcf.k
5
>>> jcf.bins
array([[ 1.34000000e+00],
[ 5.90000000e+00],
[ 1.67000000e+01],
[ 5.06500000e+01],
[ 4.11145000e+03]])
>>> jcf.counts
array([12, 12, 13, 9, 12])
>>> jcf4 = Jenks_Caspall_Forced(cal, k = 4)
>>> jcf4.k
4
>>> jcf4.bins
array([[ 2.51000000e+00],
[ 8.70000000e+00],
[ 3.66800000e+01],
[ 4.11145000e+03]])
>>> jcf4.counts
array([15, 14, 14, 15])
>>>
"""
def __init__(self, y, k=K):
self.k = k
Map_Classifier.__init__(self, y)
self.name = "Jenks_Caspall_Forced"
def _set_bins(self):
x = self.y.copy()
k = self.k
q = quantile(x, k)
solving = True
xb, cnt = bin1d(x, q)
#class means
if x.ndim == 1:
x.shape = (x.size, 1)
n, tmp = x.shape
xm = [x[xb == i].mean() for i in np.unique(xb)]
xb0 = xb.copy()
q = xm
xbar = np.array([xm[xbi] for xbi in xb])
xbar.shape = (n, 1)
ss = x - xbar
ss *= ss
ss = sum(ss)
maxk = k - 1
down_moves = up_moves = 0
solving = True
it = 0
while solving:
# try upward moves first
moving_up = True
while moving_up:
class_ids = sp.unique(xb)
nk = [sum(xb == j) for j in class_ids]
candidates = nk[:-1]
i = 0
up_moves = 0
while candidates:
nki = candidates.pop(0)
if nki > 1:
ids = np.nonzero(xb == class_ids[i])
mover = max(ids[0])
tmp = xb.copy()
tmp[mover] = xb[mover] + 1
tm = [x[tmp == j].mean() for j in sp.unique(tmp)]
txbar = np.array([tm[xbi] for xbi in tmp])
txbar.shape = (n, 1)
tss = x - txbar
tss *= tss
tss = sum(tss)
if tss < ss:
xb = tmp
ss = tss
candidates = []
up_moves += 1
i += 1
if not up_moves:
moving_up = False
moving_down = True
while moving_down:
class_ids = sp.unique(xb)
nk = [sum(xb == j) for j in class_ids]
candidates = nk[1:]
i = 1
down_moves = 0
while candidates:
nki = candidates.pop(0)
if nki > 1:
ids = np.nonzero(xb == class_ids[i])
mover = min(ids[0])
mover_class = xb[mover]
target_class = mover_class - 1
tmp = xb.copy()
tmp[mover] = target_class
tm = [x[tmp == j].mean() for j in sp.unique(tmp)]
txbar = np.array([tm[xbi] for xbi in tmp])
txbar.shape = (n, 1)
tss = x - txbar
tss *= tss
tss = sum(tss)
if tss < ss:
xb = tmp
ss = tss
candidates = []
down_moves += 1
i += 1
if not down_moves:
moving_down = False
if not up_moves and not down_moves:
solving = False
it += 1
cuts = [max(x[xb == i]) for i in sp.unique(xb)]
self.bins = np.array(cuts)
self.iterations = it
class User_Defined(Map_Classifier):
"""
User Specified Binning
Parameters
----------
y : array
(n,1), values to classify
bins : array
            (k,1), upper bounds of classes (have to be monotonically increasing)
Attributes
----------
yb : array
(n,1), bin ids for observations,
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> cal = load_example()
>>> bins = [20, max(cal)]
>>> bins
[20, 4111.4499999999998]
>>> ud = User_Defined(cal, bins)
>>> ud.bins
array([ 20. , 4111.45])
>>> ud.counts
array([37, 21])
>>> bins = [20, 30]
>>> ud = User_Defined(cal, bins)
>>> ud.bins
array([ 20. , 30. , 4111.45])
>>> ud.counts
array([37, 4, 17])
>>>
Notes
-----
If upper bound of user bins does not exceed max(y) we append an
additional bin.
"""
def __init__(self, y, bins):
if bins[-1] < max(y):
bins.append(max(y))
self.k = len(bins)
self.bins = np.array(bins)
self.y = y
Map_Classifier.__init__(self, y)
self.name = 'User Defined'
def _set_bins(self):
pass
class Max_P_Classifier(Map_Classifier):
"""
Max_P Map Classification
Based on Max_p regionalization algorithm
Parameters
----------
y : array
(n,1), values to classify
k : int
number of classes required
initial : int
number of initial solutions to use prior to swapping
Attributes
----------
yb : array
(n,1), bin ids for observations,
bins : array
(k,1), the upper bounds of each class
k : int
the number of classes
counts : array
(k,1), the number of observations falling in each class
Examples
--------
>>> import pysal
>>> cal = pysal.esda.mapclassify.load_example()
>>> mp = pysal.Max_P_Classifier(cal)
>>> mp.bins
array([ 8.7 , 16.7 , 20.47, 66.26, 4111.45])
>>> mp.counts
array([29, 8, 1, 10, 10])
"""
def __init__(self, y, k=K, initial=1000):
self.k = k
self.initial = initial
Map_Classifier.__init__(self, y)
self.name = "Max_P"
def _set_bins(self):
x = self.y.copy()
k = self.k
q = quantile(x, k)
if x.ndim == 1:
x.shape = (x.size, 1)
n, tmp = x.shape
x.sort(axis=0)
# find best of initial solutions
solution = 0
best_tss = x.var() * x.shape[0]
tss_all = np.zeros((self.initial, 1))
while solution < self.initial:
remaining = range(n)
seeds = [np.nonzero(di == min(
di))[0][0] for di in [np.abs(x - qi) for qi in q]]
rseeds = np.random.permutation(range(k)).tolist()
tmp = [remaining.remove(seed) for seed in seeds]
self.classes = classes = []
tmp = [classes.append([seed]) for seed in seeds]
while rseeds:
seed_id = rseeds.pop()
current = classes[seed_id]
growing = True
while growing:
current = classes[seed_id]
low = current[0]
high = current[-1]
left = low - 1
right = high + 1
move_made = False
if left in remaining:
current.insert(0, left)
remaining.remove(left)
move_made = True
if right in remaining:
current.append(right)
remaining.remove(right)
move_made = True
if move_made:
classes[seed_id] = current
else:
growing = False
tss = _fit(self.y, classes)
tss_all[solution] = tss
if tss < best_tss:
best_solution = classes
best_it = solution
best_tss = tss
solution += 1
classes = best_solution
self.best_it = best_it
self.tss = best_tss
self.a2c = a2c = {}
self.tss_all = tss_all
for r, cl in enumerate(classes):
for a in cl:
a2c[a] = r
swapping = True
it = 0
while swapping:
rseeds = np.random.permutation(range(k)).tolist()
total_moves = 0
while rseeds:
id = rseeds.pop()
growing = True
total_moves = 0
while growing:
target = classes[id]
left = target[0] - 1
right = target[-1] + 1
n_moves = 0
if left in a2c:
left_class = classes[a2c[left]]
if len(left_class) > 1:
a = left_class[-1]
if self._swap(left_class, target, a):
target.insert(0, a)
left_class.remove(a)
a2c[a] = id
n_moves += 1
if right in a2c:
right_class = classes[a2c[right]]
if len(right_class) > 1:
a = right_class[0]
if self._swap(right_class, target, a):
target.append(a)
right_class.remove(a)
n_moves += 1
a2c[a] = id
if not n_moves:
growing = False
total_moves += n_moves
if not total_moves:
swapping = False
xs = self.y.copy()
xs.sort()
self.bins = np.array([xs[cl][-1] for cl in classes])
def _ss(self, class_def):
"""calculates sum of squares for a class"""
yc = self.y[class_def]
css = yc - yc.mean()
css *= css
return sum(css)
def _swap(self, class1, class2, a):
"""evaluate cost of moving a from class1 to class2"""
ss1 = self._ss(class1)
ss2 = self._ss(class2)
tss1 = ss1 + ss2
class1c = copy.copy(class1)
class2c = copy.copy(class2)
class1c.remove(a)
class2c.append(a)
ss1 = self._ss(class1c)
ss2 = self._ss(class2c)
tss2 = ss1 + ss2
if tss1 < tss2:
return False
else:
return True
def _fit(y, classes):
"""Calculate the total sum of squares for a vector y classified into
classes
Parameters
----------
y : array
(n,1), variable to be classified
classes : array
(k,1), integer values denoting class membership
"""
tss = 0
for class_def in classes:
yc = y[class_def]
css = yc - yc.mean()
css *= css
tss += sum(css)
return tss
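# Added worked example for _fit above (toy values chosen for illustration):
# y = np.array([1., 2., 10., 11.]) with classes = [[0, 1], [2, 3]] gives
# (1 - 1.5)**2 + (2 - 1.5)**2 + (10 - 10.5)**2 + (11 - 10.5)**2 = 1.0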
kmethods = {}
kmethods["Quantiles"] = Quantiles
kmethods["Fisher_Jenks"] = Fisher_Jenks
kmethods['Natural_Breaks'] = Natural_Breaks
kmethods['Maximum_Breaks'] = Maximum_Breaks
def gadf(y, method="Quantiles", maxk=15, pct=0.8):
"""
Evaluate the Goodness of Absolute Deviation Fit of a Classifier
Finds the minimum value of k for which gadf>pct
Parameters
----------
y : array
(n, 1) values to be classified
    method : {'Quantiles', 'Fisher_Jenks', 'Maximum_Breaks', 'Natural_Breaks'}
maxk : int
maximum value of k to evaluate
pct : float
The percentage of GADF to exceed
Returns
-------
k : int
number of classes
cl : object
instance of the classifier at k
gadf : float
goodness of absolute deviation fit
Examples
--------
>>> cal = load_example()
>>> qgadf = gadf(cal)
>>> qgadf[0]
15
>>> qgadf[-1]
0.37402575909092828
Quantiles fail to exceed 0.80 before 15 classes. If we lower the bar to
0.2 we see quintiles as a result
>>> qgadf2 = gadf(cal, pct = 0.2)
>>> qgadf2[0]
5
>>> qgadf2[-1]
0.21710231966462412
>>>
Notes
-----
The GADF is defined as:
.. math::
GADF = 1 - \sum_c \sum_{i \in c} |y_i - y_{c,med}| / \sum_i |y_i - y_{med}|
where :math:`y_{med}` is the global median and :math:`y_{c,med}` is
the median for class :math:`c`.
See Also
--------
K_classifiers
"""
y = np.array(y)
adam = (np.abs(y - np.median(y))).sum()
for k in range(2, maxk + 1):
cl = kmethods[method](y, k)
gadf = 1 - cl.adcm / adam
if gadf > pct:
break
return (k, cl, gadf)
class K_classifiers:
"""
Evaluate all k-classifers and pick optimal based on k and GADF
Parameters
----------
y : array
(n,1), values to be classified
pct : float
The percentage of GADF to exceed
Attributes
----------
best : object
instance of the optimal Map_Classifier
results : dictionary
              keys are classifier names, values are the Map_Classifier instances with the best pct for each classifier
Examples
--------
>>> cal = load_example()
>>> ks = K_classifiers(cal)
>>> ks.best.name
'Fisher_Jenks'
>>> ks.best.k
4
>>> ks.best.gadf
0.84810327199081048
>>>
Notes
-----
This can be used to suggest a classification scheme.
See Also
--------
gadf
"""
def __init__(self, y, pct=0.8):
results = {}
c = 0
best = gadf(y, "Fisher_Jenks", maxk=len(y) - 1, pct=pct)
pct0 = best[0]
k0 = best[-1]
keys = kmethods.keys()
keys.remove("Fisher_Jenks")
results["Fisher_Jenks"] = best
for method in keys:
results[method] = gadf(y, method, maxk=len(y) - 1, pct=pct)
k1 = results[method][0]
pct1 = results[method][-1]
if (k1 < k0) or (k1 == k0 and pct0 < pct1):
best = results[method]
k0 = k1
pct0 = pct1
self.results = results
self.best = best[1]
def fj(x, k=5):
y = x.copy()
y.sort()
d = {}
initial = opt_part(y)
# d has key = number of groups
# value: list of ids, list of group tss, group size
    split_id = initial[0]
    tss = list(initial[1:])  # total, then left and right within-group tss
    sizes = [split_id, len(y) - split_id]  # sizes of y[:split_id] and y[split_id:]
    d[2] = [[split_id], tss, sizes]
return d
def opt_part(x):
"""
Find optimal bi-partition of x values
Parameters
-----------
x : array
(n,1), Array of attribute values
Returns
-------
opt_i : int
partition index
tss : float
              total sum of squares
left_min : float
variance to the left of the break (including the break)
right_min : float
variance to the right of the break
"""
n = len(x)
tss = np.inf
opt_i = -999
for i in xrange(1, n):
left = x[:i].var() * i
right = x[i:].var() * (n - i)
tss_i = left + right
if tss_i < tss:
opt_i = i
tss = tss_i
left_min = left
right_min = right
return (opt_i, tss, left_min, right_min)
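# Added illustrative note: fj and opt_part above are an experimental
# partitioning sketch.  opt_part scans every split point of a sorted array and
# keeps the one minimizing total within-group variance, e.g. with toy data:
# >>> opt_part(np.array([1., 2., 3., 10., 11., 12.]))
# returns opt_i == 3 (split into [1, 2, 3] and [10, 11, 12]) with a tss of
# approximately 4.0.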
| bsd-3-clause |
Arn-O/kadenze-deep-creative-apps | final-project/libs/utils.py | 1 | 2157 | """
Utility for image manipulation, directly copied (and slightly modified) from:
https://github.com/pkmital/CADL
"""
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
def img_crop(img, pos):
"""Crop to square from a position.
Parameters
----------
img : np.ndarray
Input image to crop, assumed at least 2d.
pos : int
Start position.
Returns
-------
crop : np.ndarray
Cropped image.
"""
size = img.shape[0]
crop = np.take(img, range(pos, pos + size), axis=1)
return crop
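# Illustrative usage sketch (added; array shape chosen for demonstration): for
# an image that is wider than it is tall, img_crop takes a square window of
# img.shape[0] columns starting at `pos`, e.g.
# >>> wide = np.zeros((100, 300, 3))
# >>> img_crop(wide, 50).shape
# (100, 100, 3)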
def build_gif(imgs, interval=0.1, dpi=72,
save_gif=True, saveto='animation.gif',
show_gif=False, cmap=None):
"""Take an array or list of images and create a GIF.
Parameters
----------
imgs : np.ndarray or list
List of images to create a GIF of
interval : float, optional
Spacing in seconds between successive images.
dpi : int, optional
Dots per inch.
save_gif : bool, optional
Whether or not to save the GIF.
saveto : str, optional
Filename of GIF to save.
show_gif : bool, optional
Whether or not to render the GIF using plt.
cmap : None, optional
Optional colormap to apply to the images.
Returns
-------
ani : matplotlib.animation.ArtistAnimation
The artist animation from matplotlib. Likely not useful.
"""
imgs = np.asarray(imgs)
h, w, *c = imgs[0].shape
fig, ax = plt.subplots(figsize=(np.round(w / dpi), np.round(h / dpi)))
fig.subplots_adjust(bottom=0)
fig.subplots_adjust(top=1)
fig.subplots_adjust(right=1)
fig.subplots_adjust(left=0)
ax.set_axis_off()
if cmap is not None:
axs = list(map(lambda x: [
ax.imshow(x, cmap=cmap)], imgs))
else:
axs = list(map(lambda x: [
ax.imshow(x)], imgs))
ani = animation.ArtistAnimation(
fig, axs, interval=interval*1000, repeat_delay=0, blit=True)
if save_gif:
ani.save(saveto, writer='imagemagick', dpi=dpi)
if show_gif:
plt.show()
return ani
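# Illustrative usage sketch (added; frame data and file name are made up):
# >>> frames = [np.random.rand(64, 64, 3) for _ in range(10)]
# >>> _ = build_gif(frames, interval=0.2, saveto='example.gif', show_gif=False)
# Saving relies on matplotlib's 'imagemagick' writer, so ImageMagick must be
# installed for the GIF to be written.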
| apache-2.0 |
sangwook236/SWDT | sw_dev/python/rnd/test/image_processing/skimage/skimage_thresholding.py | 2 | 6105 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import numpy as np
import skimage
import skimage.filters, skimage.morphology
import matplotlib
import matplotlib.pyplot as plt
# REF [site] >> https://scikit-image.org/docs/dev/auto_examples/segmentation/plot_thresholding.html
def try_all_threshold_example():
img = skimage.data.page()
# Specify a radius for local thresholding algorithms.
# If it is not specified, only global algorithms are called.
fig, ax = skimage.filters.try_all_threshold(img, figsize=(10, 8), verbose=False)
plt.show()
# REF [site] >> https://scikit-image.org/docs/dev/auto_examples/segmentation/plot_thresholding.html
def threshold_example():
image = skimage.data.camera()
#thresh = skimage.filters.threshold_isodata(image)
#thresh = skimage.filters.threshold_li(image)
#thresh = skimage.filters.threshold_mean(image)
#thresh = skimage.filters.threshold_minimum(image)
thresh = skimage.filters.threshold_otsu(image)
#thresh = skimage.filters.threshold_triangle(image)
#thresh = skimage.filters.threshold_yen(image)
binary = image > thresh
fig, axes = plt.subplots(ncols=3, figsize=(8, 2.5))
ax = axes.ravel()
ax[0] = plt.subplot(1, 3, 1)
ax[1] = plt.subplot(1, 3, 2)
ax[2] = plt.subplot(1, 3, 3, sharex=ax[0], sharey=ax[0])
ax[0].imshow(image, cmap=plt.cm.gray)
ax[0].set_title('Original')
ax[0].axis('off')
ax[1].hist(image.ravel(), bins=256)
ax[1].set_title('Histogram')
ax[1].axvline(thresh, color='r')
ax[2].imshow(binary, cmap=plt.cm.gray)
ax[2].set_title('Thresholded')
ax[2].axis('off')
plt.show()
# REF [site] >> https://scikit-image.org/docs/dev/auto_examples/segmentation/plot_multiotsu.html
def multiotsu_example():
# Setting the font size for all plots.
matplotlib.rcParams['font.size'] = 9
# The input image.
image = skimage.data.camera()
# Applying multi-Otsu threshold for the default value, generating three classes.
thresholds = skimage.filters.threshold_multiotsu(image)
# Using the threshold values, we generate the three regions.
regions = np.digitize(image, bins=thresholds)
#regions_colorized = skimage.color.label2rgb(regions)
#plt.imshow(regions_colorized)
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(10, 3.5))
# Plotting the original image.
ax[0].imshow(image, cmap='gray')
ax[0].set_title('Original')
ax[0].axis('off')
# Plotting the histogram and the two thresholds obtained from multi-Otsu.
ax[1].hist(image.ravel(), bins=255)
ax[1].set_title('Histogram')
for thresh in thresholds:
ax[1].axvline(thresh, color='r')
# Plotting the Multi Otsu result.
ax[2].imshow(regions, cmap='Accent')
ax[2].set_title('Multi-Otsu result')
ax[2].axis('off')
plt.subplots_adjust()
plt.show()
# REF [site] >> https://scikit-image.org/docs/dev/auto_examples/applications/plot_thresholding.html
def local_otsu_threshold_example():
# Otsu's threshold method can be applied locally.
# For each pixel, an "optimal" threshold is determined by maximizing the variance between two classes of pixels of the local neighborhood defined by a structuring element.
img = skimage.util.img_as_ubyte(skimage.data.page())
radius = 15
selem = skimage.morphology.disk(radius)
local_otsu = skimage.filters.rank.otsu(img, selem)
threshold_global_otsu = skimage.filters.threshold_otsu(img)
global_otsu = img >= threshold_global_otsu
fig, axes = plt.subplots(2, 2, figsize=(8, 5), sharex=True, sharey=True)
ax = axes.ravel()
plt.tight_layout()
fig.colorbar(ax[0].imshow(img, cmap=plt.cm.gray), ax=ax[0], orientation='horizontal')
ax[0].set_title('Original')
ax[0].axis('off')
fig.colorbar(ax[1].imshow(local_otsu, cmap=plt.cm.gray), ax=ax[1], orientation='horizontal')
ax[1].set_title('Local Otsu (radius=%d)' % radius)
ax[1].axis('off')
ax[2].imshow(img >= local_otsu, cmap=plt.cm.gray)
ax[2].set_title('Original >= Local Otsu')
ax[2].axis('off')
ax[3].imshow(global_otsu, cmap=plt.cm.gray)
ax[3].set_title('Global Otsu (threshold = %d)' % threshold_global_otsu)
ax[3].axis('off')
plt.show()
# REF [site] >> https://scikit-image.org/docs/dev/auto_examples/applications/plot_thresholding.html
def local_threshold_example():
image = skimage.data.page()
global_thresh = skimage.filters.threshold_otsu(image)
binary_global = image > global_thresh
block_size = 35
local_thresh = skimage.filters.threshold_local(image, block_size, offset=10)
binary_local = image > local_thresh
fig, axes = plt.subplots(nrows=3, figsize=(7, 8))
ax = axes.ravel()
plt.gray()
ax[0].imshow(image)
ax[0].set_title('Original')
ax[1].imshow(binary_global)
ax[1].set_title('Global thresholding')
ax[2].imshow(binary_local)
ax[2].set_title('Local thresholding')
for a in ax:
a.axis('off')
plt.show()
# REF [site] >> https://scikit-image.org/docs/dev/auto_examples/segmentation/plot_niblack_sauvola.html
def niblack_and_sauvola_example():
matplotlib.rcParams['font.size'] = 9
image = skimage.data.page()
binary_global = image > skimage.filters.threshold_otsu(image)
window_size = 25
thresh_niblack = skimage.filters.threshold_niblack(image, window_size=window_size, k=0.8)
thresh_sauvola = skimage.filters.threshold_sauvola(image, window_size=window_size)
binary_niblack = image > thresh_niblack
binary_sauvola = image > thresh_sauvola
plt.figure(figsize=(8, 7))
plt.subplot(2, 2, 1)
plt.imshow(image, cmap=plt.cm.gray)
plt.title('Original')
plt.axis('off')
plt.subplot(2, 2, 2)
plt.title('Global Threshold')
plt.imshow(binary_global, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(2, 2, 3)
plt.imshow(binary_niblack, cmap=plt.cm.gray)
plt.title('Niblack Threshold')
plt.axis('off')
plt.subplot(2, 2, 4)
plt.imshow(binary_sauvola, cmap=plt.cm.gray)
plt.title('Sauvola Threshold')
plt.axis('off')
plt.show()
def main():
#try_all_threshold_example()
#threshold_example()
#multiotsu_example()
# Local/adaptive thresholding.
local_otsu_threshold_example()
#local_threshold_example()
#niblack_and_sauvola_example()
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
| gpl-3.0 |
jbloomlab/dms_tools | src/plot.py | 1 | 32057 | """
========================
``plot`` module
========================
This module uses ``pylab`` and ``matplotlib`` to make plots.
The ``pdf`` backend is used for ``matplotlib`` / ``pylab``.
Written by Jesse Bloom.
Functions in this module
---------------------------
`PlotCorrelation` : plots correlation between two variables.
`Base10Formatter` : formats numbers in base 10
`PlotPairedMutFracs` : bar graph of mutation frequencies per sample
`PlotMutCountFracs` : plots fraction of mutations that occur >= a given number of times.
`PlotDepth` : plots per-site depth along primary sequence
`PlotReadStarts` : plots start of read distributions for subassembly.
`PlotSampleBargraphs` : plots bargraphs of categories for several samples.
Function documentation
-----------------------------
"""
import os
import math
import matplotlib
matplotlib.use('pdf')
import pylab
import dms_tools.file_io
def Base10Formatter(number, exp_cutoff, exp_decimal_digits, decimal_digits):
"""Converts a number into Latex formatting with scientific notation.
Takes a number and converts it to a string that can be shown
in LaTex using math mode. It is converted to scientific notation
if the criteria specified by `exp_cutoff` is met.
`number` : the number to be formatted, should be a float or integer.
Currently only works for `number >= 0`.
`exp_cutoff` : convert to scientific notation if ``abs(math.log10(number))
>= exp_cutoff``.
`exp_decimal_digits` : show this many digits after the decimal if number
is converted to scientific notation.
`decimal_digits` : show this many digits after the decimal if number
is NOT converted to scientific notation.
The returned value is a LaTex formatted string. If the number is zero, the
returned string is simply '0'.
EXAMPLES:
>>> Base10Formatter(103, 3, 1, 1)
'103.0'
>>> Base10Formatter(103.0, 2, 1, 1)
'1.0 \\\\times 10^{2}'
>>> Base10Formatter(103.0, 2, 2, 1)
'1.03 \\\\times 10^{2}'
>>> Base10Formatter(2892.3, 3, 1, 1)
'2.9 \\\\times 10^{3}'
>>> Base10Formatter(0.0, 3, 1, 1)
'0'
>>> Base10Formatter(0.012, 2, 1, 1)
'1.2 \\\\times 10^{-2}'
>>> Base10Formatter(-0.1, 3, 1, 1)
Traceback (most recent call last):
...
ValueError: number must be >= 0
"""
if number < 0:
raise ValueError('number must be >= 0')
if number == 0:
return '0'
exponent = int(math.log10(number))
if math.log10(number) < exponent and number < 1:
exponent -= 1
if abs(exponent) >= exp_cutoff:
x = number / (10.**exponent)
formatstr = '%.' + '%d' % exp_decimal_digits + 'f \\times 10^{%d}'
return formatstr % (x, exponent)
else:
formatstr = '%.' + '%d' % decimal_digits + 'f'
return formatstr % number
def PlotCorrelation(xs, ys, plotfile, xlabel, ylabel, logx=False, logy=False,\
corr=None, title=False, alpha=1.0, symmetrize=False, fixaxes=False, additionalxy=[], bigmargin=0.35, xsize=1.8, r2=False,
marker_style='b.', additional_marker_style='r^', marker_size=4, additional_marker_size=3):
"""Plots the correlation between two variables as a scatter plot.
The data is plotted as a scatter plot.
This function uses ``pylab`` / ``matplotlib``.
The calling variables use LaTex format for strings. So for
example, '$10^5$' will print the LaTex equivalent of this
string. Similarly, certain raw text strings (such as those
including underscores) will cause problems if you do not
escape the LaTex format meaning. For instance, 'x_label'
will cause a problem since underscore is not valid
outside of math mode in LaTex, so you would need to use
'x\_label' to escape the underscore.
CALLING VARIABLES:
* *xs* and *ys* are lists of numbers, with the lists
being of the same length. Entry *xs[i]* is plotted
on the x-axis against entries *ys[i]* on the y-axis.
* *plotfile* is a string giving the name of the plot PDF file
that we create. It should end in the extension ``.pdf``.
If this plot already exists, it is overwritten.
* *xlabel* is a string giving the label placed on the x-axis.
* *ylabel* is a string giving the label placed on the y-axis.
* *logx* specifies that we log transform the data in *xs*.
This is *False* by default; set to *True* if you want
to log transform (base 10 logarithms) the data.
* *logy* is like *logx*, but for the data in *ys*.
* *corr* specifies that we include a correlation coefficient on the
plot. If it has its default value of *None*, then no correlation
coefficient is included. Otherwise, *corr = (r, p)* where *r* is
the correlation coefficient (displayed with the label ``R``) and *p*
is the P-value (displayed with the label ``P``).
* *r2* : if *True* and using *corr*, then display :math:`R^2` rather
than :math:`R`.
* *title* is a string giving the title placed above the plot.
It can be *False* if no title is to be used. Otherwise, it should
be the title string (using LaTex formatting, spaces are allowed).
Is *False* by default.
* *alpha* is the transparency of the plotted points. By default
it is one, which means that there is no transparency. If you make
the value closer to zero, then the points become partially transparent.
The rationale is that if you have many overlapping points, making
them partially transparent helps you better see the density.
At any position on the plot, the intensity will be saturated
when there are 1.0 / alpha points plotted. So a reasonable value
of *alpha* might be something like 0.1.
* *symmetrize* is an optional argument that is *False* by default.
If *True*, we make the X and Y limits on the plot the same.
* *fixaxes* is an optional argument that is *False* by default.
If *True*, we fix both the X and Y axes to go from 0 to 1,
with ticks at 0, 0.5, and 1. If you set this option to *True*,
then you must set *logx* and *logy* to *False*.
* *additionalxy* is an optional list. By default, it is an empty list.
However, you can use it to plot additional data points with a different
color. The main data (specified by *xs* and *ys*) is plotted with
blue circles. For each addition set of data that you want to plot,
include a 2-tuple of lists of the form *(x2s, y2s)* in
*additionalxy*. Currently, only one such 2-tuple is allowed
(so only one additional data set). These are plotted as red
triangles, whereas the main data is plotted as blue
circles. The same *alpha* value set by the *alpha* parameter applies
to these points as well.
* *bigmargin* is an optional argument that is 0.35 by default. It is
the fraction of the plot width taken up by the larger margin, which
is the bottom and left. Make larger if you need more space for axis
labels.
* *xsize* is an optional argument that is the width of the plot in inches.
It is 1.8 by default.
* *marker_style* and *additional_marker_style* are optional arguments to
change the color/style of the marker for the main and additional data points,
respectively. See the matplotlib documentation for a full explanation.
* *marker_size* and *additional_marker_size* are optional arguments to
set the size of the markers.
"""
if not os.path.splitext(plotfile)[1].upper() == '.PDF':
raise ValueError("plotfile does not end in PDF extension: %s " % plotfile)
if not (len(xs) == len(ys) >= 2):
raise ValueError("xs and ys do not specify lists of the same length with >= 2 entries")
if fixaxes and (logy or logx):
raise ValueError("Cannot use fixaxes with logx or logy")
smallmargin = 0.04
(lmargin, rmargin, bmargin, tmargin) = (bigmargin, smallmargin, bigmargin, smallmargin)
plotmargin = 0.03 # add this much above and below the last data point
logplotmargin = 2.0 # scale limits by this much if log scale
if title:
titlemargin = 0.09 * (0.8 + title.count('\n') + title.count('\\\\'))
tmargin += titlemargin
ysize = xsize * (1.0 - lmargin - rmargin) / (1.0 - tmargin - bmargin)
matplotlib.rc('text', usetex=True)
matplotlib.rc('font', size=9)
matplotlib.rc('legend', fontsize=10)
figure = pylab.figure(figsize=(xsize, ysize), facecolor='white')
ax = pylab.axes([lmargin, bmargin, 1.0 - lmargin - rmargin, 1.0 - tmargin - bmargin])
pylab.plot(xs, ys, marker_style, markersize=marker_size, alpha=alpha, mew=0, clip_on=False)
if additionalxy:
if len(additionalxy) != 1:
raise ValueError("Currently additionalxy only works for one additional set of data")
(x2s, y2s) = additionalxy[0]
assert len(x2s) == len(y2s) > 0, "additionalxy does not specify two non-empty data lists of equal length"
xs += x2s # add for later correlation and limit calculations
ys += y2s
pylab.plot(x2s, y2s, additional_marker_style, markersize=additional_marker_size, alpha=alpha)
(xmin, xmax, ymin, ymax) = (min(xs), max(xs), min(ys), max(ys))
if fixaxes:
xmin = ymin = 0.0
xmax = ymax = 1.0
elif symmetrize:
xmin = ymin = min(xmin, ymin)
xmax = ymax = max(xmax, ymax)
if logy:
pylab.gca().set_yscale('log')
ax.set_ylim([ymin / logplotmargin, ymax * logplotmargin])
ys = [math.log(y) for y in ys]
else:
ymargin = plotmargin * (ymax - ymin)
ax.set_ylim([ymin - ymargin, ymax + ymargin])
if logx:
pylab.gca().set_xscale('log')
ax.set_xlim([xmin / logplotmargin, xmax * logplotmargin])
xs = [math.log(x) for x in xs]
else:
xmargin = plotmargin * (xmax - xmin)
ax.set_xlim([xmin - xmargin, xmax + xmargin])
pylab.xlabel(xlabel, size=10)
pylab.ylabel(ylabel, size=10)
if title:
pylab.title(title, size=10)
if corr:
(r, p) = corr
if r2:
r = '$R^2 = %.2f$' % (r**2)
else:
r = '$R = %.2f$' % r
if p < 1e-10:
p = '$P < 10^{-10}$'
else:
p = '$P = %s$' % Base10Formatter(p, 2, 1, 2)
text = '%s\n%s' % (r, p)
pylab.text(0.05, 0.96, text, horizontalalignment='left', verticalalignment='top', transform=ax.transAxes, size=10)
if logy:
yticker = matplotlib.ticker.LogLocator(numticks=4)
elif fixaxes:
yticker = matplotlib.ticker.FixedLocator([0, 0.5, 1])
else:
yticker = matplotlib.ticker.MaxNLocator(4, integer=True)
pylab.gca().yaxis.set_major_locator(yticker)
if logx:
xticker = matplotlib.ticker.LogLocator(numticks=4)
elif fixaxes:
xticker = matplotlib.ticker.FixedLocator([0, 0.5, 1])
else:
xticker = matplotlib.ticker.MaxNLocator(4, integer=True)
spineOffset = {'left': 3, 'bottom': 3}
[spine.set_position(('outward',spineOffset[loc])) if loc in ['left','bottom'] else spine.set_color('none') for loc, spine in ax.spines.items() ]
ax.tick_params(axis='x', direction='out')
ax.tick_params(axis='y', direction='out')
pylab.gca().xaxis.set_major_locator(xticker)
pylab.gca().get_xaxis().tick_bottom()
pylab.gca().get_yaxis().tick_left()
pylab.savefig(plotfile)
pylab.clf()
pylab.close()
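# Illustrative usage sketch of PlotCorrelation (not part of the original
# module); the data values, output file name, and correlation tuple below
# are hypothetical:
#
#   xs = [0.1, 0.5, 0.9, 1.3]
#   ys = [0.2, 0.4, 1.0, 1.2]
#   PlotCorrelation(xs, ys, 'correlation.pdf', xlabel='replicate 1',
#                   ylabel='replicate 2', corr=(0.98, 1e-3), alpha=0.5)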
def PlotDepth(codon_counts, names, plotfile, mutdepth=False, y_axis_label=None,
separatemuttypes=False):
"""Plots per-site depth along primary sequence.
`codon_counts` : a list of dictionaries giving the codon counts for
each sample as read by `dms_tools.file_io.ReadDMSCounts`.
`names` : a list of strings giving the names of the samples
corresponding to each entry in `codon_counts`.
`plotfile` : name of the output plot file created by this method
(such as 'plot.pdf'). The extension must be ``.pdf``.
`mutdepth` : Boolean switch, if *True* then rather than plotting
sequencing depth, we plot per-site mutation rate.
`y_axis_label` : a string, if specified, overrides the default y-axis
label ('number of reads' or 'mutation frequency').
`separatemuttypes` : Boolean switch specifying that we plot a different
line for each of nonsynonymous, synonymous, and stop codon mutations.
Only can be used if *mutdepth* is *True*.
"""
if os.path.splitext(plotfile)[1].lower() != '.pdf':
raise ValueError("plotfile must end in .pdf: %s" % plotfile)
if separatemuttypes and not mutdepth:
raise ValueError("Can't use separatemuttypes without mutdepth")
assert len(codon_counts) == len(names) > 0, "codon_counts and names are not non-empty lists of the same length"
matplotlib.rc('text', usetex=True)
matplotlib.rc('font', size=10)
matplotlib.rc('legend', fontsize=10)
sites = list(codon_counts[0].iterkeys())
codon_counts = [dms_tools.utils.ClassifyCodonCounts(counts) for counts in codon_counts]
xs = list(range(len(sites)))
dms_tools.utils.NaturalSort(sites)
if separatemuttypes:
nlegendcols = 2
nlegendrows = int(math.ceil(3 * len(names) / float(nlegendcols)))
else:
nlegendcols = 4
nlegendrows = int(math.ceil(len(names) / float(nlegendcols)))
fig = pylab.figure(figsize=(5.5, 2.16 * (0.76 + 0.23 * nlegendrows)))
(lmargin, rmargin, bmargin, tmargin) = (0.1, 0.02, 0.19, 0.01 + 0.11 * nlegendrows)
ax = pylab.axes([lmargin, bmargin, 1 - lmargin - rmargin, 1 - bmargin - tmargin])
lines = []
all_ys = []
legendnames = []
for (name, counts) in zip(names, codon_counts):
if mutdepth:
if separatemuttypes:
muttypes = [(' nonsyn', 'N_NS', 1), (' syn X 20', 'N_SYN', 20), (' stop X 20', 'N_STOP', 20)]
else:
muttypes = [('', 'all', 1)]
for (mtype, key, scalefac) in muttypes:
ys = []
for r in sites:
if counts[r]['COUNTS']:
if key == 'all':
ys.append((counts[r]['COUNTS'] - counts[r]['N_WT']) / float(counts[r]['COUNTS']))
else:
ys.append(scalefac * counts[r][key] / float(counts[r]['COUNTS']))
else:
ys.append(0)
all_ys += ys
line = pylab.plot(xs, ys, lw=1.2)
lines.append(line[0])
legendnames.append(name.replace('_', ' ') + mtype)
else:
ys = [counts[r]['COUNTS'] for r in sites]
all_ys += ys
line = pylab.plot(xs, ys, lw=1.2)
lines.append(line[0])
legendnames.append(name.replace('_', ' '))
pylab.xlabel('codon position')
if mutdepth:
pylab.ylabel('mutation frequency')
else:
pylab.ylabel('number of reads')
if y_axis_label:
pylab.ylabel(y_axis_label)
all_ys.sort()
# if the top y value is excessively large, set a small ymax to avoid distorting the y-axis
if all_ys[-1] >= 2.5 * all_ys[-len(legendnames) - 1]:
pylab.gca().set_ylim([0, 2.5 * all_ys[-len(legendnames) - 1]])
else:
pylab.gca().set_ylim([0, pylab.gca().get_ylim()[1]])
yticker = matplotlib.ticker.MaxNLocator(4)
pylab.gca().yaxis.set_major_locator(yticker)
yformatter = pylab.ScalarFormatter(useMathText=True)
yformatter.set_powerlimits((-2, 3))
pylab.gca().yaxis.set_major_formatter(yformatter)
pylab.gca().set_xlim([0, len(xs) - 1])
if len(xs) <= 250:
# xtick every 50
xlocator = matplotlib.ticker.FixedLocator(list(range(0, len(xs), 50)))
xformatter = matplotlib.ticker.FixedFormatter([sites[i] for i in range(0, len(xs), 50)])
else:
# xtick every 100
xlocator = matplotlib.ticker.FixedLocator(list(range(0, len(xs), 100)))
xformatter = matplotlib.ticker.FixedFormatter([sites[i] for i in range(0, len(xs), 100)])
pylab.gca().xaxis.set_major_locator(xlocator)
pylab.gca().xaxis.set_major_formatter(xformatter)
pylab.legend(lines, legendnames, handlelength=1.2, handletextpad=0.3, columnspacing=0.9, bbox_to_anchor=(0.54, 1.03 + 0.14 * nlegendrows), loc='upper center', ncol=nlegendcols)
pylab.savefig(plotfile)
pylab.clf()
pylab.close()
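# Illustrative usage sketch of PlotDepth (hypothetical sample names and
# output file); counts_pre and counts_post stand for count dictionaries
# returned by dms_tools.file_io.ReadDMSCounts (see that module for its
# arguments):
#
#   PlotDepth([counts_pre, counts_post], ['pre-selection', 'post-selection'],
#             'depth.pdf', mutdepth=True)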
def PlotPairedMutFracs(codon_counts, names, plotfile, ylabel='fraction'):
"""Makes paired bar graph of mutation fractions per codon.
For each sample, calculates the fraction of codon counts
that represent synonymous / nonsynoymous /
stop codons, and one- / two- / three-nucleotide mutations. Makes
paired stacked bar graphs showing these fractions. The fractions
for all sites in the gene are aggregated together.
`codon_counts` : a list of dictionaries giving the codon counts for
each sample as read by `dms_tools.file_io.ReadDMSCounts`.
`names` : a list of strings giving the names of the samples
corresponding to each entry in `codon_counts`.
`plotfile` : name of the output plot file created by this method
(such as 'plot.pdf'). The extension must be ``.pdf``.
`ylabel` : the text placed on the ylabel.
"""
if os.path.splitext(plotfile)[1].lower() != '.pdf':
raise ValueError("plotfile must end in .pdf: %s" % plotfile)
names = [name.replace('_', '\_') for name in names]
if not (isinstance(codon_counts, list) and codon_counts):
raise ValueError("codon_counts does not specify a non-empty list")
if len(codon_counts) != len(names):
raise ValueError("codon_counts and names differ in length")
bar1 = ['synonymous', 'nonsynonymous', 'stop codon']
bar2 = ['1 nucleotide mutation', '2 nucleotide mutations', '3 nucleotide mutations']
d = dict([(key, []) for key in bar1 + bar2])
for counts in codon_counts:
counts = dms_tools.utils.ClassifyCodonCounts(counts)
denom = float(counts['TOTAL_COUNTS'])
if not denom:
raise ValueError("no counts for a sample")
for key in bar1 + bar2:
if key == 'nonsynonymous':
d[key].append(counts['TOTAL_NS'] / denom)
elif key == 'synonymous':
d[key].append(counts['TOTAL_SYN'] / denom)
elif key == 'stop codon':
d[key].append(counts['TOTAL_STOP'] / denom)
elif key == '1 nucleotide mutation':
d[key].append(counts['TOTAL_N_1MUT'] / denom)
elif key == '2 nucleotide mutations':
d[key].append(counts['TOTAL_N_2MUT'] / denom)
elif key == '3 nucleotide mutations':
d[key].append(counts['TOTAL_N_3MUT'] / denom)
else:
raise ValueError("Invalid key of %s" % key)
nsamples = len(names)
matplotlib.rc('text', usetex=True)
matplotlib.rc('font', size=10)
matplotlib.rc('legend', fontsize=10)
matplotlib.rc('xtick', labelsize=10)
matplotlib.rc('patch', linewidth=0.5)
# make stacked bar graph
fig = pylab.figure(figsize=(6.6, 3.75))
(lmargin, rmargin, bmargin, tmargin) = (0.08, 0.01, 0.43, 0.13)
ax = pylab.axes([lmargin, bmargin, 1 - lmargin - rmargin, 1 -\
bmargin - tmargin])
bars = []
for (ibar, keys, colors) in [(0, bar1, 'brg'), (1, bar2, 'myc')]:
bottoms = [0] * nsamples
barwidth = 0.35
indices = [i - barwidth + ibar * barwidth for i in range(1, nsamples + 1)]
totalheights = [0 for i in range(nsamples)]
for (key, color) in zip(keys, colors):
totalheights = [totalheights[i] + d[key][i] for i in range(nsamples)]
b = pylab.bar(indices, d[key], width=barwidth, bottom=bottoms, color=color)
bars.append(b)
for i in range(nsamples):
bottoms[i] += d[key][i]
ymax = max(bottoms)
pylab.gca().set_ylim([0, 1.08 * ymax])
pylab.xticks([i for i in indices], names, rotation=90)
pylab.gca().set_xlim([0.4, nsamples + 0.6])
yformatter = pylab.ScalarFormatter(useMathText=True)
yformatter.set_powerlimits((-2, 2))
pylab.gca().yaxis.set_major_formatter(yformatter)
yticker = matplotlib.ticker.MaxNLocator(5)
pylab.gca().yaxis.set_major_locator(yticker)
barmarks = [b[0] for b in bars]
barlabels = bar1 + bar2
# reorder bar labels since pylab puts them down columns
barmarks = [barmarks[0], barmarks[3], barmarks[1], barmarks[4], barmarks[2], barmarks[5]]
barlabels = [barlabels[0], barlabels[3], barlabels[1], barlabels[4], barlabels[2], barlabels[5]]
pylab.legend(barmarks, barlabels, handlelength=1.1,\
bbox_to_anchor=(0.54, 1.31), loc='upper center', ncol=3)
pylab.ylabel(ylabel, size=10)
pylab.savefig(plotfile)
pylab.clf()
pylab.close()
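# Illustrative usage sketch of PlotPairedMutFracs; counts_pre and counts_post
# again stand for hypothetical dictionaries returned by
# dms_tools.file_io.ReadDMSCounts:
#
#   PlotPairedMutFracs([counts_pre, counts_post],
#                      ['pre-selection', 'post-selection'], 'mutfracs.pdf')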
def PlotMutCountFracs(plotfile, title, names, all_cumulfracs, syn_cumulfracs, all_counts, syn_counts, legendloc, writecounts=True, nmax=None):
"""Plots fraction of mutations with >= a given number of counts.
Does this for both all mutations and synonymous mutations. The plots
are placed side by side.
The plots show the fractions of mutations
that are found >= *n* times for a range of values of *n*.
CALLING VARIABLES:
* *plotfile* : string giving name of the created plot file. Must end in
the extension ``.pdf``.
* *title* : string giving the title placed above the plot.
* *names* : a list of strings giving the names of the samples to
plot.
* *all_cumulfracs* : a list of the same length as *names* giving
the cumulative fractions for all mutations. Each entry should be a list,
and all of these lists should be the same length. *cumulfracs[i][n]*
gives the fraction of mutations to sample *names[i]* that
are found >= *n* times. The x-axis of the created plot
will go from 0 to *len(all_cumulfracs[0]) - 1*.
* *syn_cumulfracs* : a list like *all_cumulfracs* except for synonymous
mutations.
* *all_counts* : integer counts of all mutations (the total number of mutations
used for *all_cumulfracs*), used to create plot title.
* *syn_counts* : like *all_counts* but for synonymous mutations.
* *legendloc* : specifies the location of the legend. Should be a string.
Valid values are:
- *bottom* : put legend at the bottom of the plot.
- *right* : put legend at the right of the plot.
* *writecounts* is a Boolean switch specifying whether we include the counts of all
mutations (specified by *all_counts* and *syn_counts*) in the plot title. We do
this if *writecounts* is *True*, and do not if it is *False*.
* *nmax* : if specified, should be an integer > 1 giving the x-axis maximum.
"""
if os.path.splitext(plotfile)[1].lower() != '.pdf':
raise ValueError('plotfile must end in .pdf: %s' % plotfile)
# plot setup stuff
matplotlib.rc('text', usetex=True)
matplotlib.rc('font', size=11)
matplotlib.rc('legend', fontsize=11)
if legendloc == 'bottom':
ncol = 4 # number of columns
legendrowheight = 0.2
nrows = math.ceil(len(names) / float(ncol))
xlegendmargin = 0.0
elif legendloc == 'right':
ncol = 1
nrows = 0 # we specify by columns
legendrowheight = 0
xlegendmargin = 1.45
else:
raise ValueError("Invalid legendloc of %s" % legendloc)
(xsize, ysize) = (4.75 + xlegendmargin, legendrowheight * nrows + 2.4)
styles = ['k-.', 'y-.', 'b-', 'r:', 'g:', 'c--', 'm--', 'k-', 'y-', 'b:', 'r-', 'g-', 'c:', 'm:', 'k--', 'y:', 'b--', 'g--', 'c-', 'm-', 'k:']
styles = ['k-.', 'y-.', 'b-', 'r:', 'g:', 'c--', 'm--', 'k:', 'y-', 'b-.', 'r-', 'g--', 'm:']
lmargin = 0.11 * (xsize - xlegendmargin) / xsize # left margin for plot
rmargin = 0.01 + xlegendmargin / xsize # right margin for plot
centermargin = 0.02 # horizontal space between plots
tmargin = 0.16 # top margin
bmargin = 0.22 + (legendrowheight * nrows) / ysize # bottom margin
plotwidth = (1.0 - lmargin - rmargin - centermargin) / 2.0
assert 0 < len(syn_cumulfracs) == len(all_cumulfracs) == len(names) <= len(styles), "Must specify equal numbers of fractions and names, and no more than %d" % len(styles)
# start plotting
fig = pylab.figure(figsize=(xsize, ysize))
ax_all = pylab.axes([lmargin, bmargin, plotwidth, 1 - bmargin - tmargin])
ax_syn = pylab.axes([lmargin + plotwidth + centermargin, bmargin, plotwidth, 1 - bmargin - tmargin])
for (cumulfracs, ax, write_ylabel, ax_title) in [(syn_cumulfracs, ax_syn, False, 'synonymous (%d total)' % syn_counts), (all_cumulfracs, ax_all, True, 'all (%d total)' % all_counts)]:
if not writecounts:
ax_title = ax_title.split()[0]
pylab.axes(ax)
if not nmax:
nmax = max([len(x) for x in cumulfracs])
assert nmax, "Length of entries in cumulfracs must be >= 1"
else:
assert nmax > 1, "nmax should be > 1"
lines = []
xs = [n for n in range(0, nmax)]
for i in range(len(names)):
i_cumulfracs = (cumulfracs[i] + [0] * (nmax - len(cumulfracs[i])))[ : nmax]
plotline = pylab.plot(xs, i_cumulfracs, styles[i], lw=1.5)
lines.append(plotline[0])
pylab.xlabel("Mutation counts", size=11)
if write_ylabel:
pylab.ylabel("Frac. $\ge$ this many counts", size=11)
else:
yformatter = matplotlib.ticker.NullFormatter()
pylab.gca().yaxis.set_major_formatter(yformatter)
pylab.gca().set_ylim([-0.02, 1.02])
yticker = matplotlib.ticker.FixedLocator([0.0, 0.5, 1.0])
pylab.gca().yaxis.set_major_locator(yticker)
pylab.gca().set_xlim([0, nmax - 1])
xticker = matplotlib.ticker.MaxNLocator(4)
pylab.gca().xaxis.set_major_locator(xticker)
if title:
pylab.title(ax_title, size=11)
pylab.suptitle("{\\bf %s}" % title, size=11)
if legendloc == 'bottom':
fig.legend(lines, [name.replace('_', ' ') for name in names], handlelength=2.25, handletextpad=0.2, columnspacing=0.8, ncol=ncol, bbox_to_anchor=(0.5, -0.01), loc='lower center')
elif legendloc == 'right':
fig.legend(lines, [name.replace('_', ' ') for name in names], handlelength=2.25, handletextpad=0.2, columnspacing=0.8, ncol=ncol, bbox_to_anchor=(1.0, 0.52), loc='center right')
else:
raise ValueError("Invalid legendloc of %s" % legendloc)
pylab.savefig(plotfile)
pylab.clf()
pylab.close()
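# Illustrative usage sketch of PlotMutCountFracs; the cumulative fractions,
# total counts, and sample names below are hypothetical. Entry [i][n] is the
# fraction of mutations in sample i found >= n times:
#
#   all_fracs = [[1.0, 0.8, 0.6, 0.3], [1.0, 0.9, 0.7, 0.4]]
#   syn_fracs = [[1.0, 0.7, 0.4, 0.2], [1.0, 0.8, 0.5, 0.3]]
#   PlotMutCountFracs('countfracs.pdf', 'mutation counts', ['DNA', 'mutDNA'],
#                     all_fracs, syn_fracs, 3000, 900, 'bottom')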
def PlotReadStarts(names, depthbystart, plotfile):
"""Plots distribution of read starts.
*names* : list of sample names.
*depthbystart* : list of the same length as *names*. Each entry is a
dictionary keyed by read start position, with the value being the
number of reads. All samples must have the same start positions.
*plotfile* : name of created PDF plot.
"""
if os.path.splitext(plotfile)[1].lower() != '.pdf':
raise ValueError("plotfile must in in .pdf but got %s" % plotfile)
assert len(names) == len(depthbystart) > 0, "names and depthbystart are not non-empty and of the same length"
starts = list(depthbystart[0].keys())
starts.sort()
assert all([set(istarts.keys()) == set(starts) for istarts in depthbystart]), "Not same starts for all samples"
matplotlib.rc('text', usetex=True)
matplotlib.rc('legend', fontsize=10)
matplotlib.rc('font', size=10)
fig = pylab.figure(figsize=(5, 3.25))
(lmargin, rmargin, bmargin, tmargin) = (0.1, 0.02, 0.12, 0.21)
ax = pylab.axes([lmargin, bmargin, 1 - lmargin - rmargin, 1 - bmargin - tmargin])
lines = []
styles = ['bo-', 'rs-', 'gd-.', 'c^:', 'mx-', 'y*--', 'kv-.']
assert len(names) <= len(styles), "too many names for specified styles"
for (name, depth, style) in zip(names, depthbystart, styles):
line = pylab.plot(starts, [depth[start] for start in starts], style)
lines.append(line[0])
pylab.legend(lines, [name.replace('_', ' ') for name in names], ncol=int(math.ceil(len(names) / 2.0)), loc='upper center', numpoints=1, bbox_to_anchor=(0.54, 1.25))
yformatter = pylab.ScalarFormatter(useMathText=True)
yformatter.set_powerlimits((-2, 4))
pylab.gca().yaxis.set_major_formatter(yformatter)
pylab.gca().set_xlim([starts[0] - 0.03 * (starts[-1] - starts[0]), starts[-1] + 0.03 * (starts[-1] - starts[0])])
pylab.xticks(starts)
pylab.xlabel('read start position')
pylab.ylabel('number of reads')
pylab.savefig(plotfile)
pylab.clf()
pylab.close()
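# Illustrative usage sketch of PlotReadStarts; the read-start dictionaries
# (one per sample, with the same keys in each) are hypothetical:
#
#   starts1 = {1: 5000, 50: 4800, 100: 4600}
#   starts2 = {1: 5200, 50: 4700, 100: 4500}
#   PlotReadStarts(['sample1', 'sample2'], [starts1, starts2],
#                  'readstarts.pdf')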
def PlotSampleBargraphs(names, categories, data, plotfile, ylabel, groupbyfirstword=True, ncolumns=3):
"""Plots bargraph of counts for different samples.
*names* is a list of the names of the different samples.
*categories* is a list of the different categories for each sample.
*data* is a dictionary keyed by each name in *names*, and that value
is in turn a dictionary keyed by each category in *categories*.
*plotfile* is the name of the PDF file that we are creating.
*ylabel* is the label placed on the y-axis.
*groupbyfirstword* is a Boolean switch that specifies whether we group
consecutive categories with the same first word in the string to have
the same color.
*ncolumns* is the number of columns in each legend line.
"""
if os.path.splitext(plotfile)[1].lower() != '.pdf':
raise ValueError("plotfile must in in .pdf but got %s" % plotfile)
assert len(names) == len(data) > 0, "names and data are not non-empty and of the same length"
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
hatches = ['', '//////', '*****', '.....', '*', 'o']
matplotlib.rc('text', usetex=True)
matplotlib.rc('legend', fontsize=10)
matplotlib.rc('font', size=10)
matplotlib.rc('patch', linewidth=0.75)
ncol = ncolumns # number of legend columns
nlegendrows = int(math.ceil(len(categories) / float(ncol)))
fig = pylab.figure(figsize=(6, 3.96 * (1.02 + 0.055 * nlegendrows)))
(lmargin, rmargin, bmargin, tmargin) = (0.09, 0.01, 0.41, 0.0175 + 0.046 * nlegendrows)
ax = pylab.axes([lmargin, bmargin, 1 - lmargin - rmargin, 1 - bmargin - tmargin])
bottoms = [0] * len(names)
bars = []
barwidth = 0.5
indices = [i - barwidth / 2. for i in range(1, len(names) + 1)]
icolor = ihatch = 0
previousfirstword = None
for category in categories:
if previousfirstword == None:
pass
elif groupbyfirstword:
if previousfirstword == category.split()[0]:
ihatch += 1
else:
ihatch = 0
icolor += 1
else:
icolor += 1
previousfirstword = category.split()[0]
b = pylab.bar(indices, [data[name][category] for name in names], width=barwidth, bottom=bottoms, color=colors[icolor % len(colors)], hatch=hatches[ihatch % len(hatches)])
bars.append(b)
for (iname, name) in enumerate(names):
bottoms[iname] += data[name][category]
ymax = max(bottoms)
pylab.gca().set_ylim([0, 1.04 * ymax])
pylab.xticks([i + barwidth / 2. for i in indices], [name.replace('_', ' ') for name in names], rotation=90)
pylab.gca().set_xlim([0.5, len(names) + 0.5])
yformatter = pylab.ScalarFormatter(useMathText=True)
yformatter.set_powerlimits((-3, 3))
pylab.gca().yaxis.set_major_formatter(yformatter)
pylab.legend([b[0] for b in bars], categories, handletextpad=0.3, handlelength=1.3, columnspacing=1.1, bbox_to_anchor=(0.53, 1.037 + 0.11 * nlegendrows), loc='upper center', ncol=ncol)
pylab.ylabel(ylabel)
pylab.savefig(plotfile)
pylab.clf()
pylab.close()
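# Illustrative usage sketch of PlotSampleBargraphs; the categories and
# counts below are hypothetical:
#
#   categories = ['read pairs filtered', 'read pairs aligned']
#   data = {'sample1': {'read pairs filtered': 100, 'read pairs aligned': 900},
#           'sample2': {'read pairs filtered': 150, 'read pairs aligned': 850}}
#   PlotSampleBargraphs(['sample1', 'sample2'], categories, data,
#                       'bargraph.pdf', 'number of read pairs')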
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-3.0 |
moehuster/python | SimpleGUICS2Pygame/simpleplot.py | 1 | 9935 | # -*- coding: latin-1 -*-
"""
simpleplot (November 1st, 2013)
Replace the simpleplot module of CodeSkulptor.
Require matplotlib_
(`Unofficial Windows Binaries`_)
(and must be installed separately).
Piece of SimpleGUICS2Pygame.
https://bitbucket.org/OPiMedia/simpleguics2pygame
GPLv3 --- Copyright (C) 2013 Olivier Pirson
http://www.opimedia.be/
.. _matplotlib: http://matplotlib.org/
.. _`Unofficial Windows Binaries`: http://www.lfd.uci.edu/~gohlke/pythonlibs/#matplotlib
"""
try:
import matplotlib.pyplot
except Exception as e:
import os
if os.environ.get('READTHEDOCS', None) != 'True':
raise Exception('matplotlib not installed! ' + str(e))
#
# Private global constant
##########################
_COLORS = ('#edc240', '#afd8f8', '#cb4b4b', '#4da74d',
'#9440ed', '#bd9b33', '#8cacc6', '#a23c3c',
'#3d853d', '#7633bd', '#ffe84c', '#d2ffff',
'#f35a5a', '#5cc85c', '#b14cff', '#8e7426',
'#698194', '#792d2d', '#2e642e', '#58268e',
'#ffff59', '#f4ffff', '#ff6969', '#6be96b',
'#cf59ff', '#5e4d19', '#455663', '#511d1d',
'#1e421e', '#3b195e', '#ffff66', '#ffffff')
"""
Color used for each graph.
**(Not available in SimpleGUI of CodeSkulptor.)**
"""
#
# Functions
############
def _block():
"""
If some plot windows are open
then block the program until closing all windows.
**(Not available in SimpleGUI of CodeSkulptor.)**
"""
matplotlib.pyplot.show()
def plot_bars(framename, width, height, xlabel, ylabel, datasets,
legends=None,
_block=False, _filename=None):
"""
Open a window titled `framename`
and plot graphs with `datasets` data.
`xlabel` and `ylabel` are labels of x-axis and y-axis.
`datasets` must be a sequence of datasets.
Each dataset must be:
* A sequence of (x, y) pairs.
Each point (x, y) is represented by a vertical bar of height y.
* Or a dict mapping x to y.
Each point (x, y) is represented by a vertical bar of height y.
If `legends` is not None
then it must be a sequence of legends, one for each graph.
If `_block`
then block the program until closing the window
else continue and close the window when the program stops.
**(Option not available in SimpleGUI of CodeSkulptor.)**
If `_filename` is not None
then save the image to this file.
**(Option not available in SimpleGUI of CodeSkulptor.)**
:param framename: str
:param width: int > 0
:param height: int > 0
:param xlabel: str
:param ylabel: str
:param datasets: (list or tuple)
of (((list or tuple) of ([int or float, int or float]
or (int or float, int or float)))
or (dict (int or float): (int or float)))
:param legends: None or ((list or tuple) of same length as datasets)
:param _block: False
:param _filename: None or str
"""
assert isinstance(framename, str), type(framename)
assert isinstance(width, int), type(width)
assert width > 0, width
assert isinstance(height, int), type(height)
assert height > 0, height
assert isinstance(xlabel, str), type(xlabel)
assert isinstance(ylabel, str), type(ylabel)
assert isinstance(datasets, list) or isinstance(datasets, tuple), \
type(datasets)
if __debug__:
for dataset in datasets:
assert isinstance(dataset, list) or isinstance(dataset, tuple) \
or isinstance(dataset, dict), type(datasets)
for x, y in (dataset.items() if isinstance(dataset, dict)
else dataset):
assert isinstance(x, int) or isinstance(x, float), (type(x), x)
assert isinstance(y, int) or isinstance(y, float), (type(y), y)
assert ((legends is None) or isinstance(legends, list)
or isinstance(legends, tuple)), type(legends)
assert (legends is None) or (len(legends) == len(datasets)), legends
assert isinstance(_block, bool), type(_block)
assert (_filename is None) or isinstance(_filename, str), type(_filename)
fig = matplotlib.pyplot.figure()
fig.set_size_inches(width//fig.get_dpi(), height//fig.get_dpi(),
forward=True)
fig.canvas.set_window_title(framename)
matplotlib.pyplot.title(framename)
from os.path import sep
icon_path = __file__.split(sep)[:-1]
try:
icon_path.extend(('_img', 'SimpleGUICS2Pygame_32x32.ico'))
matplotlib.pyplot.get_current_fig_manager().window.wm_iconbitmap(
sep.join(icon_path))
except:
pass
matplotlib.pyplot.xlabel(xlabel)
matplotlib.pyplot.ylabel(ylabel)
matplotlib.pyplot.grid()
bar_width = 0.8/len(datasets)
for i, dataset in enumerate(datasets):
bar_lefts, bar_heights = zip(*(sorted(dataset.items())
if isinstance(dataset, dict)
else dataset))
matplotlib.pyplot.bar([x + bar_width*i for x in bar_lefts],
bar_heights,
width=bar_width,
color=_COLORS[i % len(_COLORS)],
edgecolor=_COLORS[i % len(_COLORS)],
figure=fig,
alpha=0.5)
ymin, ymax = matplotlib.pyplot.ylim()
matplotlib.pyplot.ylim(ymin, ymax + 1)
if legends is not None:
matplotlib.pyplot.legend(legends, loc='upper right')
matplotlib.pyplot.show(block=_block)
if _filename is not None:
matplotlib.pyplot.savefig(_filename)
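# Illustrative usage sketch of plot_bars (hypothetical data); both the
# sequence-of-pairs form and the dict form of a dataset are accepted:
#
#   plot_bars('Example bars', 400, 300, 'x', 'y',
#             [[(1, 2), (2, 5), (3, 3)], {1: 4, 2: 1, 3: 6}],
#             legends=['first', 'second'])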
def plot_lines(framename, width, height, xlabel, ylabel, datasets,
points=False, legends=None,
_block=False, _filename=None):
"""
Open a window titled `framename`
and plot graphs with `datasets` data.
`xlabel` and `ylabel` are labels of x-axis and y-axis.
`datasets` must be a sequence of datasets.
Each dataset must be:
* A sequence of (x, y) pairs.
Each point (x, y) is plotted (in the given order)
and connected with a line to the previous and next points.
* Or a dict mapping x to y.
Each point (x, y) is plotted (in ascending order of x value)
and connected with a line to the previous and next points.
If `points`
then each point is highlighted by a small disc
(a small circle in CodeSkulptor).
If `legends` is not None
then it must be a sequence of legends, one for each graph.
If `_block`
then block the program until closing the window
else continue and close the window when the program stops.
**(Option not available in SimpleGUI of CodeSkulptor.)**
If `_filename` is not None
then save the image to this file.
**(Option not available in SimpleGUI of CodeSkulptor.)**
:param framename: str
:param width: int > 0
:param height: int > 0
:param xlabel: str
:param ylabel: str
:param datasets: (list or tuple)
of (((list or tuple) of ([int or float, int or float]
or (int or float, int or float)))
or (dict (int or float): (int or float)))
:param points: bool
:param legends: None or ((list or tuple) of same length as datasets)
:param _block: False
:param _filename: None or str
"""
assert isinstance(framename, str), type(framename)
assert isinstance(width, int), type(width)
assert width > 0, width
assert isinstance(height, int), type(height)
assert height > 0, height
assert isinstance(xlabel, str), type(xlabel)
assert isinstance(ylabel, str), type(ylabel)
assert isinstance(datasets, list) or isinstance(datasets, tuple), \
type(datasets)
if __debug__:
for dataset in datasets:
assert isinstance(dataset, list) or isinstance(dataset, tuple) \
or isinstance(dataset, dict), type(datasets)
for x, y in (dataset.items() if isinstance(dataset, dict)
else dataset):
assert isinstance(x, int) or isinstance(x, float), (type(x), x)
assert isinstance(y, int) or isinstance(y, float), (type(y), y)
assert isinstance(points, bool), type(points)
assert ((legends is None) or isinstance(legends, list)
or isinstance(legends, tuple)), type(legends)
assert (legends is None) or (len(legends) == len(datasets)), legends
assert isinstance(_block, bool), type(_block)
assert (_filename is None) or isinstance(_filename, str), type(_filename)
fig = matplotlib.pyplot.figure()
fig.set_size_inches(width//fig.get_dpi(), height//fig.get_dpi(),
forward=True)
fig.canvas.set_window_title(framename)
matplotlib.pyplot.title(framename)
from os.path import sep
icon_path = __file__.split(sep)[:-1]
try:
icon_path.extend(('_img', 'SimpleGUICS2Pygame_32x32.ico'))
matplotlib.pyplot.get_current_fig_manager().window.wm_iconbitmap(
sep.join(icon_path))
except:
pass
matplotlib.pyplot.xlabel(xlabel)
matplotlib.pyplot.ylabel(ylabel)
matplotlib.pyplot.grid()
for i, dataset in enumerate(datasets):
matplotlib.pyplot.plot(*zip(*(sorted(dataset.items())
if isinstance(dataset, dict)
else dataset)),
color=_COLORS[i % len(_COLORS)],
figure=fig,
marker=('o' if points
else None))
ymin, ymax = matplotlib.pyplot.ylim()
matplotlib.pyplot.ylim(ymin - 1, ymax + 1)
if legends is not None:
matplotlib.pyplot.legend(legends, loc='upper right')
matplotlib.pyplot.show(block=_block)
if _filename is not None:
matplotlib.pyplot.savefig(_filename)
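# Illustrative usage sketch of plot_lines (hypothetical data); with
# points=True each plotted point is also marked with a small disc:
#
#   plot_lines('Example lines', 400, 300, 'x', 'y',
#              [[(0, 0), (1, 1), (2, 4)], {0: 1, 1: 3, 2: 2}],
#              points=True, legends=['first', 'second'])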
| gpl-2.0 |
wwf5067/statsmodels | statsmodels/graphics/utils.py | 26 | 4138 | """Helper functions for graphics with Matplotlib."""
from statsmodels.compat.python import lrange, range
__all__ = ['create_mpl_ax', 'create_mpl_fig']
def _import_mpl():
"""This function is not needed outside this utils module."""
try:
import matplotlib.pyplot as plt
except:
raise ImportError("Matplotlib is not found.")
return plt
def create_mpl_ax(ax=None):
"""Helper function for when a single plot axis is needed.
Parameters
----------
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
ax : Matplotlib AxesSubplot instance
The created axis if `ax` is None, otherwise the axis that was passed
in.
Notes
-----
This function imports `matplotlib.pyplot`, which should only be done to
create (a) figure(s) with ``plt.figure``. All other functionality exposed
by the pyplot module can and should be imported directly from its
Matplotlib module.
See Also
--------
create_mpl_fig
Examples
--------
A plotting function has a keyword ``ax=None``. Then calls:
>>> from statsmodels.graphics import utils
>>> fig, ax = utils.create_mpl_ax(ax)
"""
if ax is None:
plt = _import_mpl()
fig = plt.figure()
ax = fig.add_subplot(111)
else:
fig = ax.figure
return fig, ax
def create_mpl_fig(fig=None, figsize=None):
"""Helper function for when multiple plot axes are needed.
Those axes should be created in the functions they are used in, with
``fig.add_subplot()``.
Parameters
----------
fig : Matplotlib figure instance, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
fig : Matplotlib figure instance
If `fig` is None, the created figure. Otherwise the input `fig` is
returned.
See Also
--------
create_mpl_ax
"""
if fig is None:
plt = _import_mpl()
fig = plt.figure(figsize=figsize)
return fig
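# Minimal usage sketch (hypothetical plotting function) showing the intended
# pattern: pass an optional existing figure through create_mpl_fig and add
# subplots inside the plotting function itself:
#
#   def my_plot(data, fig=None):
#       fig = create_mpl_fig(fig)
#       ax = fig.add_subplot(111)
#       ax.plot(data)
#       return fig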
def maybe_name_or_idx(idx, model):
"""
Give a name or an integer and return the name and integer location of the
column in a design matrix.
"""
if idx is None:
idx = lrange(model.exog.shape[1])
if isinstance(idx, int):
exog_name = model.exog_names[idx]
exog_idx = idx
# anticipate index as list and recurse
elif isinstance(idx, (tuple, list)):
exog_name = []
exog_idx = []
for item in idx:
exog_name_item, exog_idx_item = maybe_name_or_idx(item, model)
exog_name.append(exog_name_item)
exog_idx.append(exog_idx_item)
else: # assume we've got a string variable
exog_name = idx
exog_idx = model.exog_names.index(idx)
return exog_name, exog_idx
def get_data_names(series_or_dataframe):
"""
Input can be an array or pandas-like. Will handle 1d array-like but not
2d. Returns a str for 1d data or a list of strings for 2d data.
"""
names = getattr(series_or_dataframe, 'name', None)
if not names:
names = getattr(series_or_dataframe, 'columns', None)
if not names:
shape = getattr(series_or_dataframe, 'shape', [1])
nvars = 1 if len(shape) == 1 else shape[1]
names = ["X%d" for names in range(nvars)]
if nvars == 1:
names = names[0]
else:
names = names.tolist()
return names
def annotate_axes(index, labels, points, offset_points, size, ax, **kwargs):
"""
Annotate Axes with labels, points, offset_points according to the
given index.
"""
for i in index:
label = labels[i]
point = points[i]
offset = offset_points[i]
ax.annotate(label, point, xytext=offset, textcoords="offset points",
size=size, **kwargs)
return ax
| bsd-3-clause |
duncanwp/iris | lib/iris/tests/unit/plot/__init__.py | 2 | 4524 | # (C) British Crown Copyright 2014 - 2018, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the :mod:`iris.plot` module."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from iris.plot import _broadcast_2d as broadcast
from iris.coords import AuxCoord
from iris.tests.stock import simple_2d, lat_lon_cube
@tests.skip_plot
class TestGraphicStringCoord(tests.GraphicsTest):
def setUp(self):
super(TestGraphicStringCoord, self).setUp()
self.cube = simple_2d(with_bounds=True)
self.cube.add_aux_coord(AuxCoord(list('abcd'),
long_name='str_coord'), 1)
self.lat_lon_cube = lat_lon_cube()
def tick_loc_and_label(self, axis_name, axes=None):
# Intentional lazy import so that subclasses can have an opportunity
# to change the backend.
import matplotlib.pyplot as plt
# Draw the plot to 'fix' the ticks.
if axes:
axes.figure.canvas.draw()
else:
axes = plt.gca()
plt.draw()
axis = getattr(axes, axis_name)
locations = axis.get_majorticklocs()
labels = [tick.get_text() for tick in axis.get_ticklabels()]
return list(zip(locations, labels))
def assertBoundsTickLabels(self, axis, axes=None):
actual = self.tick_loc_and_label(axis, axes)
expected = [(-1.0, 'a'), (0.0, 'a'), (1.0, 'b'),
(2.0, 'c'), (3.0, 'd'), (4.0, u'')]
self.assertEqual(expected, actual)
def assertPointsTickLabels(self, axis, axes=None):
actual = self.tick_loc_and_label(axis, axes)
expected = [(0.0, 'a'), (1.0, 'b'), (2.0, 'c'), (3.0, 'd')]
self.assertEqual(expected, actual)
@tests.skip_plot
class MixinCoords(object):
"""
Mixin class of common plotting tests providing 2-dimensional
permutations of coordinates and anonymous dimensions.
"""
def _check(self, u, v, data=None):
self.assertEqual(self.mpl_patch.call_count, 1)
if data is not None:
(actual_u, actual_v, actual_data), _ = self.mpl_patch.call_args
self.assertArrayEqual(actual_data, data)
else:
(actual_u, actual_v), _ = self.mpl_patch.call_args
self.assertArrayEqual(actual_u, u)
self.assertArrayEqual(actual_v, v)
def test_foo_bar(self):
self.draw_func(self.cube, coords=('foo', 'bar'))
u, v = broadcast(self.foo, self.bar)
self._check(u, v, self.data)
def test_bar_foo(self):
self.draw_func(self.cube, coords=('bar', 'foo'))
u, v = broadcast(self.bar, self.foo)
self._check(u, v, self.dataT)
def test_foo_0(self):
self.draw_func(self.cube, coords=('foo', 0))
u, v = broadcast(self.foo, self.bar_index)
self._check(u, v, self.data)
def test_1_bar(self):
self.draw_func(self.cube, coords=(1, 'bar'))
u, v = broadcast(self.foo_index, self.bar)
self._check(u, v, self.data)
def test_1_0(self):
self.draw_func(self.cube, coords=(1, 0))
u, v = broadcast(self.foo_index, self.bar_index)
self._check(u, v, self.data)
def test_0_foo(self):
self.draw_func(self.cube, coords=(0, 'foo'))
u, v = broadcast(self.bar_index, self.foo)
self._check(u, v, self.dataT)
def test_bar_1(self):
self.draw_func(self.cube, coords=('bar', 1))
u, v = broadcast(self.bar, self.foo_index)
self._check(u, v, self.dataT)
def test_0_1(self):
self.draw_func(self.cube, coords=(0, 1))
u, v = broadcast(self.bar_index, self.foo_index)
self._check(u, v, self.dataT)
| lgpl-3.0 |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/tests/indexes/period/test_period.py | 3 | 29955 | import pytest
import numpy as np
from numpy.random import randn
from datetime import timedelta
import pandas as pd
from pandas.util import testing as tm
from pandas import (PeriodIndex, period_range, notnull, DatetimeIndex, NaT,
Index, Period, Int64Index, Series, DataFrame, date_range,
offsets, compat)
from ..datetimelike import DatetimeLike
class TestPeriodIndex(DatetimeLike):
_holder = PeriodIndex
_multiprocess_can_split_ = True
def setup_method(self, method):
self.indices = dict(index=tm.makePeriodIndex(10))
self.setup_indices()
def create_index(self):
return period_range('20130101', periods=5, freq='D')
def test_astype(self):
# GH 13149, GH 13209
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')
result = idx.astype(object)
expected = Index([Period('2016-05-16', freq='D')] +
[Period(NaT, freq='D')] * 3, dtype='object')
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([16937] + [-9223372036854775808] * 3,
dtype=np.int64)
tm.assert_index_equal(result, expected)
idx = period_range('1990', '2009', freq='A')
result = idx.astype('i8')
tm.assert_index_equal(result, Index(idx.asi8))
tm.assert_numpy_array_equal(result.values, idx.asi8)
def test_astype_raises(self):
# GH 13149, GH 13209
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')
pytest.raises(ValueError, idx.astype, str)
pytest.raises(ValueError, idx.astype, float)
pytest.raises(ValueError, idx.astype, 'timedelta64')
pytest.raises(ValueError, idx.astype, 'timedelta64[ns]')
def test_pickle_compat_construction(self):
pass
def test_pickle_round_trip(self):
for freq in ['D', 'M', 'A']:
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq=freq)
result = tm.round_trip_pickle(idx)
tm.assert_index_equal(result, idx)
def test_get_loc(self):
idx = pd.period_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
assert idx.get_loc(idx[1], method) == 1
assert idx.get_loc(idx[1].asfreq('H', how='start'), method) == 1
assert idx.get_loc(idx[1].to_timestamp(), method) == 1
assert idx.get_loc(idx[1].to_timestamp()
.to_pydatetime(), method) == 1
assert idx.get_loc(str(idx[1]), method) == 1
idx = pd.period_range('2000-01-01', periods=5)[::2]
assert idx.get_loc('2000-01-02T12', method='nearest',
tolerance='1 day') == 1
assert idx.get_loc('2000-01-02T12', method='nearest',
tolerance=pd.Timedelta('1D')) == 1
assert idx.get_loc('2000-01-02T12', method='nearest',
tolerance=np.timedelta64(1, 'D')) == 1
assert idx.get_loc('2000-01-02T12', method='nearest',
tolerance=timedelta(1)) == 1
with tm.assert_raises_regex(ValueError, 'must be convertible'):
idx.get_loc('2000-01-10', method='nearest', tolerance='foo')
msg = 'Input has different freq from PeriodIndex\\(freq=D\\)'
with tm.assert_raises_regex(ValueError, msg):
idx.get_loc('2000-01-10', method='nearest', tolerance='1 hour')
with pytest.raises(KeyError):
idx.get_loc('2000-01-10', method='nearest', tolerance='1 day')
def test_where(self):
i = self.create_index()
result = i.where(notnull(i))
expected = i
tm.assert_index_equal(result, expected)
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notnull(i2))
expected = i2
tm.assert_index_equal(result, expected)
def test_where_array_like(self):
i = self.create_index()
cond = [False] + [True] * (len(i) - 1)
klasses = [list, tuple, np.array, Series]
expected = pd.PeriodIndex([pd.NaT] + i[1:].tolist(), freq='D')
for klass in klasses:
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_where_other(self):
i = self.create_index()
for arr in [np.nan, pd.NaT]:
result = i.where(notnull(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notnull(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notnull(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_get_indexer(self):
idx = pd.period_range('2000-01-01', periods=3).asfreq('H', how='start')
tm.assert_numpy_array_equal(idx.get_indexer(idx),
np.array([0, 1, 2], dtype=np.intp))
target = pd.PeriodIndex(['1999-12-31T23', '2000-01-01T12',
'2000-01-02T01'], freq='H')
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest',
tolerance='1 hour'),
np.array([0, -1, 1], dtype=np.intp))
msg = 'Input has different freq from PeriodIndex\\(freq=H\\)'
with tm.assert_raises_regex(ValueError, msg):
idx.get_indexer(target, 'nearest', tolerance='1 minute')
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest',
tolerance='1 day'),
np.array([0, 1, 1], dtype=np.intp))
def test_repeat(self):
# GH10183
idx = pd.period_range('2000-01-01', periods=3, freq='D')
res = idx.repeat(3)
exp = PeriodIndex(idx.values.repeat(3), freq='D')
tm.assert_index_equal(res, exp)
assert res.freqstr == 'D'
def test_period_index_indexer(self):
# GH4125
idx = pd.period_range('2002-01', '2003-12', freq='M')
df = pd.DataFrame(pd.np.random.randn(24, 10), index=idx)
tm.assert_frame_equal(df, df.loc[idx])
tm.assert_frame_equal(df, df.loc[list(idx)])
tm.assert_frame_equal(df, df.loc[list(idx)])
tm.assert_frame_equal(df.iloc[0:5], df.loc[idx[0:5]])
tm.assert_frame_equal(df, df.loc[list(idx)])
def test_fillna_period(self):
# GH 11343
idx = pd.PeriodIndex(['2011-01-01 09:00', pd.NaT,
'2011-01-01 11:00'], freq='H')
exp = pd.PeriodIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H')
tm.assert_index_equal(
idx.fillna(pd.Period('2011-01-01 10:00', freq='H')), exp)
exp = pd.Index([pd.Period('2011-01-01 09:00', freq='H'), 'x',
pd.Period('2011-01-01 11:00', freq='H')], dtype=object)
tm.assert_index_equal(idx.fillna('x'), exp)
exp = pd.Index([pd.Period('2011-01-01 09:00', freq='H'),
pd.Period('2011-01-01', freq='D'),
pd.Period('2011-01-01 11:00', freq='H')], dtype=object)
tm.assert_index_equal(idx.fillna(
pd.Period('2011-01-01', freq='D')), exp)
def test_no_millisecond_field(self):
with pytest.raises(AttributeError):
DatetimeIndex.millisecond
with pytest.raises(AttributeError):
DatetimeIndex([]).millisecond
def test_difference_freq(self):
# GH14323: difference of Period MUST preserve frequency
# but the ability to union results must be preserved
index = period_range("20160920", "20160925", freq="D")
other = period_range("20160921", "20160924", freq="D")
expected = PeriodIndex(["20160920", "20160925"], freq='D')
idx_diff = index.difference(other)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
other = period_range("20160922", "20160925", freq="D")
idx_diff = index.difference(other)
expected = PeriodIndex(["20160920", "20160921"], freq='D')
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
def test_hash_error(self):
index = period_range('20010101', periods=10)
with tm.assert_raises_regex(TypeError, "unhashable type: %r" %
type(index).__name__):
hash(index)
def test_make_time_series(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index)
assert isinstance(series, Series)
def test_shallow_copy_empty(self):
# GH13067
idx = PeriodIndex([], freq='M')
result = idx._shallow_copy()
expected = idx
tm.assert_index_equal(result, expected)
def test_dtype_str(self):
pi = pd.PeriodIndex([], freq='M')
assert pi.dtype_str == 'period[M]'
assert pi.dtype_str == str(pi.dtype)
pi = pd.PeriodIndex([], freq='3M')
assert pi.dtype_str == 'period[3M]'
assert pi.dtype_str == str(pi.dtype)
def test_view_asi8(self):
idx = pd.PeriodIndex([], freq='M')
exp = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(idx.view('i8'), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M')
exp = np.array([492, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx.view('i8'), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
exp = np.array([14975, -9223372036854775808], dtype=np.int64)
idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D')
tm.assert_numpy_array_equal(idx.view('i8'), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
def test_values(self):
idx = pd.PeriodIndex([], freq='M')
exp = np.array([], dtype=np.object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.get_values(), exp)
exp = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(idx._values, exp)
idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M')
exp = np.array([pd.Period('2011-01', freq='M'), pd.NaT], dtype=object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.get_values(), exp)
exp = np.array([492, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx._values, exp)
idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D')
exp = np.array([pd.Period('2011-01-01', freq='D'), pd.NaT],
dtype=object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.get_values(), exp)
exp = np.array([14975, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx._values, exp)
def test_period_index_length(self):
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
assert len(pi) == 9
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
assert len(pi) == 4 * 9
pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
assert len(pi) == 12 * 9
start = Period('02-Apr-2005', 'B')
i1 = PeriodIndex(start=start, periods=20)
assert len(i1) == 20
assert i1.freq == start.freq
assert i1[0] == start
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
assert len(i1) == 10
assert i1.freq == end_intv.freq
assert i1[-1] == end_intv
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
try:
PeriodIndex(start=start, end=end_intv)
raise AssertionError('Cannot allow mixed freq for start and end')
except ValueError:
pass
end_intv = Period('2005-05-01', 'B')
i1 = PeriodIndex(start=start, end=end_intv)
try:
PeriodIndex(start=start)
raise AssertionError(
'Must specify periods if missing start or end')
except ValueError:
pass
# infer freq from first element
i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
assert len(i2) == 2
assert i2[0] == end_intv
i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
assert len(i2) == 2
assert i2[0] == end_intv
# Mixed freq should fail
vals = [end_intv, Period('2006-12-31', 'w')]
pytest.raises(ValueError, PeriodIndex, vals)
vals = np.array(vals)
pytest.raises(ValueError, PeriodIndex, vals)
def test_fields(self):
# year, month, day, hour, minute
# second, weekofyear, week, dayofweek, weekday, dayofyear, quarter
# qyear
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2005')
self._check_all_fields(pi)
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2002')
self._check_all_fields(pi)
pi = PeriodIndex(freq='M', start='1/1/2001', end='1/1/2002')
self._check_all_fields(pi)
pi = PeriodIndex(freq='D', start='12/1/2001', end='6/1/2001')
self._check_all_fields(pi)
pi = PeriodIndex(freq='B', start='12/1/2001', end='6/1/2001')
self._check_all_fields(pi)
pi = PeriodIndex(freq='H', start='12/31/2001', end='1/1/2002 23:00')
self._check_all_fields(pi)
pi = PeriodIndex(freq='Min', start='12/31/2001', end='1/1/2002 00:20')
self._check_all_fields(pi)
pi = PeriodIndex(freq='S', start='12/31/2001 00:00:00',
end='12/31/2001 00:05:00')
self._check_all_fields(pi)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
self._check_all_fields(i1)
def _check_all_fields(self, periodindex):
fields = ['year', 'month', 'day', 'hour', 'minute', 'second',
'weekofyear', 'week', 'dayofweek', 'dayofyear',
'quarter', 'qyear', 'days_in_month']
periods = list(periodindex)
s = pd.Series(periodindex)
for field in fields:
field_idx = getattr(periodindex, field)
assert len(periodindex) == len(field_idx)
for x, val in zip(periods, field_idx):
assert getattr(x, field) == val
if len(s) == 0:
continue
field_s = getattr(s.dt, field)
assert len(periodindex) == len(field_s)
for x, val in zip(periods, field_s):
assert getattr(x, field) == val
def test_indexing(self):
# GH 4390, iat incorrectly indexing
index = period_range('1/1/2001', periods=10)
s = Series(randn(10), index=index)
expected = s[index[0]]
result = s.iat[0]
assert expected == result
def test_period_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = period_range('2011/01/01', periods=6, freq='M')
idx2 = period_range('2013', periods=6, freq='A')
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.set_index(idx2)
tm.assert_index_equal(df.index, idx2)
def test_factorize(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
idx2 = pd.PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp)
arr, idx = idx2.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp)
exp_idx = PeriodIndex(['2014-03', '2014-02', '2014-01'], freq='M')
arr, idx = idx2.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
def test_asobject_like(self):
idx = pd.PeriodIndex([], freq='M')
exp = np.array([], dtype=object)
tm.assert_numpy_array_equal(idx.asobject.values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M')
exp = np.array([pd.Period('2011-01', freq='M'), pd.NaT], dtype=object)
tm.assert_numpy_array_equal(idx.asobject.values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
exp = np.array([pd.Period('2011-01-01', freq='D'), pd.NaT],
dtype=object)
idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D')
tm.assert_numpy_array_equal(idx.asobject.values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
def test_is_(self):
create_index = lambda: PeriodIndex(freq='A', start='1/1/2001',
end='12/1/2009')
index = create_index()
assert index.is_(index)
assert not index.is_(create_index())
assert index.is_(index.view())
assert index.is_(index.view().view().view().view().view())
assert index.view().is_(index)
ind2 = index.view()
index.name = "Apple"
assert ind2.is_(index)
assert not index.is_(index[:])
assert not index.is_(index.asfreq('M'))
assert not index.is_(index.asfreq('A'))
assert not index.is_(index - 2)
assert not index.is_(index - 0)
def test_comp_period(self):
idx = period_range('2007-01', periods=20, freq='M')
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
def test_contains(self):
rng = period_range('2007-01', freq='M', periods=10)
assert Period('2007-01', freq='M') in rng
assert not Period('2007-01', freq='D') in rng
assert not Period('2007-01', freq='2M') in rng
def test_contains_nat(self):
# see gh-13582
idx = period_range('2007-01', freq='M', periods=10)
assert pd.NaT not in idx
assert None not in idx
assert float('nan') not in idx
assert np.nan not in idx
idx = pd.PeriodIndex(['2011-01', 'NaT', '2011-02'], freq='M')
assert pd.NaT in idx
assert None in idx
assert float('nan') in idx
assert np.nan in idx
def test_periods_number_check(self):
with pytest.raises(ValueError):
period_range('2011-1-1', '2012-1-1', 'B')
def test_start_time(self):
index = PeriodIndex(freq='M', start='2016-01-01', end='2016-05-31')
expected_index = date_range('2016-01-01', end='2016-05-31', freq='MS')
tm.assert_index_equal(index.start_time, expected_index)
def test_end_time(self):
index = PeriodIndex(freq='M', start='2016-01-01', end='2016-05-31')
expected_index = date_range('2016-01-01', end='2016-05-31', freq='M')
tm.assert_index_equal(index.end_time, expected_index)
def test_index_duplicate_periods(self):
# monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN')
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts[2007]
expected = ts[1:3]
tm.assert_series_equal(result, expected)
result[:] = 1
assert (ts[1:3] == 1).all()
# not monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN')
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts[2007]
expected = ts[idx == 2007]
tm.assert_series_equal(result, expected)
def test_index_unique(self):
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN')
expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN')
tm.assert_index_equal(idx.unique(), expected)
assert idx.nunique() == 3
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN',
tz='US/Eastern')
expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN',
tz='US/Eastern')
tm.assert_index_equal(idx.unique(), expected)
assert idx.nunique() == 3
def test_shift_gh8083(self):
# test shift for PeriodIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = PeriodIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05', '2013-01-06'], freq='D')
tm.assert_index_equal(result, expected)
def test_shift(self):
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='A', start='1/1/2002', end='12/1/2010')
tm.assert_index_equal(pi1.shift(0), pi1)
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(1), pi2)
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='A', start='1/1/2000', end='12/1/2008')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(-1), pi2)
pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='M', start='2/1/2001', end='1/1/2010')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(1), pi2)
pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='M', start='12/1/2000', end='11/1/2009')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(-1), pi2)
pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='D', start='1/2/2001', end='12/2/2009')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(1), pi2)
pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='D', start='12/31/2000', end='11/30/2009')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(-1), pi2)
def test_shift_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
result = idx.shift(1)
expected = PeriodIndex(['2011-02', '2011-03', 'NaT',
'2011-05'], freq='M', name='idx')
tm.assert_index_equal(result, expected)
assert result.name == expected.name
def test_ndarray_compat_properties(self):
if compat.is_platform_32bit():
pytest.skip("skipping on 32bit")
super(TestPeriodIndex, self).test_ndarray_compat_properties()
def test_shift_ndarray(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
result = idx.shift(np.array([1, 2, 3, 4]))
expected = PeriodIndex(['2011-02', '2011-04', 'NaT',
'2011-08'], freq='M', name='idx')
tm.assert_index_equal(result, expected)
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
result = idx.shift(np.array([1, -2, 3, -4]))
expected = PeriodIndex(['2011-02', '2010-12', 'NaT',
'2010-12'], freq='M', name='idx')
tm.assert_index_equal(result, expected)
def test_negative_ordinals(self):
Period(ordinal=-1000, freq='A')
Period(ordinal=0, freq='A')
idx1 = PeriodIndex(ordinal=[-1, 0, 1], freq='A')
idx2 = PeriodIndex(ordinal=np.array([-1, 0, 1]), freq='A')
tm.assert_index_equal(idx1, idx2)
def test_pindex_fieldaccessor_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2012-03', '2012-04'], freq='D', name='name')
exp = Index([2011, 2011, -1, 2012, 2012], dtype=np.int64, name='name')
tm.assert_index_equal(idx.year, exp)
exp = Index([1, 2, -1, 3, 4], dtype=np.int64, name='name')
tm.assert_index_equal(idx.month, exp)
def test_pindex_qaccess(self):
pi = PeriodIndex(['2Q05', '3Q05', '4Q05', '1Q06', '2Q06'], freq='Q')
s = Series(np.random.rand(len(pi)), index=pi).cumsum()
# Todo: fix these accessors!
assert s['05Q4'] == s[2]
def test_numpy_repeat(self):
index = period_range('20010101', periods=2)
expected = PeriodIndex([Period('2001-01-01'), Period('2001-01-01'),
Period('2001-01-02'), Period('2001-01-02')])
tm.assert_index_equal(np.repeat(index, 2), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, index, 2, axis=1)
def test_pindex_multiples(self):
pi = PeriodIndex(start='1/1/11', end='12/31/11', freq='2M')
expected = PeriodIndex(['2011-01', '2011-03', '2011-05', '2011-07',
'2011-09', '2011-11'], freq='2M')
tm.assert_index_equal(pi, expected)
assert pi.freq == offsets.MonthEnd(2)
assert pi.freqstr == '2M'
pi = period_range(start='1/1/11', end='12/31/11', freq='2M')
tm.assert_index_equal(pi, expected)
assert pi.freq == offsets.MonthEnd(2)
assert pi.freqstr == '2M'
pi = period_range(start='1/1/11', periods=6, freq='2M')
tm.assert_index_equal(pi, expected)
assert pi.freq == offsets.MonthEnd(2)
assert pi.freqstr == '2M'
def test_iteration(self):
index = PeriodIndex(start='1/1/10', periods=4, freq='B')
result = list(index)
assert isinstance(result[0], Period)
assert result[0].freq == index.freq
def test_is_full(self):
index = PeriodIndex([2005, 2007, 2009], freq='A')
assert not index.is_full
index = PeriodIndex([2005, 2006, 2007], freq='A')
assert index.is_full
index = PeriodIndex([2005, 2005, 2007], freq='A')
assert not index.is_full
index = PeriodIndex([2005, 2005, 2006], freq='A')
assert index.is_full
index = PeriodIndex([2006, 2005, 2005], freq='A')
pytest.raises(ValueError, getattr, index, 'is_full')
assert index[:0].is_full
def test_with_multi_index(self):
# #1705
index = date_range('1/1/2012', periods=4, freq='12H')
index_as_arrays = [index.to_period(freq='D'), index.hour]
s = Series([0, 1, 2, 3], index_as_arrays)
assert isinstance(s.index.levels[0], PeriodIndex)
assert isinstance(s.index.values[0][0], Period)
def test_convert_array_of_periods(self):
rng = period_range('1/1/2000', periods=20, freq='D')
periods = list(rng)
result = pd.Index(periods)
assert isinstance(result, PeriodIndex)
def test_append_concat(self):
# #1815
d1 = date_range('12/31/1990', '12/31/1999', freq='A-DEC')
d2 = date_range('12/31/2000', '12/31/2009', freq='A-DEC')
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = pd.concat([s1, s2])
assert isinstance(result.index, PeriodIndex)
assert result.index[0] == s1.index[0]
def test_pickle_freq(self):
# GH2891
prng = period_range('1/1/2011', '1/1/2012', freq='M')
new_prng = tm.round_trip_pickle(prng)
assert new_prng.freq == offsets.MonthEnd()
assert new_prng.freqstr == 'M'
def test_map(self):
index = PeriodIndex([2005, 2007, 2009], freq='A')
result = index.map(lambda x: x + 1)
expected = index + 1
tm.assert_index_equal(result, expected)
result = index.map(lambda x: x.ordinal)
exp = Index([x.ordinal for x in index])
tm.assert_index_equal(result, exp)
@pytest.mark.parametrize('how', ['outer', 'inner', 'left', 'right'])
def test_join_self(self, how):
index = period_range('1/1/2000', periods=10)
joined = index.join(index, how=how)
assert index is joined
| mit |
ferris-wufei/toolbox | dm/dm_MMF/MMF.py | 1 | 3287 | # -*- coding: utf-8 -*-
"""
author: ferris
create: 2016-05-28
title: Max Mean Filters
function: find a group of one-sided interval filters for a dataframe that maximize the mean value of some column y
in the filtered subset, while keeping the size of the subset moderately large. Implemented with a linked list.
"""
import numpy as np
class Node:
def __init__(self, data=None, feature=None, cut=None, child=None, sign=None, method='max_mean'):
self.method = method
self.feature = feature
self.cut = cut
self.sign = sign
self.data = data
self.child = child
def train(df, d=None, y='target', rg=200, method='max_mean'):
"""
Args:
df: pandas DataFrame
        d: maximum number of filter nodes in the linked list; None means no depth limit
        y: column name of df whose mean is maximized
        rg: regularization parameter penalizing small subsets
        method: reserved parameter, stored on each node but not otherwise used
    Returns: head of a linked list; each non-terminal node stores a filtering condition and the dataframe after applying it
"""
if df.shape[0] == 0:
return None
# feature list, everything except y
ft = df.columns.drop(y)
# reached max depth
if d is not None and d <= 0:
return Node(data=df, method=method)
# trackers
bst_val = float('-inf')
bst_ft = None
bst_cut = None
bst_sub = None
bst_sign = None
for f in ft:
# unique values
f_val = df[f].unique()
if len(f_val) <= 1:
continue
for f_v in f_val:
# dataframe indices of left & right branches
l_idx = (df[f] <= f_v)
r_idx = (df[f] > f_v)
# size of left & right branches
l_size = sum(l_idx)
r_size = sum(r_idx)
if l_size == 0 or r_size == 0:
continue
# penalized mean of left & right branches
l_m = df[y][l_idx].mean() - (rg / np.power(l_size, 0.9))
r_m = df[y][r_idx].mean() - (rg / np.power(r_size, 0.9))
# update best choice
tmp_val = max(l_m, r_m)
if tmp_val >= bst_val:
bst_val = tmp_val
bst_ft = f
bst_cut = f_v
bst_sub = df[l_idx] if l_m >= r_m else df[r_idx]
bst_sign = '<=' if l_m >= r_m else '>'
# recurrence
if bst_sub is not None:
# subtract size
if d is not None:
d_new = d - 1
else:
d_new = None
# train the child with stronger penalization
child = train(bst_sub, d=d_new, y=y, rg=rg * 1.1, method=method)
return Node(data=bst_sub, feature=bst_ft, cut=bst_cut, child=child, sign=bst_sign, method=method)
# stop split
else:
print('training complete')
return Node(data=df, method=method)
def plot_txt(dt, y='target'):
"""
Args:
dt: chain object
y: column name to calculate metric
    Returns: None; prints a text representation of the node chain
"""
# reached terminal node
if not dt.child:
print("sample size: " + str(dt.data.shape[0]) + " mean value: " + str(dt.data[y].mean()))
# plot child
else:
print(str(dt.feature) + ' : ' + str(dt.sign) + str(dt.cut) + ' ---> ' + str(dt.data.shape[0]) + ' left')
plot_txt(dt.child, y=y)
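# A minimal usage sketch (not part of the original module): the column names
# and parameter values below are illustrative assumptions chosen only to
# exercise train() and plot_txt() on synthetic data.
if __name__ == '__main__':
    import pandas as pd
    rnd = np.random.RandomState(0)
    demo = pd.DataFrame({
        'x1': rnd.rand(200),
        'x2': rnd.randint(0, 5, 200),
        'target': rnd.rand(200),
    })
    # bias the target upward where x1 is large so that a filter is discoverable
    demo.loc[demo['x1'] > 0.7, 'target'] += 1.0
    chain = train(demo, d=3, y='target', rg=5)
    plot_txt(chain, y='target')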
| gpl-2.0 |
mwv/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
    gc.collect()
    clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
            time_to_fit, time_to_transform = bench_scikit_transformer(
                X, transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
neuropycon/ephypype | examples/plot_preprocessing.py | 1 | 6944 | """
.. _preproc_meeg:
===================
Preprocess MEG data
===================
The preprocessing pipeline runs the ICA algorithm for an
automatic removal of eye- and heart-related artefacts.
A report is automatically generated and can be used to correct
and/or fine-tune the correction in each subject.
The **input** data can be in **ds** or **fif** format.
"""
# Authors: Annalisa Pascarella <[email protected]>
# Mainak Jas <[email protected]>
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = 2
import os.path as op
import nipype.pipeline.engine as pe
import ephypype
from ephypype.nodes import create_iterator, create_datagrabber
from ephypype.datasets import fetch_omega_dataset
###############################################################################
# Let us fetch the data first. It is around a 675 MB download.
base_path = op.join(op.dirname(ephypype.__file__), '..', 'examples')
data_path = fetch_omega_dataset(base_path)
###############################################################################
# then read the parameters for experiment and preprocessing from a
# :download:`json <https://github.com/neuropycon/ephypype/tree/master/examples/params.json>`
# file and print it
import json # noqa
import pprint # noqa
params = json.load(open("params.json"))
pprint.pprint({'experiment parameters': params["general"]})
subject_ids = params["general"]["subject_ids"] # sub-003
session_ids = params["general"]["session_ids"] # ses-0001
NJOBS = params["general"]["NJOBS"]
data_type = params["general"]["data_type"]
pprint.pprint({'preprocessing parameters': params["preprocessing"]})
down_sfreq = params["preprocessing"]['down_sfreq']
l_freq = params["preprocessing"]['l_freq']
h_freq = params["preprocessing"]['h_freq']
ECG_ch_name = params["preprocessing"]['ECG_ch_name']
EoG_ch_name = params["preprocessing"]['EoG_ch_name']
variance = params["preprocessing"]['variance']
reject = params["preprocessing"]['reject']
###############################################################################
# Then, we create our workflow and specify the `base_dir` which tells
# nipype the directory in which to store the outputs.
# workflow directory within the `base_dir`
preproc_pipeline_name = 'preprocessing_workflow'
main_workflow = pe.Workflow(name=preproc_pipeline_name)
main_workflow.base_dir = data_path
###############################################################################
# Then we create a node to pass input filenames to DataGrabber from nipype
infosource = create_iterator(['subject_id', 'session_id'],
[subject_ids, session_ids])
###############################################################################
# and a node to grab data. The template_args in this node iterate upon
# the values in the infosource node
template_path = '*%s/%s/meg/%s*rest*0_60*raw.fif'
template_args = [['subject_id', 'session_id', 'subject_id']]
datasource = create_datagrabber(data_path, template_path, template_args)
###############################################################################
# Ephypype creates for us a pipeline which can be connected to these
# nodes we created. The preprocessing pipeline is implemented by the function
# :func:`ephypype.pipelines.preproc_meeg.create_pipeline_preproc_meeg`, thus to
# instantiate this pipeline node, we import it and pass our
# parameters to it.
# The preprocessing pipeline contains two nodes that are based on the MNE
# Python functions performing the decomposition of the MEG/EEG signal using an
# |ICA| algorithm.
#
# .. |ICA| raw:: html
#
# <a href="https://mne.tools/stable/auto_tutorials/preprocessing/plot_40_artifact_correction_ica.html" target="_blank">ICA</a>
#
# In particular, the two nodes are:
#
# * :class:`ephypype.interfaces.mne.preproc.PreprocFif` performs filtering on the raw data
# * :class:`ephypype.interfaces.mne.preproc.CompIca` computes ICA solution on raw fif data
from ephypype.pipelines import create_pipeline_preproc_meeg # noqa
preproc_workflow = create_pipeline_preproc_meeg(
data_path, l_freq=l_freq, h_freq=h_freq, down_sfreq=down_sfreq,
variance=variance, ECG_ch_name=ECG_ch_name, EoG_ch_name=EoG_ch_name,
data_type=data_type)
###############################################################################
# We then connect the nodes two at a time. First, we connect the two outputs
# (subject_id and session_id) of the infosource node to the datasource node.
# So, these two nodes taken together can grab data.
main_workflow.connect(infosource, 'subject_id', datasource, 'subject_id')
main_workflow.connect(infosource, 'session_id', datasource, 'session_id')
###############################################################################
# Similarly, for the inputnode of the preproc_workflow. Things will become
# clearer in a moment when we plot the graph of the workflow.
main_workflow.connect(infosource, 'subject_id',
preproc_workflow, 'inputnode.subject_id')
main_workflow.connect(datasource, 'raw_file',
preproc_workflow, 'inputnode.raw_file')
###############################################################################
# To do so, we first write the workflow graph (optional)
main_workflow.write_graph(graph2use='colored') # colored
###############################################################################
# and visualize it. Take a moment to pause and notice how the connections
# here correspond to how we connected the nodes.
import matplotlib.pyplot as plt # noqa
img = plt.imread(op.join(data_path, preproc_pipeline_name, 'graph.png'))
plt.figure(figsize=(6, 6))
plt.imshow(img)
plt.axis('off')
###############################################################################
# Finally, we are now ready to execute our workflow.
main_workflow.config['execution'] = {'remove_unnecessary_outputs': 'false'}
# Run workflow locally on 1 CPU
main_workflow.run(plugin='MultiProc', plugin_args={'n_procs': NJOBS})
###############################################################################
# The output is the preprocessed data stored in the workflow directory
# defined by `base_dir`.
#
# It is good practice to inspect the report file saved in the same directory to look at
# the excluded ICA components. It is also possible to include and exclude more
# components by using either a jupyter notebook or the preprocessing pipeline
# with different flag parameters.
###############################################################################
#
import mne # noqa
from ephypype.gather import get_results # noqa
ica_files, raw_files = get_results(main_workflow.base_dir,
main_workflow.name, pipeline='ica')
for ica_file, raw_file in zip(ica_files, raw_files):
raw = mne.io.read_raw_fif(raw_file)
ica = mne.preprocessing.read_ica(ica_file)
ica.plot_properties(raw, picks=ica.exclude, figsize=[4.5, 4.5])
| bsd-3-clause |