repo_name (string, length 6-112) | path (string, length 4-204) | copies (string, length 1-3) | size (string, length 4-6) | content (string, length 714-810k) | license (string, 15 classes) |
---|---|---|---|---|---|
jat255/hyperspy
|
hyperspy/drawing/_widgets/range.py
|
4
|
22490
|
# -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from matplotlib.widgets import SpanSelector
import inspect
import logging
from hyperspy.drawing.widgets import ResizableDraggableWidgetBase
from hyperspy.events import Events, Event
_logger = logging.getLogger(__name__)
# Track if we have already warned when the widget is out of range
already_warn_out_of_range = False
def in_interval(number, interval):
return interval[0] <= number <= interval[1]
class RangeWidget(ResizableDraggableWidgetBase):
"""RangeWidget is a span-patch based widget, which can be
dragged and resized by mouse/keys. Basically a wrapper for
ModifiablepanSelector so that it conforms to the common widget interface.
For optimized changes of geometry, the class implements two methods
'set_bounds' and 'set_ibounds', to set the geometry of the rectangle by
value and index space coordinates, respectivly.
Implements the internal method _validate_geometry to make sure the patch
will always stay within bounds.
"""
def __init__(self, axes_manager, ax=None, alpha=0.5, **kwargs):
# Parse all kwargs for the matplotlib SpanSelector
self._SpanSelector_kwargs = {}
for key in inspect.signature(SpanSelector).parameters.keys():
if key in kwargs:
self._SpanSelector_kwargs[key] = kwargs.pop(key)
super(RangeWidget, self).__init__(axes_manager, alpha=alpha, **kwargs)
self.span = None
def set_on(self, value):
if value is not self.is_on() and self.ax is not None:
if value is True:
self._add_patch_to(self.ax)
self.connect(self.ax)
elif value is False:
self.disconnect()
try:
self.ax.figure.canvas.draw_idle()
except BaseException: # figure does not exist
pass
if value is False:
self.ax = None
self.__is_on = value
def _add_patch_to(self, ax):
self.span = ModifiableSpanSelector(ax, **self._SpanSelector_kwargs)
self.span.set_initial(self._get_range())
self.span.bounds_check = True
self.span.snap_position = self.snap_position
self.span.snap_size = self.snap_size
self.span.can_switch = True
self.span.events.changed.connect(self._span_changed, {'obj': 'widget'})
self.span.step_ax = self.axes[0]
self.span.tolerance = 5
self.patch = [self.span.rect]
self.patch[0].set_color(self.color)
self.patch[0].set_alpha(self.alpha)
def _span_changed(self, widget):
r = self._get_range()
pr = widget.range
if r != pr:
dx = self.axes[0].scale
x = pr[0] + 0.5 * dx
w = pr[1] + 0.5 * dx - x
old_position, old_size = self.position, self.size
self._pos = np.array([x])
self._size = np.array([w])
self._validate_geometry()
if self._pos != np.array([x]) or self._size != np.array([w]):
self._update_patch_size()
self._apply_changes(old_size=old_size, old_position=old_position)
def _get_range(self):
p = self._pos[0]
w = self._size[0]
offset = self.axes[0].scale
p -= 0.5 * offset
return (p, p + w)
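# Worked example of the half-scale offset above (illustrative numbers): with
# axis scale 0.1, a widget at position 1.0 with size 0.5 maps to the span
# (0.95, 1.45) -- the span edges sit half a pixel outside the bin centres so
# that whole pixels are highlighted on screen.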
def _parse_bounds_args(self, args, kwargs):
if len(args) == 1:
return args[0]
elif len(args) == 4:
return args
elif len(kwargs) == 1 and 'bounds' in kwargs:
return kwargs['bounds']
else:
x = kwargs.pop('x', kwargs.pop('left', self._pos[0]))
if 'right' in kwargs:
w = kwargs.pop('right') - x
else:
w = kwargs.pop('w', kwargs.pop('width', self._size[0]))
return x, w
def set_ibounds(self, *args, **kwargs):
"""
Set bounds by indices. Bounds can either be specified in order left,
bottom, width, height; or by keywords:
* 'bounds': tuple (left, width)
OR
* 'x'/'left'
* 'w'/'width', alternatively 'right'
If specifying with keywords, any unspecified dimensions will be kept
constant (note: width will be kept, not right).
"""
ix, iw = self._parse_bounds_args(args, kwargs)
x = self.axes[0].index2value(ix)
w = self._i2v(self.axes[0], ix + iw) - x
old_position, old_size = self.position, self.size
self._pos = np.array([x])
self._size = np.array([w])
self._apply_changes(old_size=old_size, old_position=old_position)
def set_bounds(self, *args, **kwargs):
"""
Set bounds by values. Bounds can either be specified in order left,
bottom, width, height; or by keywords:
* 'bounds': tuple (left, width)
OR
* 'x'/'left'
* 'w'/'width', alternatively 'right' (x+w)
If specifying with keywords, any unspecified dimensions will be kept
constant (note: width will be kept, not right).
"""
global already_warn_out_of_range
def warn(obj, parameter, value):
global already_warn_out_of_range
if not already_warn_out_of_range:
_logger.info('{}: {} is out of range. It is therefore set '
'to the value of {}'.format(obj, parameter, value))
already_warn_out_of_range = True
x, w = self._parse_bounds_args(args, kwargs)
l0, h0 = self.axes[0].low_value, self.axes[0].high_value
scale = self.axes[0].scale
in_range = 0
if x < l0:
x = l0
warn(self, '`x` or `left`', x)
elif h0 <= x:
x = h0 - scale
warn(self, '`x` or `left`', x)
else:
in_range += 1
if w < scale:
w = scale
warn(self, '`width` or `right`', w)
elif not (l0 + scale <= x + w <= h0 + scale):
if self.size != np.array([w]): # resize
w = h0 + scale - self.position[0]
warn(self, '`width` or `right`', w)
if self.position != np.array([x]): # moved
x = h0 + scale - self.size[0]
warn(self, '`x` or `left`', x)
else:
in_range += 1
# if we are in range again, reset `already_warn_out_of_range` to False
if in_range == 2 and already_warn_out_of_range:
_logger.info('{} back in range.'.format(self.__class__.__name__))
already_warn_out_of_range = False
old_position, old_size = self.position, self.size
self._pos = np.array([x])
self._size = np.array([w])
self._apply_changes(old_size=old_size, old_position=old_position)
def _update_patch_position(self):
self._update_patch_geometry()
def _update_patch_size(self):
self._update_patch_geometry()
def _update_patch_geometry(self):
if self.is_on() and self.span is not None:
self.span.range = self._get_range()
def disconnect(self):
super(RangeWidget, self).disconnect()
if self.span:
self.span.turn_off()
self.span = None
def _set_snap_position(self, value):
super(RangeWidget, self)._set_snap_position(value)
self.span.snap_position = value
self._update_patch_geometry()
def _set_snap_size(self, value):
super(RangeWidget, self)._set_snap_size(value)
self.span.snap_size = value
self._update_patch_size()
def _validate_geometry(self, x1=None):
"""Make sure the entire patch always stays within bounds. First the
position (either from position property or from x1 argument), is
limited within the bounds. Then, if the right edge are out of
bounds, the position is changed so that they will be at the limit.
The modified geometry is stored, but no change checks are performed.
Call _apply_changes after this in order to process any changes (the
size might change if it is set larger than the bounds size).
"""
xaxis = self.axes[0]
# Make sure widget size is not larger than axes
self._size[0] = min(self._size[0], xaxis.size * xaxis.scale)
# Make sure x1 is within bounds
if x1 is None:
x1 = self._pos[0] # Get it if not supplied
if x1 < xaxis.low_value:
x1 = xaxis.low_value
elif x1 > xaxis.high_value:
x1 = xaxis.high_value
# Make sure x2 is with upper bound.
# If not, keep dims, and change x1!
x2 = x1 + self._size[0]
if x2 > xaxis.high_value + xaxis.scale:
x2 = xaxis.high_value + xaxis.scale
x1 = x2 - self._size[0]
self._pos = np.array([x1])
# Apply snaps if appropriate
if self.snap_position:
self._do_snap_position()
if self.snap_size:
self._do_snap_size()
class ModifiableSpanSelector(SpanSelector):
def __init__(self, ax, **kwargs):
onselect = kwargs.pop('onselect', self.dummy)
direction = kwargs.pop('direction', 'horizontal')
useblit = kwargs.pop('useblit', ax.figure.canvas.supports_blit)
SpanSelector.__init__(self, ax, onselect, direction=direction,
useblit=useblit, span_stays=False, **kwargs)
# The tolerance in points to pick the rectangle sizes
self.tolerance = 2
self.on_move_cid = None
self._range = None
self.step_ax = None
self.bounds_check = False
self._button_down = False
self.snap_size = False
self.snap_position = False
self.events = Events()
self.events.changed = Event(doc="""
Event that triggers when the widget was changed.
Arguments:
----------
obj:
The widget that changed
""", arguments=['obj'])
self.events.moved = Event(doc="""
Event that triggers when the widget was moved.
Arguments:
----------
obj:
The widget that changed
""", arguments=['obj'])
self.events.resized = Event(doc="""
Event that triggers when the widget was resized.
Arguments:
----------
obj:
The widget that changed
""", arguments=['obj'])
self.can_switch = False
def dummy(self, *args, **kwargs):
pass
def _get_range(self):
self.update_range()
return self._range
def _set_range(self, value):
self.update_range()
if self._range != value:
resized = (self._range[1] - self._range[0]) != (value[1] - value[0])
moved = self._range[0] != value[0]
self._range = value
if moved:
self._set_span_x(value[0])
self.events.moved.trigger(self)
if resized:
self._set_span_width(value[1] - value[0])
self.events.resized.trigger(self)
if moved or resized:
self.draw_patch()
self.events.changed.trigger(self)
range = property(_get_range, _set_range)
def _set_span_x(self, value):
if self.direction == 'horizontal':
self.rect.set_x(value)
else:
self.rect.set_y(value)
def _set_span_width(self, value):
if self.direction == 'horizontal':
self.rect.set_width(value)
else:
self.rect.set_height(value)
def _get_span_x(self):
if self.direction == 'horizontal':
return self.rect.get_x()
else:
return self.rect.get_y()
def _get_span_width(self):
if self.direction == 'horizontal':
return self.rect.get_width()
else:
return self.rect.get_height()
def _get_mouse_position(self, event):
if self.direction == 'horizontal':
return event.xdata
else:
return event.ydata
def set_initial(self, initial_range=None):
"""
Remove selection events, set the spanner, and go to modify mode.
"""
if initial_range is not None:
self.range = initial_range
self.disconnect_events()
# And connect to the new ones
self.connect_event('button_press_event', self.mm_on_press)
self.connect_event('button_release_event', self.mm_on_release)
self.connect_event('draw_event', self.update_background)
self.rect.set_visible(True)
self.rect.contains = self.contains
def update(self, *args):
# Override the SpanSelector `update` method to properly blit all
# artists before we go to "modify mode" in `set_initial`.
self.draw_patch()
def draw_patch(self, *args):
"""Update the patch drawing.
"""
try:
if self.useblit and hasattr(self.ax, 'hspy_fig'):
self.ax.hspy_fig._update_animated()
elif self.ax.figure is not None:
self.ax.figure.canvas.draw_idle()
except AttributeError:
pass # When figure is None, typically when closing
def contains(self, mouseevent):
x, y = self.rect.get_transform().inverted().transform_point(
(mouseevent.x, mouseevent.y))
v = x if self.direction == 'vertical' else y
# Assert y is correct first
if not (0.0 <= v <= 1.0):
return False, {}
x_pt = self._get_point_size_in_data_units()
hit = self._range[0] - x_pt, self._range[1] + x_pt
if hit[0] < self._get_mouse_position(mouseevent) < hit[1]:
return True, {}
return False, {}
def release(self, event):
"""When the button is released, the span stays in the screen and the
iteractivity machinery passes to modify mode"""
if self.pressv is None or (self.ignore(
event) and not self._button_down):
return
self._button_down = False
self.update_range()
self.set_initial()
def _get_point_size_in_data_units(self):
# Calculate the point size in data units
invtrans = self.ax.transData.inverted()
(x, y) = (1, 0) if self.direction == 'horizontal' else (0, 1)
x_pt = self.tolerance * abs((invtrans.transform((x, y)) -
invtrans.transform((0, 0)))[y])
return x_pt
def mm_on_press(self, event):
if self.ignore(event) and not self._button_down:
return
self._button_down = True
x_pt = self._get_point_size_in_data_units()
# Determine the size of the regions for moving and stretching
self.update_range()
left_region = self._range[0] - x_pt, self._range[0] + x_pt
right_region = self._range[1] - x_pt, self._range[1] + x_pt
middle_region = self._range[0] + x_pt, self._range[1] - x_pt
if in_interval(self._get_mouse_position(event), left_region) is True:
self.on_move_cid = \
self.canvas.mpl_connect('motion_notify_event',
self.move_left)
elif in_interval(self._get_mouse_position(event), right_region):
self.on_move_cid = \
self.canvas.mpl_connect('motion_notify_event',
self.move_right)
elif in_interval(self._get_mouse_position(event), middle_region):
self.pressv = self._get_mouse_position(event)
self.on_move_cid = \
self.canvas.mpl_connect('motion_notify_event',
self.move_rect)
else:
return
def update_range(self):
self._range = (self._get_span_x(),
self._get_span_x() + self._get_span_width())
def switch_left_right(self, x, left_to_right):
if left_to_right:
if self.step_ax is not None:
if x > self.step_ax.high_value + self.step_ax.scale:
return
w = self._range[1] - self._range[0]
r0 = self._range[1]
self._set_span_x(r0)
r1 = r0 + w
self.canvas.mpl_disconnect(self.on_move_cid)
self.on_move_cid = \
self.canvas.mpl_connect('motion_notify_event',
self.move_right)
else:
if self.step_ax is not None:
if x < self.step_ax.low_value - self.step_ax.scale:
return
w = self._range[1] - self._range[0]
r1 = self._range[0]
r0 = r1 - w
self.canvas.mpl_disconnect(self.on_move_cid)
self.on_move_cid = \
self.canvas.mpl_connect('motion_notify_event',
self.move_left)
self._range = (r0, r1)
def move_left(self, event):
if self._button_down is False or self.ignore(event):
return
x = self._get_mouse_position(event)
if self.step_ax is not None:
if (self.bounds_check and
x < self.step_ax.low_value - self.step_ax.scale):
return
if self.snap_position:
snap_offset = self.step_ax.offset - 0.5 * self.step_ax.scale
elif self.snap_size:
snap_offset = self._range[1]
if self.snap_position or self.snap_size:
rem = (x - snap_offset) % self.step_ax.scale
if rem / self.step_ax.scale < 0.5:
rem = -rem
else:
rem = self.step_ax.scale - rem
x += rem
# Do not move the left edge beyond the right one.
if x >= self._range[1]:
if self.can_switch and x > self._range[1]:
self.switch_left_right(x, True)
self.move_right(event)
return
width_increment = self._range[0] - x
if self._get_span_width() + width_increment <= 0:
return
self._set_span_x(x)
self._set_span_width(self._get_span_width() + width_increment)
self.update_range()
self.events.moved.trigger(self)
self.events.resized.trigger(self)
self.events.changed.trigger(self)
if self.onmove_callback is not None:
self.onmove_callback(*self._range)
self.draw_patch()
def move_right(self, event):
if self._button_down is False or self.ignore(event):
return
x = self._get_mouse_position(event)
if self.step_ax is not None:
if (self.bounds_check and
x > self.step_ax.high_value + self.step_ax.scale):
return
if self.snap_size:
snap_offset = self._range[0]
rem = (x - snap_offset) % self.step_ax.scale
if rem / self.step_ax.scale < 0.5:
rem = -rem
else:
rem = self.step_ax.scale - rem
x += rem
# Do not move the right edge beyond the left one.
if x <= self._range[0]:
if self.can_switch and x < self._range[0]:
self.switch_left_right(x, False)
self.move_left(event)
return
width_increment = x - self._range[1]
if self._get_span_width() + width_increment <= 0:
return
self._set_span_width(self._get_span_width() + width_increment)
self.update_range()
self.events.resized.trigger(self)
self.events.changed.trigger(self)
if self.onmove_callback is not None:
self.onmove_callback(*self._range)
self.draw_patch()
def move_rect(self, event):
if self._button_down is False or self.ignore(event):
return
x_increment = self._get_mouse_position(event) - self.pressv
if self.step_ax is not None:
if (self.bounds_check
and self._range[0] <= self.step_ax.low_value
and self._get_mouse_position(event) <= self.pressv):
return
if (self.bounds_check
and self._range[1] >= self.step_ax.high_value
and self._get_mouse_position(event) >= self.pressv):
return
if self.snap_position:
rem = x_increment % self.step_ax.scale
if rem / self.step_ax.scale < 0.5:
rem = -rem
else:
rem = self.step_ax.scale - rem
x_increment += rem
self._set_span_x(self._get_span_x() + x_increment)
self.update_range()
self.pressv += x_increment
self.events.moved.trigger(self)
self.events.changed.trigger(self)
if self.onmove_callback is not None:
self.onmove_callback(*self._range)
self.draw_patch()
def mm_on_release(self, event):
if self._button_down is False or self.ignore(event):
return
self._button_down = False
self.canvas.mpl_disconnect(self.on_move_cid)
self.on_move_cid = None
def turn_off(self):
self.disconnect_events()
if self.on_move_cid is not None:
self.canvas.mpl_disconnect(self.on_move_cid)
self.ax.patches.remove(self.rect)
self.ax.figure.canvas.draw_idle()
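# Hedged sketch of driving ModifiableSpanSelector on a bare matplotlib axes,
# outside HyperSpy (`fig`/`ax` are illustrative names; this targets the old
# SpanSelector API with the `span_stays` keyword used above):
#
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     ax.plot(range(100))
#     span = ModifiableSpanSelector(ax)
#     span.set_initial((20, 40))   # place the span and enter "modify mode"
#     print(span.range)            # -> (20, 40)
#     plt.show()                   # the span can now be dragged/resized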
|
gpl-3.0
|
iaalm/ipython-xmpp
|
IpyAdapter.py
|
1
|
6657
|
import logging
import signal
from queue import Empty
from traitlets import (
Dict, Any
)
from traitlets.config import catch_config_error
from IPython.utils.warn import error
from jupyter_core.application import JupyterApp, base_aliases, base_flags, NoStart
from jupyter_client.consoleapp import (
JupyterConsoleApp, app_aliases, app_flags,
)
from jupyter_console.interactiveshell import ZMQTerminalInteractiveShell
from jupyter_console import __version__
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
_examples = """
jupyter console # start the ZMQ-based console
jupyter console --existing # connect to an existing ipython session
"""
#-----------------------------------------------------------------------------
# Flags and Aliases
#-----------------------------------------------------------------------------
# copy flags from mixin:
flags = dict(base_flags)
# start with mixin frontend flags:
frontend_flags = dict(app_flags)
# update full dict with frontend flags:
flags.update(frontend_flags)
# copy flags from mixin
aliases = dict(base_aliases)
# start with mixin frontend flags
frontend_aliases = dict(app_aliases)
# load updated frontend flags into full dict
aliases.update(frontend_aliases)
aliases['colors'] = 'InteractiveShell.colors'
# get flags&aliases into sets, and remove a couple that
# shouldn't be scrubbed from backend flags:
frontend_aliases = set(frontend_aliases.keys())
frontend_flags = set(frontend_flags.keys())
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class ZMQTerminalIPythonApp(JupyterApp, JupyterConsoleApp):
name = "jupyter-console"
version = __version__
"""Start a terminal frontend to the IPython zmq kernel."""
description = """
The Jupyter terminal-based Console.
This launches a Console application inside a terminal.
The Console supports various extra features beyond the traditional
single-process Terminal IPython shell, such as connecting to an
existing ipython session, via:
jupyter console --existing
where the previous session could have been created by another ipython
console, an ipython qtconsole, or by opening an ipython notebook.
"""
examples = _examples
classes = [ZMQTerminalInteractiveShell] + JupyterConsoleApp.classes
flags = Dict(flags)
aliases = Dict(aliases)
frontend_aliases = Any(frontend_aliases)
frontend_flags = Any(frontend_flags)
subcommands = Dict()
force_interact = True
def parse_command_line(self, argv=None):
super(ZMQTerminalIPythonApp, self).parse_command_line(argv)
self.build_kernel_argv(self.extra_args)
def init_shell(self):
JupyterConsoleApp.initialize(self)
# relay sigint to kernel
signal.signal(signal.SIGINT, self.handle_sigint)
self.shell = ZMQTerminalInteractiveShell.instance(parent=self,
display_banner=False,
manager=self.kernel_manager,
client=self.kernel_client,
)
self.shell.own_kernel = not self.existing
def init_gui_pylab(self):
# no-op, because we don't want to import matplotlib in the frontend.
pass
def handle_sigint(self, *args):
if self.shell._executing:
if self.kernel_manager:
self.kernel_manager.interrupt_kernel()
else:
self.shell.write_err('\n')
error("Cannot interrupt kernels we didn't start.\n")
else:
# raise the KeyboardInterrupt if we aren't waiting for execution,
# so that the interact loop advances, and prompt is redrawn, etc.
raise KeyboardInterrupt
@catch_config_error
def initialize(self, argv=None):
"""Do actions after construct, but before starting the app."""
super(ZMQTerminalIPythonApp, self).initialize(argv)
if self._dispatching:
return
# create the shell
self.init_shell()
# and draw the banner
self.init_banner()
def init_banner(self):
"""optionally display the banner"""
self.shell.show_banner()
# Make sure there is a space below the banner.
if self.log_level <= logging.INFO: print()
def start(self):
# JupyterApp.start dispatches on NoStart
super(ZMQTerminalIPythonApp, self).start()
self.log.debug("Starting the jupyter console mainloop...")
#self.shell.mainloop()
def run(self, cmd, callback):
# TODO: read jupyter_console.interactiveshell.ZMQTerminalInteractiveShell.interactive
# and avoid using a closure like this
def fun(self, msg_id=''):
"""Process messages on the IOPub channel
This method consumes and processes messages on the IOPub channel,
such as stdout, stderr, execute_result and status.
It only displays output that is caused by this session.
"""
while self.client.iopub_channel.msg_ready():
sub_msg = self.client.iopub_channel.get_msg()
msg_type = sub_msg['header']['msg_type']
parent = sub_msg["parent_header"]
if self.include_output(sub_msg):
if msg_type == 'status':
self._execution_state = sub_msg["content"]["execution_state"]
elif msg_type == 'stream':
callback(sub_msg["content"]["text"])
elif msg_type == 'execute_result':
sendback_multimedia(sub_msg["content"]["data"], callback)
elif msg_type == 'error':
callback('Err')
for frame in sub_msg["content"]["traceback"]:
#print(frame)
callback(str(frame))
self.shell.__class__.handle_iopub = fun
if not self.shell.wait_for_kernel(self.shell.kernel_timeout):
print('kernel error')
self.shell.run_cell(cmd)
def sendback_multimedia(msg, callback):
for mime_type in msg:
if mime_type == 'text/plain':
callback('Out:' + msg[mime_type])
else:
callback('Out:' + str(msg))
def init_ipy():
ins = ZMQTerminalIPythonApp.instance()
ins.initialize()
ins.start()
return ins
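# Hedged usage sketch of this adapter (the XMPP side is out of scope here;
# `print` stands in for whatever callback forwards kernel output to a chat):
#
#     ins = init_ipy()          # start the console app and its kernel
#     ins.run('1 + 1', print)   # roughly prints "Out:2" via sendback_multimedia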
|
mit
|
NSLS-II-HXN/PyXRF
|
pyxrf/model/draw_image_rgb.py
|
1
|
31402
|
from __future__ import (absolute_import, division,
print_function)
import numpy as np
import math
from functools import partial
from matplotlib.figure import Figure, Axes
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.ticker as mticker
from mpl_toolkits.axes_grid1.axes_rgb import make_rgb_axes
from atom.api import Atom, Str, Typed, Int, List, Dict, Bool
from ..core.utils import normalize_data_by_scaler, grid_interpolate
from ..core.xrf_utils import check_if_eline_supported
from .draw_image import DrawImageAdvanced
import logging
logger = logging.getLogger(__name__)
np.seterr(divide='ignore', invalid='ignore') # turn off warning on invalid division
class DrawImageRGB(Atom):
"""
This class draws RGB image.
Attributes
----------
fig : object
matplotlib Figure
ax : Axes
The `Axes` object of matplotlib
ax_r : Axes
The `Axes` object to add the red-channel artist to
ax_g : Axes
The `Axes` object to add the green-channel artist to
ax_b : Axes
The `Axes` object to add the blue-channel artist to
file_name : str
stat_dict : dict
determine which image to show
img_dict : dict
multiple data sets to plot, such as fit data, or roi data
img_dict_keys : list
data_opt : int
index to show which data is chosen to plot
dict_to_plot : dict
selected data dict to plot, i.e., fitting data or roi is selected
map_keys : list
keys of dict_to_plot
color_opt : str
orange or gray plot
scaler_norm_dict : dict
scaler normalization data, from img_dict
scaler_items : list
keys of scaler_norm_dict
scaler_name_index : int
index to select on GUI level
scaler_data : None or numpy array
selected scaler data
pixel_or_pos : int
index to choose plot with pixel (== 0) or with positions (== 1)
grid_interpolate: bool
choose to interpolate 2D image in terms of x,y or not
plot_all : Bool
to control plot all of the data or not
"""
# Reference to FileIOModel
io_model = Typed(object)
fig = Typed(Figure)
ax = Typed(Axes)
ax_r = Typed(Axes)
ax_g = Typed(Axes)
ax_b = Typed(Axes)
data_opt = Int(0)
img_title = Str()
# plot_opt = Int(0)
# plot_item = Str()
dict_to_plot = Dict()
map_keys = List()
scaler_norm_dict = Dict()
scaler_items = List()
scaler_name_index = Int()
scaler_data = Typed(object)
pixel_or_pos = Int(0)
grid_interpolate = Bool(False)
plot_all = Bool(False)
limit_dict = Dict()
range_dict = Dict()
# 'stat_dict' is legacy from 'DrawImageAdvanced' class. It is not used here,
# but it may be repurposed in the future if multicolor map presentation is developed
stat_dict = Dict()
# Contains dictionary {"red": <key>, "green": <key>, "blue": <key>}, key is the key
# from the dictionary 'self.dict_to_plot' or None.
rgb_keys = List(str) # The list of keys in 'rgb_dict'
rgb_dict = Dict()
# Reference used to access some fields
img_model_adv = Typed(DrawImageAdvanced)
# Variable that indicates whether quantitative normalization should be applied to data
# Associated with 'Quantitative' checkbox
quantitative_normalization = Bool(False)
rgb_name_list = List() # List of names for RGB channels printed on the plot
rgb_limit = Dict()
name_not_scalable = List()
def __init__(self, *, io_model, img_model_adv):
self.io_model = io_model
self.img_model_adv = img_model_adv
self.fig = plt.figure(figsize=(3, 2))
self.rgb_name_list = ['R', 'G', 'B']
# Do not apply scaler norm on following data
self.name_not_scalable = ['r2_adjust', 'r_factor', 'alive', 'dead', 'elapsed_time',
'scaler_alive', 'i0_time', 'time', 'time_diff', 'dwell_time']
self.rgb_keys = ["red", "green", "blue"]
self._init_rgb_dict()
def img_dict_updated(self, change):
"""
Observer function to be connected to the fileio model
in the top-level gui.py startup
Parameters
----------
change : dict
change['value'] is True if 'io_model.img_dict' was updated, False - ignore
"""
if change['value']:
self.select_dataset(self.io_model.img_dict_default_selected_item)
self.init_plot_status()
def init_plot_status(self):
# init of pos values
self.set_pixel_or_pos(0)
# init of scaler for normalization
self.scaler_name_index = 0
scaler_groups = [v for v in list(self.io_model.img_dict.keys()) if 'scaler' in v]
if len(scaler_groups) > 0:
# self.scaler_group_name = scaler_groups[0]
self.scaler_norm_dict = self.io_model.img_dict[scaler_groups[0]]
# for GUI purpose only
self.scaler_items = []
self.scaler_items = list(self.scaler_norm_dict.keys())
self.scaler_items.sort()
self.scaler_data = None
logger.debug('The following groups are included for RGB image display: {}'.
format(self.io_model.img_dict_keys))
self.show_image()
def select_dataset(self, dataset_index):
"""
Select a dataset. Meaning of the index: 0 - no dataset is selected,
1, 2, ... - the dataset with index 0, 1, ... is selected.
Parameters
----------
dataset_index: int
index of the selected dataset
"""
self.data_opt = dataset_index
try:
if self.data_opt == 0:
self.dict_to_plot = {}
self.map_keys.clear()
self.init_limits_and_stat()
self.img_title = ''
elif self.data_opt > 0:
plot_item = self._get_current_plot_item()
self.img_title = str(plot_item)
self.dict_to_plot = self.io_model.img_dict[plot_item]
# for GUI purpose only
self.set_map_keys()
self.init_limits_and_stat()
# Select the first 3 entries for RGB display
for n in range(min(len(self.rgb_keys), len(self.map_keys))):
self.rgb_dict[self.rgb_keys[n]] = self.map_keys[n]
except IndexError:
pass
# Redraw image
self.show_image()
def set_map_keys(self):
"""
Create sorted list of map keys. The list starts with sorted sequence of emission lines,
followed by the sorted list of scalers and other maps.
"""
self.map_keys.clear()
# The key to use with 'img_dict', the name of the current dataset.
plot_item = self._get_current_plot_item()
keys_unsorted = list(self.io_model.img_dict[plot_item].keys())
if len(keys_unsorted) != len(set(keys_unsorted)):
logger.warning("DrawImageAdvanced:set_map_keys(): repeated keys "
f"in the dictionary 'img_dict': {keys_unsorted}")
keys_elines, keys_scalers = [], []
for key in keys_unsorted:
if check_if_eline_supported(key): # Check if 'key' is an emission line (such as "Ca_K")
keys_elines.append(key)
else:
keys_scalers.append(key)
keys_elines.sort()
keys_scalers.sort()
self.map_keys = keys_elines + keys_scalers
def get_selected_scaler_name(self):
if self.scaler_name_index == 0:
return None
else:
return self.scaler_items[self.scaler_name_index - 1]
def set_scaler_index(self, scaler_index):
self.scaler_name_index = scaler_index
if self.scaler_name_index == 0:
self.scaler_data = None
else:
try:
scaler_name = self.scaler_items[self.scaler_name_index-1]
except IndexError:
scaler_name = None
if scaler_name:
self.scaler_data = self.scaler_norm_dict[scaler_name]
logger.info('Use scaler data to normalize, '
'and the shape of scaler data is {}, '
'with (low, high) as ({}, {})'.format(self.scaler_data.shape,
np.min(self.scaler_data),
np.max(self.scaler_data)))
self.set_low_high_value() # reset low high values based on normalization
self.show_image()
def _get_current_plot_item(self):
"""Get the key for the current plot item (use in dictionary 'img_dict')"""
return self.io_model.img_dict_keys[self.data_opt - 1]
def set_pixel_or_pos(self, pixel_or_pos):
self.pixel_or_pos = pixel_or_pos
self.show_image()
def set_grid_interpolate(self, grid_interpolate):
self.grid_interpolate = grid_interpolate
self.show_image()
def enable_quantitative_normalization(self, enable):
"""
Enable/Disable quantitative normalization.
Parameters
----------
enable: bool
Enable quantitative normalization if True, disable if False.
"""
self.quantitative_normalization = bool(enable)
self.set_low_high_value() # reset low high values based on normalization
self.show_image()
def set_low_high_value(self):
"""Set default low and high values based on normalization for each image.
"""
# do not apply scaler norm on not scalable data
self.range_dict.clear()
for data_name in self.dict_to_plot.keys():
if self.quantitative_normalization:
# Quantitative normalization
data_arr, _ = self.img_model_adv.param_quant_analysis.apply_quantitative_normalization(
data_in=self.dict_to_plot[data_name],
scaler_dict=self.scaler_norm_dict,
scaler_name_default=self.get_selected_scaler_name(),
data_name=data_name,
name_not_scalable=self.name_not_scalable)
else:
# Normalize by the selected scaler in a regular way
data_arr = normalize_data_by_scaler(data_in=self.dict_to_plot[data_name],
scaler=self.scaler_data,
data_name=data_name,
name_not_scalable=self.name_not_scalable)
lowv, highv = np.min(data_arr), np.max(data_arr)
# Create some 'artificially' small range in case the array is constant
if lowv == highv:
lowv -= 0.005
highv += 0.005
self.range_dict[data_name] = {'low': lowv, 'low_default': lowv,
'high': highv, 'high_default': highv}
def reset_low_high(self, name):
"""Reset low and high value to default based on normalization.
"""
self.range_dict[name]['low'] = self.range_dict[name]['low_default']
self.range_dict[name]['high'] = self.range_dict[name]['high_default']
self.limit_dict[name]['low'] = 0.0
self.limit_dict[name]['high'] = 100.0
self.show_image()
def _init_rgb_dict(self):
self.rgb_dict = {_: None for _ in self.rgb_keys}
def init_limits_and_stat(self):
"""
Set plotting status for all the 2D images.
Note: 'self.map_keys' must be updated before calling this function!
"""
self.stat_dict.clear()
self.stat_dict = {k: False for k in self.map_keys}
self._init_rgb_dict()
self.limit_dict.clear()
self.limit_dict = {k: {'low': 0.0, 'high': 100.0} for k in self.map_keys}
self.set_low_high_value()
def preprocess_data(self):
"""
Normalize data or prepare for linear/log plot.
"""
selected_data = []
selected_name = []
quant_norm_applied = []
rgb_color_to_keys = self.get_rgb_items_for_plot()
for data_key in rgb_color_to_keys.values():
if data_key in self.dict_to_plot:
selected_name.append(data_key)
if self.scaler_data is not None:
if np.count_nonzero(self.scaler_data) == 0:
logger.warning('scaler is zero - scaling was not applied')
elif len(self.scaler_data[self.scaler_data == 0]) > 0:
logger.warning('scaler data has zero values')
for i, k in enumerate(selected_name):
q_norm_applied = False
if self.quantitative_normalization:
# Quantitative normalization
data_arr, q_norm_applied = \
self.img_model_adv.param_quant_analysis.apply_quantitative_normalization(
data_in=self.dict_to_plot[k],
scaler_dict=self.scaler_norm_dict,
scaler_name_default=self.get_selected_scaler_name(),
data_name=k,
name_not_scalable=self.name_not_scalable)
else:
# Normalize by the selected scaler in a regular way
data_arr = normalize_data_by_scaler(data_in=self.dict_to_plot[k],
scaler=self.scaler_data,
data_name=k,
name_not_scalable=self.name_not_scalable)
selected_data.append(data_arr)
quant_norm_applied.append(q_norm_applied)
return selected_data, selected_name, rgb_color_to_keys, quant_norm_applied
def show_image(self):
# Don't plot the image if dictionary is empty (causes a lot of issues)
if not self.io_model.img_dict:
return
self.fig.clf()
self.ax = self.fig.add_subplot(111)
self.ax_r, self.ax_g, self.ax_b = make_rgb_axes(self.ax, pad=0.02)
# Check if positions data is available. Positions data may be unavailable
# (not recorded in the HDF5 file) if the experiment has not been completed.
# While the data from the completed part of the experiment may still be used,
# plotting vs. x-y coordinates or as a scatter plot may not be possible.
positions_data_available = False
if 'positions' in self.io_model.img_dict.keys():
positions_data_available = True
# Create local copy of self.pixel_or_pos and self.grid_interpolate
pixel_or_pos_local = self.pixel_or_pos
grid_interpolate_local = self.grid_interpolate
# Disable plotting vs x-y coordinates if 'positions' data is not available
if not positions_data_available:
if pixel_or_pos_local:
pixel_or_pos_local = 0 # Switch to plotting vs. pixel number
logger.error("'Positions' data is not available. Plotting vs. x-y coordinates is disabled")
if grid_interpolate_local:
grid_interpolate_local = False # Switch to plotting vs. pixel number
logger.error("'Positions' data is not available. Interpolation is disabled.")
selected_data, selected_names, rgb_color_to_keys, quant_norm_applied = self.preprocess_data()
selected_data = np.asarray(selected_data)
# Hide unused axes
if rgb_color_to_keys["red"] is None:
self.ax_r.set_visible(False)
if rgb_color_to_keys["green"] is None:
self.ax_g.set_visible(False)
if rgb_color_to_keys["blue"] is None:
self.ax_b.set_visible(False)
if selected_data.ndim != 3:
# There is no data to display. Hide the last axis and exit
self.ax.set_visible(False)
return
def _compute_equal_axes_ranges(x_min, x_max, y_min, y_max):
"""
Compute ranges for x- and y- axes of the plot. Make sure that the ranges for x- and y-axes are
always equal and fit the maximum of the ranges for x and y values:
max(abs(x_max-x_min), abs(y_max-y_min))
The ranges are set so that the data is always centered in the middle of the ranges
Parameters
----------
x_min, x_max, y_min, y_max : float
lower and upper boundaries of the x and y values
Returns
-------
x_axis_min, x_axis_max, y_axis_min, y_axis_max : float
lower and upper boundaries of the x- and y-axes ranges
"""
x_axis_min, x_axis_max, y_axis_min, y_axis_max = x_min, x_max, y_min, y_max
x_range, y_range = abs(x_max - x_min), abs(y_max - y_min)
if x_range > y_range:
y_center = (y_max + y_min) / 2
y_axis_max = y_center + x_range / 2
y_axis_min = y_center - x_range / 2
else:
x_center = (x_max + x_min) / 2
x_axis_max = x_center + y_range / 2
x_axis_min = x_center - y_range / 2
return x_axis_min, x_axis_max, y_axis_min, y_axis_max
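# Worked example (illustrative): x in [0, 10] and y in [0, 4] gives
# x_range=10 > y_range=4, so the y axis is widened to [-3, 7] around its
# centre and both axes end up spanning 10 units.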
def _adjust_data_range_using_min_ratio(c_min, c_max, c_axis_range, *, min_ratio=0.01):
"""
Adjust the range for plotted data along one axis (x or y). The adjusted range is
applied to the 'extent' attribute of imshow(). The adjusted range is always greater
than 'axis_range * min_ratio'. Such a transformation has no physical meaning
and is performed purely for aesthetic reasons: stretching the image presentation of
a scan with only a few lines (1-3) greatly improves the visibility of the data.
Parameters
----------
c_min, c_max : float
boundaries of the data range (along x or y axis)
c_axis_range : float
range presented along the same axis
Returns
-------
c_min, c_max : float
adjusted boundaries of the data range
"""
c_range = c_max - c_min
if c_range < c_axis_range * min_ratio:
c_center = (c_max + c_min) / 2
c_new_range = c_axis_range * min_ratio
c_min = c_center - c_new_range / 2
c_max = c_center + c_new_range / 2
return c_min, c_max
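# Worked example (illustrative): for a 2-unit-tall scan on a 400-unit axis,
# c_min=0, c_max=2 and c_axis_range=400 give c_range=2 < 400*0.01=4, so the
# data range is stretched to (-1, 3) around its centre.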
if pixel_or_pos_local:
# xd_min, xd_max, yd_min, yd_max = min(self.x_pos), max(self.x_pos),
# min(self.y_pos), max(self.y_pos)
x_pos_2D = self.io_model.img_dict['positions']['x_pos']
y_pos_2D = self.io_model.img_dict['positions']['y_pos']
xd_min, xd_max, yd_min, yd_max = x_pos_2D.min(), x_pos_2D.max(), y_pos_2D.min(), y_pos_2D.max()
xd_axis_min, xd_axis_max, yd_axis_min, yd_axis_max = \
_compute_equal_axes_ranges(xd_min, xd_max, yd_min, yd_max)
xd_min, xd_max = _adjust_data_range_using_min_ratio(xd_min, xd_max, xd_axis_max - xd_axis_min)
yd_min, yd_max = _adjust_data_range_using_min_ratio(yd_min, yd_max, yd_axis_max - yd_axis_min)
# Adjust the direction of each axis depending on the direction in which encoder values changed
# during the experiment. Data is plotted starting from the upper-right corner of the plot
if x_pos_2D[0, 0] > x_pos_2D[0, -1]:
xd_min, xd_max, xd_axis_min, xd_axis_max = xd_max, xd_min, xd_axis_max, xd_axis_min
if y_pos_2D[0, 0] > y_pos_2D[-1, 0]:
yd_min, yd_max, yd_axis_min, yd_axis_max = yd_max, yd_min, yd_axis_max, yd_axis_min
else:
if selected_data.ndim == 3:
# Set equal ranges for the axes data
yd, xd = selected_data.shape[1], selected_data.shape[2]
xd_min, xd_max, yd_min, yd_max = 0, xd, 0, yd
# Select minimum range for data
if (yd <= math.floor(xd / 100)) and (xd >= 200):
yd_min, yd_max = -math.floor(xd / 200), math.ceil(xd / 200)
if (xd <= math.floor(yd / 100)) and (yd >= 200):
xd_min, xd_max = -math.floor(yd / 200), math.ceil(yd / 200)
xd_axis_min, xd_axis_max, yd_axis_min, yd_axis_max = \
_compute_equal_axes_ranges(xd_min, xd_max, yd_min, yd_max)
name_r, data_r, limits_r = "", None, {"low": 0, "high": 100.0}
name_g, data_g, limits_g = "", None, {"low": 0, "high": 100.0}
name_b, data_b, limits_b = "", None, {"low": 0, "high": 100.0}
for color, name in rgb_color_to_keys.items():
if name:
try:
ind = selected_names.index(name)
name_label = name
if quant_norm_applied[ind]:
name_label += " - Q" # Add suffix to name if quantitative normalization was applied
if color == "red":
name_r, data_r = name_label, selected_data[ind]
limits_r = self.limit_dict[name]
elif color == "green":
name_g, data_g = name_label, selected_data[ind]
limits_g = self.limit_dict[name]
elif color == "blue":
name_b, data_b = name_label, selected_data[ind]
limits_b = self.limit_dict[name]
except ValueError:
pass
def _norm_data(data):
"""
Normalize data between (0, 1).
Parameters
----------
data : 2D array
"""
if data is None:
return data
data_min = np.min(data)
c_norm = np.max(data) - data_min
return (data - data_min) / c_norm if (c_norm != 0) else (data - data_min)
def _stretch_range(data_in, v_low, v_high):
# 'data_in' is already normalized, so the values are in the range 0..1;
# v_low, v_high are in the range 0..100
if data_in is None:
return data_in
if (v_low <= 0) and (v_high >= 100):
return data_in
if v_high - v_low < 1: # This should not happen in practice, but check just in case
v_high = v_low + 1
v_low, v_high = v_low / 100.0, v_high / 100.0
c = 1.0 / (v_high - v_low)
data_out = (data_in - v_low) * c
return np.clip(data_out, 0, 1.0)
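# Worked example (illustrative): v_low=25, v_high=75 maps the normalized
# values 0.25..0.75 linearly onto 0..1 and clips everything outside that
# window to the [0, 1] range.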
# Interpolate non-uniformly spaced data to uniform grid
if grid_interpolate_local:
data_r, _, _ = grid_interpolate(data_r,
self.io_model.img_dict['positions']['x_pos'],
self.io_model.img_dict['positions']['y_pos'])
data_g, _, _ = grid_interpolate(data_g,
self.io_model.img_dict['positions']['x_pos'],
self.io_model.img_dict['positions']['y_pos'])
data_b, _, _ = grid_interpolate(data_b,
self.io_model.img_dict['positions']['x_pos'],
self.io_model.img_dict['positions']['y_pos'])
# The dictionaries 'rgb_view_data' and 'pos_limits' are used for monitoring
# the map values at current cursor positions.
rgb_view_data = {_: None for _ in self.rgb_keys}
if data_r is not None:
rgb_view_data["red"] = data_r
if data_g is not None:
rgb_view_data["green"] = data_g
if data_b is not None:
rgb_view_data["blue"] = data_b
pos_limits = {"x_low": xd_min, "x_high": xd_max,
"y_low": yd_min, "y_high": yd_max}
# Normalize data
data_r_norm = _norm_data(data_r)
data_g_norm = _norm_data(data_g)
data_b_norm = _norm_data(data_b)
data_r_norm = _stretch_range(data_r_norm, limits_r['low'], limits_r['high'])
data_g_norm = _stretch_range(data_g_norm, limits_g['low'], limits_g['high'])
data_b_norm = _stretch_range(data_b_norm, limits_b['low'], limits_b['high'])
R, G, B, RGB = make_cube(data_r_norm, data_g_norm, data_b_norm)
red_patch = mpatches.Patch(color='red', label=name_r)
green_patch = mpatches.Patch(color='green', label=name_g)
blue_patch = mpatches.Patch(color='blue', label=name_b)
def format_coord_func(x, y, *, pixel_or_pos, rgb_color_to_keys,
rgb_view_data, pos_limits, colors=None):
x0, y0 = pos_limits["x_low"], pos_limits["y_low"]
if colors is None:
colors = list(rgb_color_to_keys.keys())
s = ""
for color in self.rgb_keys:
if (color not in colors) or (rgb_color_to_keys[color] is None) \
or (rgb_view_data[color] is None):
continue
map_data = rgb_view_data[color]
ny, nx = map_data.shape
dy = (pos_limits["y_high"] - y0) / ny if ny else 0
dx = (pos_limits["x_high"] - x0) / nx if nx else 0
cy = 1 / dy if dy else 1
cx = 1 / dx if dx else 1
x_pixel = math.floor((x - x0) * cx)
y_pixel = math.floor((y - y0) * cy)
if (0 <= x_pixel < nx) and (0 <= y_pixel < ny):
# The following line is extremely useful for debugging the feature. Keep it.
# s += f" <b>{rgb_color_to_keys[color]}</b>: {x_pixel} {y_pixel}"
s += f" <b>{rgb_color_to_keys[color]}</b>: {map[y_pixel, x_pixel]:.5g}"
s = " - " + s if s else s # Add dash if something is to be printed
if pixel_or_pos:
# Spatial coordinates (double)
s_coord = f"({x:.5g}, {y:.5g})"
else:
# Pixel coordinates (int)
s_coord = f"({int(x)}, {int(y)})"
return s_coord + s
format_coord = partial(format_coord_func,
pixel_or_pos=pixel_or_pos_local,
rgb_color_to_keys=rgb_color_to_keys,
rgb_view_data=rgb_view_data,
pos_limits=pos_limits)
def format_cursor_data(data):
return "" # Print nothing
kwargs = dict(origin="upper", interpolation="nearest", extent=(xd_min, xd_max, yd_max, yd_min))
if RGB is not None:
img = self.ax.imshow(RGB, **kwargs)
self.ax.format_coord = format_coord
img.format_cursor_data = format_cursor_data
self.ax.set_xlim(xd_axis_min, xd_axis_max)
self.ax.set_ylim(yd_axis_max, yd_axis_min)
if R is not None:
img = self.ax_r.imshow(R, **kwargs)
self.ax_r.set_xlim(xd_axis_min, xd_axis_max)
self.ax_r.set_ylim(yd_axis_max, yd_axis_min)
format_coord_r = partial(format_coord, colors=["red"])
self.ax_r.format_coord = format_coord_r
img.format_cursor_data = format_cursor_data
if G is not None:
img = self.ax_g.imshow(G, **kwargs)
self.ax_g.set_xlim(xd_axis_min, xd_axis_max)
self.ax_g.set_ylim(yd_axis_max, yd_axis_min)
format_coord_g = partial(format_coord, colors=["green"])
self.ax_g.format_coord = format_coord_g
img.format_cursor_data = format_cursor_data
if B is not None:
img = self.ax_b.imshow(B, **kwargs)
self.ax_b.set_xlim(xd_axis_min, xd_axis_max)
self.ax_b.set_ylim(yd_axis_max, yd_axis_min)
format_coord_b = partial(format_coord, colors=["blue"])
self.ax_b.format_coord = format_coord_b
img.format_cursor_data = format_cursor_data
self.ax.xaxis.set_major_locator(mticker.MaxNLocator(nbins="auto"))
self.ax.yaxis.set_major_locator(mticker.MaxNLocator(nbins="auto"))
plt.setp(self.ax_r.get_xticklabels(), visible=False)
plt.setp(self.ax_r.get_yticklabels(), visible=False)
plt.setp(self.ax_g.get_xticklabels(), visible=False)
plt.setp(self.ax_g.get_yticklabels(), visible=False)
plt.setp(self.ax_b.get_xticklabels(), visible=False)
plt.setp(self.ax_b.get_yticklabels(), visible=False)
# self.ax_r.set_xticklabels([])
# self.ax_r.set_yticklabels([])
# sb_x = 38
# sb_y = 46
# sb_length = 10
# sb_height = 1
# ax.add_patch(mpatches.Rectangle(( sb_x, sb_y), sb_length, sb_height, color='white'))
# ax.text(sb_x + sb_length /2, sb_y - 1*sb_height, '100 nm', color='w', ha='center',
# va='bottom', backgroundcolor='black', fontsize=18)
self.ax_r.legend(loc="upper left", bbox_to_anchor=(1.1, 0), frameon=False,
handles=[red_patch, green_patch, blue_patch], mode="expand")
# self.fig.tight_layout(pad=4.0, w_pad=0.8, h_pad=0.8)
# self.fig.tight_layout()
# self.fig.canvas.draw_idle()
# self.fig.suptitle(self.img_title, fontsize=20)
self.fig.canvas.draw_idle()
def get_selected_items_for_plot(self):
"""Collect the selected items for plotting.
"""
# We want the dictionary to be sorted the same way as 'map_keys'
sdict = self.stat_dict
selected_keys = [_ for _ in self.map_keys if (_ in sdict) and (sdict[_] is True)]
return selected_keys
def get_rgb_items_for_plot(self):
# Verify integrity of the dictionary
if len(self.rgb_dict) != 3:
raise ValueError("DrawImageRGB.get_rgb_items_for_plot: dictionary 'rgb_dict' has "
f"{len(self.rgb_dict)} elements. Expected number of elements: "
f"{len(self.rgb_keys)}.")
for key in self.rgb_keys:
if key not in self.rgb_dict:
raise ValueError("DrawImageRGB.get_rgb_items_for_plot: dictionary 'rgb_dict' is "
f"incomplete or contains incorrect set of keys: {list(self.rgb_dict.keys())}. "
f"Expected keys: {self.rgb_keys}: ")
return self.rgb_dict
def make_cube(r, g, b):
"""
Create 3D array for rgb image.
Parameters
----------
r : 2D array
g : 2D array
b : 2D array
"""
if r is None and g is None and b is None:
logger.error("'make_cube': 'r', 'g' and 'b' input arrays are all None")
R, G, B, RGB = None, None, None, None
else:
for arr in [r, g, b]:
if arr is not None:
ny, nx = arr.shape
break
# Substitute zeros for missing channels so the RGB cube can still be built
if r is None:
r = np.zeros((ny, nx))
if g is None:
g = np.zeros((ny, nx))
if b is None:
b = np.zeros((ny, nx))
R = np.zeros([ny, nx, 3])
R[:, :, 0] = r
G = np.zeros_like(R)
G[:, :, 1] = g
B = np.zeros_like(R)
B[:, :, 2] = b
RGB = R + G + B
return R, G, B, RGB
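# Hedged, self-contained sketch of make_cube on toy data (numpy only):
#
#     r = np.array([[0.0, 0.5], [1.0, 0.0]])
#     g = np.zeros((2, 2))
#     b = np.ones((2, 2))
#     R, G, B, RGB = make_cube(r, g, b)
#     RGB.shape        # -> (2, 2, 3)
#     RGB[0, 1]        # -> array([0.5, 0. , 1. ])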
|
bsd-3-clause
|
pythonvietnam/scikit-learn
|
examples/classification/plot_lda.py
|
164
|
2224
|
"""
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
This example shows how shrinkage improves classification when the number of
features is large relative to the number of training samples.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.lda import LDA
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, while the other
features contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
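# Quick sanity check of generate_data (illustrative):
#     X, y = generate_data(n_samples=10, n_features=5)
#     X.shape, y.shape   # -> ((10, 5), (10,))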
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LDA(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LDA(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="LDA with shrinkage", color='r')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="LDA", color='g')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('LDA vs. shrinkage LDA (1 discriminative feature)')
plt.show()
|
bsd-3-clause
|
UDST/activitysim
|
activitysim/abm/models/util/vectorize_tour_scheduling.py
|
2
|
32267
|
# ActivitySim
# See full license in LICENSE.txt.
from __future__ import (absolute_import, division, print_function, )
from future.standard_library import install_aliases
install_aliases() # noqa: E402
import logging
import numpy as np
import pandas as pd
from activitysim.core.interaction_sample_simulate import interaction_sample_simulate
from activitysim.core import config
from activitysim.core import tracing
from activitysim.core import inject
from activitysim.core import mem
from activitysim.core import chunk
from activitysim.core import simulate
from activitysim.core import assign
from activitysim.core import logit
from activitysim.core import timetable as tt
from activitysim.core.util import reindex
from . import expressions
from . import mode
logger = logging.getLogger(__name__)
def get_logsum_spec(model_settings):
return mode.tour_mode_choice_spec(model_settings)
def get_coeffecients_spec(model_settings):
return mode.tour_mode_choice_coeffecients_spec(model_settings)
def _compute_logsums(alt_tdd, tours_merged, tour_purpose, model_settings, trace_label):
"""
compute logsums for tours using skims for alt_tdd out_period and in_period
"""
trace_label = tracing.extend_trace_label(trace_label, 'logsums')
logsum_settings = config.read_model_settings(model_settings['LOGSUM_SETTINGS'])
choosers = alt_tdd.join(tours_merged, how='left', rsuffix='_chooser')
logger.info("%s compute_logsums for %d choosers%s alts" %
(trace_label, choosers.shape[0], alt_tdd.shape[0]))
# - setup skims
skim_dict = inject.get_injectable('skim_dict')
skim_stack = inject.get_injectable('skim_stack')
orig_col_name = 'TAZ'
dest_col_name = model_settings.get('DESTINATION_FOR_TOUR_PURPOSE').get(tour_purpose)
odt_skim_stack_wrapper = skim_stack.wrap(left_key=orig_col_name, right_key=dest_col_name,
skim_key='out_period')
dot_skim_stack_wrapper = skim_stack.wrap(left_key=dest_col_name, right_key=orig_col_name,
skim_key='in_period')
od_skim_stack_wrapper = skim_dict.wrap(orig_col_name, dest_col_name)
skims = {
"odt_skims": odt_skim_stack_wrapper,
"dot_skims": dot_skim_stack_wrapper,
"od_skims": od_skim_stack_wrapper,
'orig_col_name': orig_col_name,
'dest_col_name': dest_col_name,
}
# - locals_dict
constants = config.get_model_constants(logsum_settings)
omnibus_coefficient_spec = get_coeffecients_spec(logsum_settings)
coefficient_spec = omnibus_coefficient_spec[tour_purpose]
coefficients = assign.evaluate_constants(coefficient_spec, constants=constants)
locals_dict = {}
locals_dict.update(coefficients)
locals_dict.update(constants)
locals_dict.update(skims)
# - run preprocessor to annotate choosers
# allow specification of alternate preprocessor for nontour choosers
preprocessor = model_settings.get('LOGSUM_PREPROCESSOR', 'preprocessor')
preprocessor_settings = logsum_settings[preprocessor]
if preprocessor_settings:
simulate.set_skim_wrapper_targets(choosers, skims)
expressions.assign_columns(
df=choosers,
model_settings=preprocessor_settings,
locals_dict=locals_dict,
trace_label=trace_label)
# - compute logsums
logsum_spec = get_logsum_spec(logsum_settings)
nest_spec = config.get_logit_model_settings(logsum_settings)
logsums = simulate.simple_simulate_logsums(
choosers,
logsum_spec,
nest_spec,
skims=skims,
locals_d=locals_dict,
chunk_size=0,
trace_label=trace_label)
return logsums
def compute_logsums(alt_tdd, tours_merged, tour_purpose, model_settings, trace_label):
"""
Compute logsums for the tour alt_tdds, which will differ based on their different start, stop
times of day, which translate to different odt_skim out_period and in_periods.
In mtctm1, tdds are hourly, but there are only 5 skim time periods, so some of the tdd_alts
will be the same, once converted to skim time periods. With 5 skim time periods there are
15 unique (out-period, in-period) pairs but 190 tdd alternatives.
For efficiency, rather than compute a lot of redundant logsums, we compute logsums for the unique
(out-period, in-period) pairs and then join them back to the alt_tdds.
"""
# - in_period and out_period
assert 'out_period' not in alt_tdd
assert 'in_period' not in alt_tdd
alt_tdd['out_period'] = expressions.skim_time_period_label(alt_tdd['start'])
alt_tdd['in_period'] = expressions.skim_time_period_label(alt_tdd['end'])
alt_tdd['duration'] = alt_tdd['end'] - alt_tdd['start']
USE_BRUTE_FORCE = False
if USE_BRUTE_FORCE:
# compute logsums for all the tour alt_tdds (inefficient)
logsums = _compute_logsums(alt_tdd, tours_merged, tour_purpose, model_settings, trace_label)
return logsums
# - get list of unique (tour_id, out_period, in_period, duration) in alt_tdd_periods
# we can cut the number of alts roughly in half (for mtctm1) by conflating duplicates
index_name = alt_tdd.index.name
alt_tdd_periods = alt_tdd[['out_period', 'in_period', 'duration']]\
.reset_index().drop_duplicates().set_index(index_name)
# - compute logsums for the alt_tdd_periods
alt_tdd_periods['logsums'] = \
_compute_logsums(alt_tdd_periods, tours_merged, tour_purpose, model_settings, trace_label)
# - join the alt_tdd_period logsums to alt_tdd to get logsums for alt_tdd
logsums = pd.merge(
alt_tdd.reset_index(),
alt_tdd_periods.reset_index(),
on=[index_name, 'out_period', 'in_period', 'duration'],
how='left'
).set_index(index_name).logsums
return logsums
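# The dedup above is plain pandas; a toy illustration of the pattern
# (hedged, values are made up):
#
#     df = pd.DataFrame({'out_period': ['AM', 'AM'], 'in_period': ['PM', 'PM'],
#                        'duration': [3, 3]}, index=[7, 7])
#     df.reset_index().drop_duplicates().set_index('index')
#     # -> a single row, whose logsum is later merged back onto every
#     #    matching alt_tdd row for tour 7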
def get_previous_tour_by_tourid(current_tour_window_ids,
previous_tour_by_window_id,
alts):
"""
Matches current tours with attributes of previous tours for the same
person. See the return value below for more information.
Parameters
----------
current_tour_window_ids : Series
A Series of parent ids for the tours we're about to make the choice for
- index should match the tours DataFrame.
previous_tour_by_window_id : Series
A Series where the index is the parent (window) id and the value is the index
of the alternatives of the scheduling.
alts : DataFrame
The alternatives of the scheduling.
Returns
-------
prev_alts : DataFrame
A DataFrame with an index matching the CURRENT tours we're making a
decision for, but with columns from the PREVIOUS tour of the person
associated with each of the CURRENT tours. Columns listed in PREV_TOUR_COLUMNS
from the alternatives will have "_previous" added as a suffix to keep
differentiated from the current alternatives that will be part of the
interaction.
"""
PREV_TOUR_COLUMNS = ['start', 'end']
previous_tour_by_tourid = \
previous_tour_by_window_id.loc[current_tour_window_ids]
previous_tour_by_tourid = alts.loc[previous_tour_by_tourid, PREV_TOUR_COLUMNS]
previous_tour_by_tourid.index = current_tour_window_ids.index
previous_tour_by_tourid.columns = [x+'_previous' for x in PREV_TOUR_COLUMNS]
return previous_tour_by_tourid
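# Hedged toy example of the matching above (ids and values are made up):
#
#     current_tour_window_ids = pd.Series([1, 1, 2], index=[101, 102, 103])
#     previous_tour_by_window_id = pd.Series([5, 7], index=[1, 2])
#     alts = pd.DataFrame({'start': [8, 9], 'end': [17, 18]}, index=[5, 7])
#     get_previous_tour_by_tourid(current_tour_window_ids,
#                                 previous_tour_by_window_id, alts)
#     # -> start_previous [8, 8, 9], end_previous [17, 17, 18],
#     #    indexed by tour ids [101, 102, 103]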
def tdd_interaction_dataset(tours, alts, timetable, choice_column, window_id_col, trace_label):
"""
interaction_sample_simulate expects
alts index same as choosers (e.g. tour_id)
name of choice column in alts
Parameters
----------
tours : pandas DataFrame
must have person_id column and index on tour_id
alts : pandas DataFrame
alts index must be timetable tdd id
timetable : TimeTable object
choice_column : str
name of column to store alt index in alt_tdd DataFrame
(since alt_tdd is duplicate index on person_id but unique on person_id,alt_id)
Returns
-------
alt_tdd : pandas DataFrame
columns: start, end , duration, <choice_column>
index: tour_id
"""
alts_ids = np.tile(alts.index, len(tours.index))
tour_ids = np.repeat(tours.index, len(alts.index))
window_row_ids = np.repeat(tours[window_id_col], len(alts.index))
alt_tdd = alts.take(alts_ids)
alt_tdd.index = tour_ids
alt_tdd[window_id_col] = window_row_ids
alt_tdd[choice_column] = alts_ids
# slice out all non-available tours
available = timetable.tour_available(alt_tdd[window_id_col], alt_tdd[choice_column])
assert available.any()
alt_tdd = alt_tdd[available]
# FIXME - don't need this any more after slicing
del alt_tdd[window_id_col]
return alt_tdd
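# Editor's note: illustrative-only sketch of the tile/repeat cross join used in
# tdd_interaction_dataset (timetable filtering omitted); all values are toy data.
def _demo_tour_alt_cross_join():
    import numpy as np
    import pandas as pd
    tours = pd.DataFrame({'person_id': [10, 20]}, index=pd.Index([1, 2], name='tour_id'))
    alts = pd.DataFrame({'start': [7, 9], 'end': [17, 18]})
    alts_ids = np.tile(alts.index, len(tours.index))    # [0, 1, 0, 1]
    tour_ids = np.repeat(tours.index, len(alts.index))  # [1, 1, 2, 2]
    alt_tdd = alts.take(alts_ids)
    alt_tdd.index = tour_ids
    alt_tdd['tdd'] = alts_ids
    return alt_tdd  # every tour paired with every alternative, indexed by tour_id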
def _schedule_tours(
tours, persons_merged, alts,
spec, logsum_tour_purpose,
model_settings,
timetable, window_id_col,
previous_tour, tour_owner_id_col,
tour_trace_label):
"""
previous_tour stores values used to add columns that can be used in the spec
which have to do with the previous tours per person. Every column in the
alternatives table is appended with the suffix "_previous" and made
available. So if your alternatives table has columns for start and end,
then start_previous and end_previous will be set to the start and end of
the most recent tour for a person. The first time through,
start_previous and end_previous are undefined, so make sure to protect
with a tour_num >= 2 in the variable computation.
Parameters
----------
tours : DataFrame
chunk of tours to schedule with unique timetable window_id_col
persons_merged : DataFrame
DataFrame of persons to be merged with tours containing attributes referenced
by expressions in spec
alts : DataFrame
DataFrame of alternatives which represent all possible time slots.
tdd_interaction_dataset function will use timetable to filter them to omit
unavailable alternatives
spec : DataFrame
The spec which will be passed to interaction_simulate.
model_settings : dict
timetable : TimeTable
timetable of timewidows for person (or subtour) with rows for tours[window_id_col]
window_id_col : str
column name from tours that identifies timetable owner (or None if tours index)
- person_id for non/mandatory tours
- parent_tour_id for subtours,
- None (tours index) for joint_tours since every tour may have different participants
previous_tour: Series
series with the tdd_alt choice of the most recent previous tour scheduled for each tour owner
tour_owner_id_col : str
column name from tours that identifies 'owner' of this tour
(person_id for non/mandatory tours, parent_tour_id for subtours,
household_id for joint_tours)
tour_trace_label
Returns
-------
"""
logger.info("%s schedule_tours running %d tour choices" % (tour_trace_label, len(tours)))
# merge persons into tours
# avoid dual suffix for redundant column names (e.g. household_id) that appear in both
tours = pd.merge(tours, persons_merged, left_on='person_id', right_index=True,
suffixes=('', '_y'))
chunk.log_df(tour_trace_label, "tours", tours)
# - add explicit window_id_col for timetable owner if it is index
# if no timetable window_id_col specified, then add index as an explicit column
# (this is not strictly necessary but its presence makes code simpler in several places)
if window_id_col is None:
window_id_col = tours.index.name
tours[window_id_col] = tours.index
# timetable can't handle multiple tours per window_id
assert not tours[window_id_col].duplicated().any()
# - build interaction dataset filtered to include only available tdd alts
# dataframe columns start, end, duration, person_id, tdd
# indexed (not unique) on tour_id
choice_column = 'tdd'
alt_tdd = tdd_interaction_dataset(tours, alts, timetable, choice_column, window_id_col,
tour_trace_label)
chunk.log_df(tour_trace_label, "alt_tdd", alt_tdd)
# - add logsums
if logsum_tour_purpose:
logsums = \
compute_logsums(alt_tdd, tours, logsum_tour_purpose, model_settings, tour_trace_label)
else:
logsums = 0
alt_tdd['mode_choice_logsum'] = logsums
# - merge in previous tour columns
# adds start_previous and end_previous, joins on index
tours = \
tours.join(get_previous_tour_by_tourid(tours[tour_owner_id_col], previous_tour, alts))
chunk.log_df(tour_trace_label, "tours", tours)
# - make choices
locals_d = {
'tt': timetable
}
constants = config.get_model_constants(model_settings)
if constants is not None:
locals_d.update(constants)
choices = interaction_sample_simulate(
tours,
alt_tdd,
spec,
choice_column=choice_column,
locals_d=locals_d,
chunk_size=0,
trace_label=tour_trace_label
)
# - update previous_tour and timetable parameters
# update previous_tour (series with most recent previous tdd choices) with latest values
previous_tour.loc[tours[tour_owner_id_col]] = choices.values
# update timetable with chosen tdd footprints
timetable.assign(tours[window_id_col], choices)
return choices
def calc_rows_per_chunk(chunk_size, tours, persons_merged, alternatives, trace_label=None):
num_choosers = len(tours.index)
# if not chunking, then return num_choosers
# if chunk_size == 0:
# return num_choosers, 0
chooser_row_size = tours.shape[1]
sample_size = alternatives.shape[0]
# persons_merged columns plus 2 previous tour columns
extra_chooser_columns = persons_merged.shape[1] + 2
# one column per alternative plus skim and join columns
alt_row_size = alternatives.shape[1] + 2
row_size = (chooser_row_size + extra_chooser_columns + alt_row_size) * sample_size
# logger.debug("%s #chunk_calc choosers %s" % (trace_label, tours.shape))
# logger.debug("%s #chunk_calc extra_chooser_columns %s" % (trace_label, extra_chooser_columns))
# logger.debug("%s #chunk_calc alternatives %s" % (trace_label, alternatives.shape))
# logger.debug("%s #chunk_calc alt_row_size %s" % (trace_label, alt_row_size))
return chunk.rows_per_chunk(chunk_size, row_size, num_choosers, trace_label)
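# Editor's note: a worked example of the row-size arithmetic above, using
# hypothetical table shapes (illustrative only).
def _demo_row_size_arithmetic():
    chooser_row_size = 10            # tours columns
    extra_chooser_columns = 50 + 2   # persons_merged columns plus 2 previous-tour columns
    alt_row_size = 3 + 2             # alternative columns plus skim and join columns
    sample_size = 190                # number of alternatives
    # (10 + 52 + 5) * 190 = 12,730 values accounted for per chooser
    return (chooser_row_size + extra_chooser_columns + alt_row_size) * sample_size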
def schedule_tours(
tours, persons_merged, alts,
spec, logsum_tour_purpose,
model_settings,
timetable, timetable_window_id_col,
previous_tour, tour_owner_id_col,
chunk_size, tour_trace_label):
"""
chunking wrapper for _schedule_tours
While interaction_sample_simulate provides chunking support, the merged tours, persons
dataframe and the tdd_interaction_dataset are very big, so we want to create them inside
the chunking loop to minimize memory footprint. So we implement the chunking loop here,
and pass a chunk_size of 0 to interaction_sample_simulate to disable its chunking support.
"""
if not tours.index.is_monotonic_increasing:
logger.info("schedule_tours %s tours not monotonic_increasing - sorting df")
tours = tours.sort_index()
logger.info("%s schedule_tours running %d tour choices" % (tour_trace_label, len(tours)))
# no more than one tour per timetable_window per call
if timetable_window_id_col is None:
assert not tours.index.duplicated().any()
else:
assert not tours[timetable_window_id_col].duplicated().any()
rows_per_chunk, effective_chunk_size = \
calc_rows_per_chunk(chunk_size, tours, persons_merged, alts, trace_label=tour_trace_label)
result_list = []
for i, num_chunks, chooser_chunk \
in chunk.chunked_choosers(tours, rows_per_chunk):
logger.info("Running chunk %s of %s size %d" % (i, num_chunks, len(chooser_chunk)))
chunk_trace_label = tracing.extend_trace_label(tour_trace_label, 'chunk_%s' % i) \
if num_chunks > 1 else tour_trace_label
chunk.log_open(chunk_trace_label, chunk_size, effective_chunk_size)
choices = _schedule_tours(chooser_chunk, persons_merged,
alts, spec, logsum_tour_purpose,
model_settings,
timetable, timetable_window_id_col,
previous_tour, tour_owner_id_col,
tour_trace_label=chunk_trace_label)
chunk.log_close(chunk_trace_label)
result_list.append(choices)
mem.force_garbage_collect()
# FIXME: this will require 2X RAM
# if necessary, could append to hdf5 store on disk:
# http://pandas.pydata.org/pandas-docs/stable/io.html#id2
if len(result_list) > 1:
choices = pd.concat(result_list)
assert len(choices.index) == len(tours.index)
return choices
def vectorize_tour_scheduling(tours, persons_merged, alts,
spec, segment_col,
model_settings,
chunk_size=0, trace_label=None):
"""
The purpose of this method is fairly straightforward - it takes tours
and schedules them into time slots. Alternatives should be specified so
as to define those time slots (usually with start and end times).
schedule_tours adds variables that can be used in the spec which have
to do with the previous tours per person. Every column in the
alternatives table is appended with the suffix "_previous" and made
available. So if your alternatives table has columns for start and end,
then start_previous and end_previous will be set to the start and end of
the most recent tour for a person. The first time through,
start_previous and end_previous are undefined, so make sure to protect
with a tour_num >= 2 in the variable computation.
Parameters
----------
tours : DataFrame
DataFrame of tours containing tour attributes, as well as a person_id
column to define the nth tour for each person.
persons_merged : DataFrame
DataFrame of persons containing attributes referenced by expressions in spec
alts : DataFrame
DataFrame of alternatives which represent time slots. Will be passed to
interaction_simulate in batches for each nth tour.
spec : DataFrame
The spec which will be passed to interaction_simulate.
(or dict of specs keyed on tour_type if tour_types is not None)
model_settings : dict
Returns
-------
choices : Series
A Series of choices where the index is the index of the tours
DataFrame and the values are the index of the alts DataFrame.
timetable : TimeTable
persons timetable updated with tours (caller should replace_table for it to persist)
"""
trace_label = tracing.extend_trace_label(trace_label, 'vectorize_tour_scheduling')
assert len(tours.index) > 0
assert 'tour_num' in tours.columns
assert 'tour_type' in tours.columns
# tours must be scheduled in increasing tour_num order
# the second tour of a type must be in the group immediately following the first
# this ought to have been ensured when tours are created (tour_frequency.process_tours)
timetable = inject.get_injectable("timetable")
choice_list = []
# keep a series of the most recent tours for each person
# initialize with first trip from alts
previous_tour_by_personid = pd.Series(alts.index[0], index=tours.person_id.unique())
timetable_window_id_col = 'person_id'
tour_owner_id_col = 'person_id'
# no more than one tour per person per call to schedule_tours
# tours must be scheduled in increasing tour_num order
# the second tour of a type must be in the group immediately following the first
# segregate scheduling by tour_type if multiple specs passed in dict keyed by tour_type
for tour_num, nth_tours in tours.groupby('tour_num', sort=True):
tour_trace_label = tracing.extend_trace_label(trace_label, 'tour_%s' % (tour_num,))
if isinstance(spec, dict):
assert segment_col is not None
for spec_segment in spec:
segment_trace_label = tracing.extend_trace_label(tour_trace_label, spec_segment)
in_segment = nth_tours[segment_col] == spec_segment
if not in_segment.any():
logger.info("skipping empty segment %s")
continue
# assume segmentation of spec and logsum coefficients are aligned
logsum_tour_purpose = spec_segment
choices = \
schedule_tours(nth_tours[in_segment],
persons_merged, alts,
spec[spec_segment], logsum_tour_purpose,
model_settings,
timetable, timetable_window_id_col,
previous_tour_by_personid, tour_owner_id_col,
chunk_size,
segment_trace_label)
choice_list.append(choices)
else:
# an unsegmented spec indicates no logsums
# the caller could use a single-element spec dict if logsum support were desired,
# but that case is not required for mtctm1
assert segment_col is None
logsum_segment = None
choices = \
schedule_tours(nth_tours,
persons_merged, alts,
spec, logsum_segment,
model_settings,
timetable, timetable_window_id_col,
previous_tour_by_personid, tour_owner_id_col,
chunk_size,
tour_trace_label)
choice_list.append(choices)
choices = pd.concat(choice_list)
# add the start, end, and duration from tdd_alts
# use np instead of (slower) loc[] since alts has rangeindex
tdd = pd.DataFrame(data=alts.values[choices.values],
columns=alts.columns,
index=choices.index)
# tdd = alts.loc[choices]
# tdd.index = choices.index
# include the index of the choice in the tdd alts table
tdd['tdd'] = choices
return tdd, timetable
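# Editor's note: minimal sketch (illustrative only) of the positional lookup
# used above; it relies on alts having a RangeIndex so positions equal labels.
def _demo_positional_tdd_lookup():
    import pandas as pd
    alts = pd.DataFrame({'start': [7, 9, 11], 'end': [17, 18, 19]})  # RangeIndex 0..2
    choices = pd.Series([2, 0], index=[5001, 5002])                  # tour_id -> chosen alt id
    tdd = pd.DataFrame(data=alts.values[choices.values],
                       columns=alts.columns,
                       index=choices.index)
    tdd['tdd'] = choices
    return tdd  # same result as alts.loc[choices] but via fast ndarray indexing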
def vectorize_subtour_scheduling(parent_tours, subtours, persons_merged, alts, spec,
model_settings,
chunk_size=0, trace_label=None):
"""
Like vectorize_tour_scheduling but specifically for atwork subtours
subtours have a few peculiarities necessitating separate treatment:
Timetable has to be initialized to set all timeperiods outside the parent tour footprint as
unavailable, so atwork subtour timewindows are limited to the footprint of the parent work
tour. And the 'parent_tour_id' column of tours is used instead of person_id as the timetable row_id.
Parameters
----------
parent_tours : DataFrame
parent tours of the subtours (we need the tdd of the parent tour to
assign_subtour_mask of the timetable, which is indexed by parent_tour_id)
subtours : DataFrame
atwork subtours to schedule
persons_merged : DataFrame
DataFrame of persons containing attributes referenced by expressions in spec
alts : DataFrame
DataFrame of alternatives which represent time slots. Will be passed to
interaction_simulate in batches for each nth tour.
spec : DataFrame
The spec which will be passed to interaction_simulate.
(all subtours share same spec regardless of subtour type)
model_settings : dict
chunk_size
trace_label
Returns
-------
choices : Series
A Series of choices where the index is the index of the subtours
DataFrame and the values are the index of the alts DataFrame.
"""
if not trace_label:
trace_label = 'vectorize_non_mandatory_tour_scheduling'
assert len(subtours.index) > 0
assert 'tour_num' in subtours.columns
assert 'tour_type' in subtours.columns
timetable_window_id_col = 'parent_tour_id'
tour_owner_id_col = 'parent_tour_id'
segment = None
# timetable with a window for each parent tour
parent_tour_windows = tt.create_timetable_windows(parent_tours, alts)
timetable = tt.TimeTable(parent_tour_windows, alts)
# mask the periods outside parent tour footprint
timetable.assign_subtour_mask(parent_tours.tour_id, parent_tours.tdd)
# print timetable.windows
"""
[[7 7 7 0 0 0 0 0 0 0 0 7 7 7 7 7 7 7 7 7 7]
[7 0 0 0 0 0 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7]
[7 7 7 7 7 0 0 0 0 0 0 0 0 0 0 7 7 7 7 7 7]
[7 7 0 0 0 0 0 0 0 7 7 7 7 7 7 7 7 7 7 7 7]]
"""
choice_list = []
# keep a series of the most recent tours for each parent tour
# initialize with first trip from alts
previous_tour_by_parent_tour_id = \
pd.Series(alts.index[0], index=subtours['parent_tour_id'].unique())
# tours must be scheduled in increasing tour_num order
# the second tour of a type must be in the group immediately following the first
# this ought to have been ensured when tours are created (tour_frequency.process_tours)
for tour_num, nth_tours in subtours.groupby('tour_num', sort=True):
tour_trace_label = tracing.extend_trace_label(trace_label, 'tour_%s' % (tour_num,))
# no more than one tour per timetable window per call to schedule_tours
assert not nth_tours.parent_tour_id.duplicated().any()
choices = \
schedule_tours(nth_tours,
persons_merged, alts,
spec, segment,
model_settings,
timetable, timetable_window_id_col,
previous_tour_by_parent_tour_id, tour_owner_id_col,
chunk_size, tour_trace_label)
choice_list.append(choices)
choices = pd.concat(choice_list)
# add the start, end, and duration from tdd_alts
# assert (alts.index == list(range(alts.shape[0]))).all()
tdd = pd.DataFrame(data=alts.values[choices.values],
columns=alts.columns,
index=choices.index)
# tdd = alts.loc[choices]
# tdd.index = choices.index
# include the index of the choice in the tdd alts table
tdd['tdd'] = choices
# print "\nfinal timetable.windows\n", timetable.windows
"""
[[7 7 7 0 0 0 0 2 7 7 4 7 7 7 7 7 7 7 7 7 7]
[7 0 2 7 4 0 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7]
[7 7 7 7 7 2 4 0 0 0 0 0 0 0 0 7 7 7 7 7 7]
[7 7 0 2 7 7 4 0 0 7 7 7 7 7 7 7 7 7 7 7 7]]
"""
# we don't need to call replace_table() for this nonce timetable
# because subtours occur within time already scheduled in the persons timetable
return tdd
def build_joint_tour_timetables(joint_tours, joint_tour_participants, persons_timetable, alts):
# timetable with a window for each joint tour
joint_tour_windows_df = tt.create_timetable_windows(joint_tours, alts)
joint_tour_timetable = tt.TimeTable(joint_tour_windows_df, alts)
for participant_num, nth_participants in \
joint_tour_participants.groupby('participant_num', sort=True):
# nth_participant windows from persons_timetable
participant_windows = persons_timetable.slice_windows_by_row_id(nth_participants.person_id)
# assign them joint_tour_timetable
joint_tour_timetable.assign_footprints(nth_participants.tour_id, participant_windows)
return joint_tour_timetable
def vectorize_joint_tour_scheduling(
joint_tours, joint_tour_participants,
persons_merged, alts, spec,
model_settings,
chunk_size=0, trace_label=None):
"""
Like vectorize_tour_scheduling but specifically for joint tours
joint tours have a few peculiarities necessitating separate treatment:
Timetable has to be initialized to set all timeperiods...
Parameters
----------
joint_tours : DataFrame
DataFrame of joint tours containing tour attributes, including a household_id
column and tour_num defining the nth joint tour for each household.
joint_tour_participants : DataFrame
DataFrame of participants in the joint tours, with tour_id, person_id and
participant_num columns.
persons_merged : DataFrame
DataFrame of persons containing attributes referenced by expressions in spec
alts : DataFrame
DataFrame of alternatives which represent time slots. Will be passed to
interaction_simulate in batches for each nth tour.
spec : DataFrame
The spec which will be passed to interaction_simulate.
(or dict of specs keyed on tour_type if tour_types is not None)
model_settings : dict
Returns
-------
choices : Series
A Series of choices where the index is the index of the tours
DataFrame and the values are the index of the alts DataFrame.
persons_timetable : TimeTable
timetable updated with joint tours (caller should replace_table for it to persist)
"""
trace_label = tracing.extend_trace_label(trace_label, 'vectorize_joint_tour_scheduling')
assert len(joint_tours.index) > 0
assert 'tour_num' in joint_tours.columns
assert 'tour_type' in joint_tours.columns
timetable_window_id_col = None
tour_owner_id_col = 'household_id'
segment = None
persons_timetable = inject.get_injectable("timetable")
choice_list = []
# keep a series of the most recent tours for each household
# initialize with first trip from alts
previous_tour_by_householdid = pd.Series(alts.index[0], index=joint_tours.household_id.unique())
# tours must be scheduled in increasing tour_num order
# the second tour of a type must be in the group immediately following the first
# this ought to have been ensured when tours are created (tour_frequency.process_tours)
# print "participant windows before scheduling\n", \
# persons_timetable.slice_windows_by_row_id(joint_tour_participants.person_id)
for tour_num, nth_tours in joint_tours.groupby('tour_num', sort=True):
tour_trace_label = tracing.extend_trace_label(trace_label, 'tour_%s' % (tour_num,))
# no more than one tour per household per call to schedule_tours
assert not nth_tours.household_id.duplicated().any()
nth_participants = \
joint_tour_participants[joint_tour_participants.tour_id.isin(nth_tours.index)]
timetable = build_joint_tour_timetables(
nth_tours, nth_participants,
persons_timetable, alts)
choices = \
schedule_tours(nth_tours,
persons_merged, alts,
spec, segment,
model_settings,
timetable, timetable_window_id_col,
previous_tour_by_householdid, tour_owner_id_col,
chunk_size, tour_trace_label)
# - update timetables of all joint tour participants
persons_timetable.assign(
nth_participants.person_id,
reindex(choices, nth_participants.tour_id))
choice_list.append(choices)
choices = pd.concat(choice_list)
# add the start, end, and duration from tdd_alts
# assert (alts.index == list(range(alts.shape[0]))).all()
tdd = pd.DataFrame(data=alts.values[choices.values],
columns=alts.columns,
index=choices.index)
# tdd = alts.loc[choices]
# tdd.index = choices.index
tdd.index = choices.index
# include the index of the choice in the tdd alts table
tdd['tdd'] = choices
# print "participant windows after scheduling\n", \
# persons_timetable.slice_windows_by_row_id(joint_tour_participants.person_id)
return tdd, persons_timetable
|
bsd-3-clause
|
spallavolu/scikit-learn
|
examples/covariance/plot_covariance_estimation.py
|
250
|
5070
|
"""
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing an MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal value and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
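# Editor's note: for reference, a manual sketch of the shrinkage formula applied
# by ShrunkCovariance, (1 - a) * S + a * (trace(S) / p) * I. The value of `a`
# below is arbitrary and the result is not used by the rest of the script.
_a_demo = 0.1
_mu_demo = np.trace(emp_cov) / n_features
_shrunk_cov_demo = (1. - _a_demo) * emp_cov + _a_demo * _mu_demo * np.eye(n_features)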
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
|
bsd-3-clause
|
aetilley/scikit-learn
|
sklearn/kernel_ridge.py
|
155
|
6545
|
"""Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_samples] or [n_samples, n_targets]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
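# Editor's note: a minimal, hypothetical usage sketch (not part of scikit-learn).
# It illustrates that predict() is simply K(X, X_fit_) @ dual_coef_, where fit()
# obtains dual_coef_ by solving (K + alpha * I) dual_coef = y.
def _demo_kernel_ridge_prediction():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = rng.randn(20)
    model = KernelRidge(kernel="rbf", alpha=1.0, gamma=0.5).fit(X, y)
    K = model._get_kernel(X, model.X_fit_)
    manual = np.dot(K, model.dual_coef_)
    return np.allclose(manual, model.predict(X))  # True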
|
bsd-3-clause
|
mihail911/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/texmanager.py
|
69
|
16818
|
"""
This module supports embedded TeX expressions in matplotlib via dvipng
and dvips for the raster and postscript backends. The tex and
dvipng/dvips information is cached in ~/.matplotlib/tex.cache for reuse between
sessions
Requirements:
* latex
* \*Agg backends: dvipng
* PS backend: latex w/ psfrag, dvips, and Ghostscript 8.51
(older versions do not work properly)
Backends:
* \*Agg
* PS
* PDF
For raster output, you can get RGBA numpy arrays from TeX expressions
as follows::
texmanager = TexManager()
s = '\\TeX\\ is Number $\\displaystyle\\sum_{n=1}^\\infty\\frac{-e^{i\pi}}{2^n}$!'
Z = self.texmanager.get_rgba(s, size=12, dpi=80, rgb=(1,0,0))
To enable tex rendering of all text in your matplotlib figure, set
text.usetex in your matplotlibrc file (http://matplotlib.sf.net/matplotlibrc)
or include these two lines in your script::
from matplotlib import rc
rc('text', usetex=True)
"""
import copy, glob, os, shutil, sys, warnings
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
import distutils.version
import numpy as np
import matplotlib as mpl
from matplotlib import rcParams
from matplotlib._png import read_png
DEBUG = False
if sys.platform.startswith('win'): cmd_split = '&'
else: cmd_split = ';'
def dvipng_hack_alpha():
stdin, stdout = os.popen4('dvipng -version')
for line in stdout:
if line.startswith('dvipng '):
version = line.split()[-1]
mpl.verbose.report('Found dvipng version %s'% version,
'helpful')
version = distutils.version.LooseVersion(version)
return version < distutils.version.LooseVersion('1.6')
raise RuntimeError('Could not obtain dvipng version')
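# Editor's note: illustrative-only sketch of the version comparison performed
# above, using a hard-coded sample line instead of invoking dvipng.
def _demo_dvipng_version_check(sample_line='dvipng (dvipng) 1.5'):
    version = distutils.version.LooseVersion(sample_line.split()[-1])
    return version < distutils.version.LooseVersion('1.6')  # True -> alpha hack needed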
class TexManager:
"""
Convert strings to dvi files using TeX, caching the results to a
working dir
"""
oldpath = mpl.get_home()
if oldpath is None: oldpath = mpl.get_data_path()
oldcache = os.path.join(oldpath, '.tex.cache')
configdir = mpl.get_configdir()
texcache = os.path.join(configdir, 'tex.cache')
if os.path.exists(oldcache):
print >> sys.stderr, """\
WARNING: found a TeX cache dir in the deprecated location "%s".
Moving it to the new default location "%s"."""%(oldcache, texcache)
shutil.move(oldcache, texcache)
if not os.path.exists(texcache):
os.mkdir(texcache)
_dvipng_hack_alpha = dvipng_hack_alpha()
# mappable cache of
rgba_arrayd = {}
grey_arrayd = {}
postscriptd = {}
pscnt = 0
serif = ('cmr', '')
sans_serif = ('cmss', '')
monospace = ('cmtt', '')
cursive = ('pzc', r'\usepackage{chancery}')
font_family = 'serif'
font_families = ('serif', 'sans-serif', 'cursive', 'monospace')
font_info = {'new century schoolbook': ('pnc',
r'\renewcommand{\rmdefault}{pnc}'),
'bookman': ('pbk', r'\renewcommand{\rmdefault}{pbk}'),
'times': ('ptm', r'\usepackage{mathptmx}'),
'palatino': ('ppl', r'\usepackage{mathpazo}'),
'zapf chancery': ('pzc', r'\usepackage{chancery}'),
'cursive': ('pzc', r'\usepackage{chancery}'),
'charter': ('pch', r'\usepackage{charter}'),
'serif': ('cmr', ''),
'sans-serif': ('cmss', ''),
'helvetica': ('phv', r'\usepackage{helvet}'),
'avant garde': ('pag', r'\usepackage{avant}'),
'courier': ('pcr', r'\usepackage{courier}'),
'monospace': ('cmtt', ''),
'computer modern roman': ('cmr', ''),
'computer modern sans serif': ('cmss', ''),
'computer modern typewriter': ('cmtt', '')}
_rc_cache = None
_rc_cache_keys = ('text.latex.preamble', )\
+ tuple(['font.'+n for n in ('family', ) + font_families])
def __init__(self):
if not os.path.isdir(self.texcache):
os.mkdir(self.texcache)
ff = rcParams['font.family'].lower()
if ff in self.font_families:
self.font_family = ff
else:
mpl.verbose.report('The %s font family is not compatible with LaTeX. serif will be used by default.' % ff, 'helpful')
self.font_family = 'serif'
fontconfig = [self.font_family]
for font_family, font_family_attr in \
[(ff, ff.replace('-', '_')) for ff in self.font_families]:
for font in rcParams['font.'+font_family]:
if font.lower() in self.font_info:
found_font = self.font_info[font.lower()]
setattr(self, font_family_attr,
self.font_info[font.lower()])
if DEBUG:
print 'family: %s, font: %s, info: %s'%(font_family,
font, self.font_info[font.lower()])
break
else:
if DEBUG: print '%s font is not compatible with usetex' % font
else:
mpl.verbose.report('No LaTeX-compatible font found for the %s font family in rcParams. Using default.' % ff, 'helpful')
setattr(self, font_family_attr, self.font_info[font_family])
fontconfig.append(getattr(self, font_family_attr)[0])
self._fontconfig = ''.join(fontconfig)
# The following packages and commands need to be included in the latex
# file's preamble:
cmd = [self.serif[1], self.sans_serif[1], self.monospace[1]]
if self.font_family == 'cursive': cmd.append(self.cursive[1])
while r'\usepackage{type1cm}' in cmd:
cmd.remove(r'\usepackage{type1cm}')
cmd = '\n'.join(cmd)
self._font_preamble = '\n'.join([r'\usepackage{type1cm}', cmd,
r'\usepackage{textcomp}'])
def get_basefile(self, tex, fontsize, dpi=None):
"""
returns a filename based on a hash of the string, fontsize, and dpi
"""
s = ''.join([tex, self.get_font_config(), '%f'%fontsize,
self.get_custom_preamble(), str(dpi or '')])
# make sure hash is consistent for all strings, regardless of encoding:
bytes = unicode(s).encode('utf-8')
return os.path.join(self.texcache, md5(bytes).hexdigest())
def get_font_config(self):
"""Reinitializes self if relevant rcParams on have changed."""
if self._rc_cache is None:
self._rc_cache = dict([(k,None) for k in self._rc_cache_keys])
changed = [par for par in self._rc_cache_keys if rcParams[par] != \
self._rc_cache[par]]
if changed:
if DEBUG: print 'DEBUG following keys changed:', changed
for k in changed:
if DEBUG:
print 'DEBUG %-20s: %-10s -> %-10s' % \
(k, self._rc_cache[k], rcParams[k])
# deepcopy may not be necessary, but feels more future-proof
self._rc_cache[k] = copy.deepcopy(rcParams[k])
if DEBUG: print 'DEBUG RE-INIT\nold fontconfig:', self._fontconfig
self.__init__()
if DEBUG: print 'DEBUG fontconfig:', self._fontconfig
return self._fontconfig
def get_font_preamble(self):
"""
returns a string containing font configuration for the tex preamble
"""
return self._font_preamble
def get_custom_preamble(self):
"""returns a string containing user additions to the tex preamble"""
return '\n'.join(rcParams['text.latex.preamble'])
def _get_shell_cmd(self, *args):
"""
On windows, changing directories can be complicated by the presence of
multiple drives. get_shell_cmd deals with this issue.
"""
if sys.platform == 'win32':
command = ['%s'% os.path.splitdrive(self.texcache)[0]]
else:
command = []
command.extend(args)
return ' && '.join(command)
def make_tex(self, tex, fontsize):
"""
Generate a tex file to render the tex string at a specific font size
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
texfile = '%s.tex'%basefile
fh = file(texfile, 'w')
custom_preamble = self.get_custom_preamble()
fontcmd = {'sans-serif' : r'{\sffamily %s}',
'monospace' : r'{\ttfamily %s}'}.get(self.font_family,
r'{\rmfamily %s}')
tex = fontcmd % tex
if rcParams['text.latex.unicode']:
unicode_preamble = """\usepackage{ucs}
\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
s = r"""\documentclass{article}
%s
%s
%s
\usepackage[papersize={72in,72in}, body={70in,70in}, margin={1in,1in}]{geometry}
\pagestyle{empty}
\begin{document}
\fontsize{%f}{%f}%s
\end{document}
""" % (self._font_preamble, unicode_preamble, custom_preamble,
fontsize, fontsize*1.25, tex)
if rcParams['text.latex.unicode']:
fh.write(s.encode('utf8'))
else:
try:
fh.write(s)
except UnicodeEncodeError, err:
mpl.verbose.report("You are using unicode and latex, but have "
"not enabled the matplotlib 'text.latex.unicode' "
"rcParam.", 'helpful')
raise
fh.close()
return texfile
def make_dvi(self, tex, fontsize):
"""
generates a dvi file containing latex's layout of tex string
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
dvifile = '%s.dvi'% basefile
if DEBUG or not os.path.exists(dvifile):
texfile = self.make_tex(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"'% self.texcache,
'latex -interaction=nonstopmode %s > "%s"'\
%(os.path.split(texfile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
try:
fh = file(outfile)
report = fh.read()
fh.close()
except IOError:
report = 'No latex error report available.'
if exit_status:
raise RuntimeError(('LaTeX was not able to process the following \
string:\n%s\nHere is the full report generated by LaTeX: \n\n'% repr(tex)) + report)
else: mpl.verbose.report(report, 'debug')
for fname in glob.glob(basefile+'*'):
if fname.endswith('dvi'): pass
elif fname.endswith('tex'): pass
else:
try: os.remove(fname)
except OSError: pass
return dvifile
def make_png(self, tex, fontsize, dpi):
"""
generates a png file containing latex's rendering of tex string
returns the filename
"""
basefile = self.get_basefile(tex, fontsize, dpi)
pngfile = '%s.png'% basefile
# see get_rgba for a discussion of the background
if DEBUG or not os.path.exists(pngfile):
dvifile = self.make_dvi(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"' % self.texcache,
'dvipng -bg Transparent -D %s -T tight -o \
"%s" "%s" > "%s"'%(dpi, os.path.split(pngfile)[-1],
os.path.split(dvifile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
try:
fh = file(outfile)
report = fh.read()
fh.close()
except IOError:
report = 'No dvipng error report available.'
if exit_status:
raise RuntimeError('dvipng was not able to \
process the following file:\n%s\nHere is the full report generated by dvipng: \
\n\n'% dvifile + report)
else: mpl.verbose.report(report, 'debug')
try: os.remove(outfile)
except OSError: pass
return pngfile
def make_ps(self, tex, fontsize):
"""
generates a postscript file containing latex's rendering of tex string
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
psfile = '%s.epsf'% basefile
if DEBUG or not os.path.exists(psfile):
dvifile = self.make_dvi(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"'% self.texcache,
'dvips -q -E -o "%s" "%s" > "%s"'\
%(os.path.split(psfile)[-1],
os.path.split(dvifile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status:
raise RuntimeError('dvips was not able to \
process the following file:\n%s\nHere is the full report generated by dvips: \
\n\n'% dvifile + fh.read())
else: mpl.verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
return psfile
def get_ps_bbox(self, tex, fontsize):
"""
returns a list containing the postscript bounding box for latex's
rendering of the tex string
"""
psfile = self.make_ps(tex, fontsize)
ps = file(psfile)
for line in ps:
if line.startswith('%%BoundingBox:'):
return [int(val) for val in line.split()[1:]]
raise RuntimeError('Could not parse %s'%psfile)
def get_grey(self, tex, fontsize=None, dpi=None):
"""returns the alpha channel"""
key = tex, self.get_font_config(), fontsize, dpi
alpha = self.grey_arrayd.get(key)
if alpha is None:
pngfile = self.make_png(tex, fontsize, dpi)
X = read_png(os.path.join(self.texcache, pngfile))
if rcParams['text.dvipnghack'] is not None:
hack = rcParams['text.dvipnghack']
else:
hack = self._dvipng_hack_alpha
if hack:
# hack the alpha channel
# dvipng assumed a constant background, whereas we want to
# overlay these rasters with antialiasing over arbitrary
# backgrounds that may have other figure elements under them.
# When you set dvipng -bg Transparent, it actually makes the
# alpha channel 1 and does the background compositing and
# antialiasing itself and puts the blended data in the rgb
# channels. So what we do is extract the alpha information
# from the red channel, which is a blend of the default dvipng
# background (white) and foreground (black). So the amount of
# red (or green or blue for that matter since white and black
# blend to a grayscale) is the alpha intensity. Once we
# extract the correct alpha information, we assign it to the
# alpha channel properly and let the users pick their rgb. In
# this way, we can overlay tex strings on arbitrary
# backgrounds with antialiasing
#
# red = alpha*red_foreground + (1-alpha)*red_background
#
# Since the foreground is black (0) and the background is
# white (1) this reduces to red = 1-alpha or alpha = 1-red
#alpha = npy.sqrt(1-X[:,:,0]) # should this be sqrt here?
alpha = 1-X[:,:,0]
else:
alpha = X[:,:,-1]
self.grey_arrayd[key] = alpha
return alpha
def get_rgba(self, tex, fontsize=None, dpi=None, rgb=(0,0,0)):
"""
Returns latex's rendering of the tex string as an rgba array
"""
if not fontsize: fontsize = rcParams['font.size']
if not dpi: dpi = rcParams['savefig.dpi']
r,g,b = rgb
key = tex, self.get_font_config(), fontsize, dpi, tuple(rgb)
Z = self.rgba_arrayd.get(key)
if Z is None:
alpha = self.get_grey(tex, fontsize, dpi)
Z = np.zeros((alpha.shape[0], alpha.shape[1], 4), np.float)
Z[:,:,0] = r
Z[:,:,1] = g
Z[:,:,2] = b
Z[:,:,3] = alpha
self.rgba_arrayd[key] = Z
return Z
|
gpl-3.0
|
TrafficSenseMSD/core
|
misc/figuring_out_traci/san_pablo_dam/data/analyzeData.py
|
1
|
4191
|
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2008-2017 German Aerospace Center (DLR) and others.
# This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v2.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v20.html
# @file analyzeData.py
# @author Daniel Krajzewicz
# @author Laura Bieker
# @date 2011-09-30
# @version $Id: analyzeData.py 26301 2017-10-02 20:48:38Z behrisch $
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
import math
import numpy as np
def getAttr(line, which):
beg = line.find(which)
beg = line.find('"', beg)
end = line.find('"', beg + 1)
return line[beg + 1:end]
# this is from here: http://code.activestate.com/recipes/389639
class Ddict(dict):
def __init__(self, default=None):
self.default = default
def __getitem__(self, key):
if key not in self:
self[key] = self.default()
return dict.__getitem__(self, key)
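# Editor's note: a minimal, illustrative example of the Ddict helper above
# (editorial addition, not part of the original script).
def _demo_ddict():
    counts = Ddict(lambda: 0)
    counts['lane_0'] += 1  # missing keys are created with the default factory
    counts['lane_0'] += 1
    return counts['lane_0']  # 2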
# os.system('run-an-external-command')
# os.getcwd()
# os.chdir()
f = open(sys.argv[1], 'r')
data = f.readlines()
f.close()
dd = Ddict(lambda: Ddict(lambda: 0))
# f1 = open('raw-results.txt','w')
f1 = open('tmp.txt', 'w')
for i in range(1, len(data)):
if data[i].find('<interval') != -1:
ll = data[i].split('"')
nn = int(getAttr(data[i], "nVehContrib")) # int(ll[7])
lane = int(getAttr(data[i], "id")[-1:]) # int(ll[5])
tt = float(getAttr(data[i], "begin")) # float(ll[1])
itt = int(tt)
if nn > 0:
print(tt, lane, nn, ll[9], ll[11], ll[13], ll[15], file=f1)
dd[itt][lane] = nn
f1.close()
maxLanes = 0
dt2OneHour = 6.0
for t in dd.iterkeys():
if len(dd[t]) > maxLanes:
maxLanes = len(dd[t])
tVec = np.zeros(len(dd), dtype=int)
QVec = np.zeros(len(dd), dtype=int)
xVec = np.zeros((len(dd), maxLanes), dtype=float)
qVec = np.zeros((len(dd), maxLanes), dtype=float)
vecIndx = 0
f = open('lane-shares.txt', 'w')
# for t,v in dd.items():
for t in sorted(dd.iterkeys()):
# qTot = math.fsum(dd[t])
qTot = sum(dd[t].values())
nrm = 0.0
if qTot:
nrm = 1.0 / qTot
s = repr(t) + ' ' + repr(qTot) + ' '
tVec[vecIndx] = t
QVec[vecIndx] = dt2OneHour * qTot
for lane in range(maxLanes):
share = 0.0
if lane in dd[t]:
share = nrm * dd[t][lane]
s = s + repr(share) + ' '
xVec[vecIndx, lane] = share
qVec[vecIndx, lane] = dt2OneHour * dd[t][lane]
# print >> f,t,qTot,lane,share
vecIndx += 1
print(s, file=f)
f.close()
try:
import matplotlib.pyplot as plt
plt.rcParams['xtick.direction'] = 'out'
plt.rcParams['ytick.direction'] = 'out'
# y =
n = len(qVec)
for lane in range(maxLanes):
desc = 'lane: ' + repr(lane)
plt.plot(tVec, qVec[range(n), lane], label=desc)
# plt.plot(tVec, qVec[range(n),0], 'r-',tVec, qVec[range(n),1], 'g-',tVec, qVec[range(n),2], 'b-')
# plt.plot(tVec, QVec, 'r-')
plt.ylabel('lane flows')
plt.xlabel('time [s]')
plt.legend()
bname = 'flows-over-time-' + repr(maxLanes)
plt.savefig(bname + '.eps')
plt.savefig(bname + '.pdf')
plt.savefig(bname + '.png')
plt.savefig(bname + '.svg')
# try:
# import pyemf
# plt.savefig('shares-over-time.emf')
# except :
# print '# no emf support'
# plt.show()
plt.close()
# next plot:
for lane in range(maxLanes):
desc = 'lane: ' + repr(lane)
plt.plot(QVec, xVec[range(n), lane], 'o', markersize=10, label=desc)
# plt.plot(tVec, qVec[range(n),0], 'r-',tVec, qVec[range(n),1], 'g-',tVec, qVec[range(n),2], 'b-')
# plt.plot(tVec, QVec, 'r-')
plt.ylabel('lane shares')
plt.xlabel('total flow [veh/h]')
plt.legend()
bname = 'shares-vs-flow-' + repr(maxLanes)
plt.savefig(bname + '.eps')
plt.savefig(bname + '.pdf')
plt.savefig(bname + '.png')
plt.savefig(bname + '.svg')
# plt.show()
plt.close()
except ImportError:
print('no matplotlib, falling back to gnuplot')
os.system('gnuplot do-some-plots.gnu')
|
epl-1.0
|
sinhrks/scikit-learn
|
sklearn/tests/test_kernel_approximation.py
|
78
|
7586
|
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
# abbreviations for easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
# test that SkewedChi2Sampler approximates kernel on random data
# compute exact kernel
c = 0.03
# abbreviations for easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array is n_samples_x x n_samples_y x n_features, so it can be big
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
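# Editor's note: a hedged, self-contained sketch of the random Fourier feature
# construction that RBFSampler is based on (Rahimi & Recht), not sklearn's exact
# implementation: z(x) = sqrt(2/D) * cos(W x + b) with W ~ N(0, 2*gamma*I) and
# b ~ U(0, 2*pi), so that z(x) . z(y) approximates exp(-gamma * ||x - y||^2).
def _demo_random_fourier_features(gamma=10., n_components=2000):
    rng_local = np.random.RandomState(0)
    W = rng_local.normal(scale=np.sqrt(2 * gamma), size=(X.shape[1], n_components))
    b = rng_local.uniform(0, 2 * np.pi, size=n_components)
    Zx = np.sqrt(2. / n_components) * np.cos(np.dot(X, W) + b)
    Zy = np.sqrt(2. / n_components) * np.cos(np.dot(Y, W) + b)
    return np.dot(Zx, Zy.T)  # approximates rbf_kernel(X, Y, gamma=gamma)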
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
assert_true(np.all(np.isfinite(X_transformed)))  # transformed features stay finite despite singular K
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
|
bsd-3-clause
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/pandas/core/indexes/category.py
|
3
|
24652
|
import numpy as np
from pandas._libs import index as libindex
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.generic import ABCCategorical, ABCSeries
from pandas.core.dtypes.common import (
is_categorical_dtype,
_ensure_platform_int,
is_list_like,
is_interval_dtype,
is_scalar)
from pandas.core.common import (_asarray_tuplesafe,
_values_from_object)
from pandas.core.dtypes.missing import array_equivalent
from pandas.core.algorithms import take_1d
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.config import get_option
from pandas.core.indexes.base import Index, _index_shared_docs
import pandas.core.base as base
import pandas.core.missing as missing
import pandas.core.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(dict(target_klass='CategoricalIndex'))
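# Editor's note: minimal, illustrative usage of the class defined below; this
# helper is an editorial addition (not part of pandas) and its names are only
# resolved when it is called.
def _demo_categorical_index():
    idx = CategoricalIndex(['a', 'b', 'a'], categories=['a', 'b', 'c'], ordered=True)
    return idx.categories, idx.ordered  # (Index(['a', 'b', 'c'], ...), True)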
class CategoricalIndex(Index, base.PandasDelegate):
"""
Immutable Index implementing an ordered, sliceable set. CategoricalIndex
represents a sparsely populated Index with an underlying Categorical.
.. versionadded:: 0.16.1
Parameters
----------
data : array-like or Categorical, (1-dimensional)
categories : optional, array-like
categories for the CategoricalIndex
ordered : boolean,
designating if the categories are ordered
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
See Also
--------
Categorical, Index
"""
_typ = 'categoricalindex'
_engine_type = libindex.Int64Engine
_attributes = ['name']
def __new__(cls, data=None, categories=None, ordered=None, dtype=None,
copy=False, name=None, fastpath=False, **kwargs):
if fastpath:
return cls._simple_new(data, name=name)
if name is None and hasattr(data, 'name'):
name = data.name
if isinstance(data, ABCCategorical):
data = cls._create_categorical(cls, data, categories, ordered)
elif isinstance(data, CategoricalIndex):
data = data._data
data = cls._create_categorical(cls, data, categories, ordered)
else:
# don't allow scalars
# if data is None, then categories must be provided
if is_scalar(data):
if data is not None or categories is None:
cls._scalar_data_error(data)
data = []
data = cls._create_categorical(cls, data, categories, ordered)
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
def _create_from_codes(self, codes, categories=None, ordered=None,
name=None):
"""
*this is an internal non-public method*
create the correct categorical from codes
Parameters
----------
codes : new codes
categories : optional categories, defaults to existing
ordered : optional ordered attribute, defaults to existing
name : optional name attribute, defaults to existing
Returns
-------
CategoricalIndex
"""
from pandas.core.categorical import Categorical
if categories is None:
categories = self.categories
if ordered is None:
ordered = self.ordered
if name is None:
name = self.name
        cat = Categorical.from_codes(codes, categories=categories,
                                     ordered=ordered)
return CategoricalIndex(cat, name=name)
@staticmethod
def _create_categorical(self, data, categories=None, ordered=None):
"""
*this is an internal non-public method*
create the correct categorical from data and the properties
Parameters
----------
data : data for new Categorical
categories : optional categories, defaults to existing
ordered : optional ordered attribute, defaults to existing
Returns
-------
Categorical
"""
if not isinstance(data, ABCCategorical):
ordered = False if ordered is None else ordered
from pandas.core.categorical import Categorical
data = Categorical(data, categories=categories, ordered=ordered)
else:
if categories is not None:
data = data.set_categories(categories)
if ordered is not None:
data = data.set_ordered(ordered)
return data
@classmethod
def _simple_new(cls, values, name=None, categories=None, ordered=None,
**kwargs):
result = object.__new__(cls)
values = cls._create_categorical(cls, values, categories, ordered)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
result._reset_identity()
return result
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, categories=None, ordered=None,
**kwargs):
# categories and ordered can't be part of attributes,
# as these are properties
if categories is None:
categories = self.categories
if ordered is None:
ordered = self.ordered
return super(CategoricalIndex,
self)._shallow_copy(values=values, categories=categories,
ordered=ordered, **kwargs)
def _is_dtype_compat(self, other):
"""
*this is an internal non-public method*
provide a comparison between the dtype of self and other (coercing if
needed)
Raises
------
TypeError if the dtypes are not compatible
"""
if is_categorical_dtype(other):
if isinstance(other, CategoricalIndex):
other = other._values
if not other.is_dtype_equal(self):
raise TypeError("categories must match existing categories "
"when appending")
else:
values = other
if not is_list_like(values):
values = [values]
other = CategoricalIndex(self._create_categorical(
self, other, categories=self.categories, ordered=self.ordered))
if not other.isin(values).all():
raise TypeError("cannot append a non-category item to a "
"CategoricalIndex")
return other
def equals(self, other):
"""
        Determines if two CategoricalIndex objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
try:
other = self._is_dtype_compat(other)
return array_equivalent(self._data, other)
except (TypeError, ValueError):
pass
return False
@property
def _formatter_func(self):
return self.categories._formatter_func
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
attrs = [
('categories',
ibase.default_pprint(self.categories,
max_seq_items=max_categories)),
('ordered', self.ordered)]
if self.name is not None:
attrs.append(('name', ibase.default_pprint(self.name)))
attrs.append(('dtype', "'%s'" % self.dtype))
max_seq_items = get_option('display.max_seq_items') or len(self)
if len(self) > max_seq_items:
attrs.append(('length', len(self)))
return attrs
@property
def inferred_type(self):
return 'categorical'
@property
def values(self):
""" return the underlying data, which is a Categorical """
return self._data
def get_values(self):
""" return the underlying data as an ndarray """
return self._data.get_values()
@property
def codes(self):
return self._data.codes
@property
def categories(self):
return self._data.categories
@property
def ordered(self):
return self._data.ordered
def _reverse_indexer(self):
return self._data._reverse_indexer()
@Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)
def __contains__(self, key):
hash(key)
if self.categories._defer_to_indexing:
return key in self.categories
return key in self.values
@Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
def contains(self, key):
hash(key)
if self.categories._defer_to_indexing:
return self.categories.contains(key)
return key in self.values
def __array__(self, dtype=None):
""" the array interface, return my values """
return np.array(self._data, dtype=dtype)
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
if is_interval_dtype(dtype):
from pandas import IntervalIndex
return IntervalIndex.from_intervals(np.array(self))
return super(CategoricalIndex, self).astype(dtype=dtype, copy=copy)
@cache_readonly
def _isnan(self):
""" return if each value is nan"""
return self._data.codes == -1
@Appender(ibase._index_shared_docs['fillna'])
def fillna(self, value, downcast=None):
self._assert_can_do_op(value)
return CategoricalIndex(self._data.fillna(value), name=self.name)
def argsort(self, *args, **kwargs):
return self.values.argsort(*args, **kwargs)
@cache_readonly
def _engine(self):
# we are going to look things up with the codes themselves
return self._engine_type(lambda: self.codes.astype('i8'), len(self))
@cache_readonly
def is_unique(self):
return not self.duplicated().any()
@Appender(base._shared_docs['unique'] % _index_doc_kwargs)
def unique(self):
result = base.IndexOpsMixin.unique(self)
        # CategoricalIndex._shallow_copy keeps the original categories
        # and ordered attribute if not otherwise specified
return self._shallow_copy(result, categories=result.categories,
ordered=result.ordered)
@Appender(base._shared_docs['duplicated'] % _index_doc_kwargs)
def duplicated(self, keep='first'):
from pandas._libs.hashtable import duplicated_int64
codes = self.codes.astype('i8')
return duplicated_int64(codes, keep)
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self.astype('object')
def get_loc(self, key, method=None):
"""
Get integer location for requested label
Parameters
----------
key : label
method : {None}
* default: exact matches only.
Returns
-------
loc : int if unique index, possibly slice or mask if not
"""
codes = self.categories.get_loc(key)
if (codes == -1):
raise KeyError(key)
return self._engine.get_loc(codes)
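    # Hedged sketch of get_loc behaviour (hypothetical labels; assumes
    # ``import pandas as pd``):
    #
    #   >>> pd.CategoricalIndex(list('abc')).get_loc('b')   # unique -> 1
    #   >>> pd.CategoricalIndex(list('abab')).get_loc('b')  # non-unique -> mask/slice
    #   >>> pd.CategoricalIndex(list('abc')).get_loc('z')   # raises KeyError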
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
try:
k = _values_from_object(key)
k = self._convert_scalar_indexer(k, kind='getitem')
indexer = self.get_loc(k)
return series.iloc[indexer]
except (KeyError, TypeError):
pass
        # we might be a positional indexer
return super(CategoricalIndex, self).get_value(series, key)
def _can_reindex(self, indexer):
""" always allow reindexing """
pass
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
values = np.where(cond, self.values, other)
from pandas.core.categorical import Categorical
cat = Categorical(values,
categories=self.categories,
ordered=self.ordered)
return self._shallow_copy(cat, **self._get_attributes_dict())
def reindex(self, target, method=None, level=None, limit=None,
tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
if method is not None:
raise NotImplementedError("argument method is not implemented for "
"CategoricalIndex.reindex")
if level is not None:
raise NotImplementedError("argument level is not implemented for "
"CategoricalIndex.reindex")
if limit is not None:
raise NotImplementedError("argument limit is not implemented for "
"CategoricalIndex.reindex")
target = ibase._ensure_index(target)
if not is_categorical_dtype(target) and not target.is_unique:
raise ValueError("cannot reindex with a non-unique indexer")
indexer, missing = self.get_indexer_non_unique(np.array(target))
new_target = self.take(indexer)
# filling in missing if needed
if len(missing):
cats = self.categories.get_indexer(target)
if (cats == -1).any():
# coerce to a regular index here!
result = Index(np.array(self), name=self.name)
new_target, indexer, _ = result._reindex_non_unique(
np.array(target))
else:
codes = new_target.codes.copy()
codes[indexer == -1] = cats[missing]
new_target = self._create_from_codes(codes)
# we always want to return an Index type here
# to be consistent with .reindex for other index types (e.g. they don't
# coerce based on the actual values, only on the dtype)
        # unless we had an initial Categorical to begin with
# in which case we are going to conform to the passed Categorical
new_target = np.asarray(new_target)
if is_categorical_dtype(target):
new_target = target._shallow_copy(new_target, name=self.name)
else:
new_target = Index(new_target, name=self.name)
return new_target, indexer
def _reindex_non_unique(self, target):
""" reindex from a non-unique; which CategoricalIndex's are almost
always
"""
new_target, indexer = self.reindex(target)
new_indexer = None
check = indexer == -1
if check.any():
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[check] = -1
cats = self.categories.get_indexer(target)
if not (cats == -1).any():
# .reindex returns normal Index. Revert to CategoricalIndex if
# all targets are included in my categories
new_target = self._shallow_copy(new_target)
return new_target, indexer, new_indexer
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = ibase._ensure_index(target)
if self.equals(target):
return np.arange(len(self), dtype='intp')
if method == 'pad' or method == 'backfill':
raise NotImplementedError("method='pad' and method='backfill' not "
"implemented yet for CategoricalIndex")
elif method == 'nearest':
raise NotImplementedError("method='nearest' not implemented yet "
'for CategoricalIndex')
if (isinstance(target, CategoricalIndex) and
self.values.is_dtype_equal(target)):
# we have the same codes
codes = target.codes
else:
if isinstance(target, CategoricalIndex):
code_indexer = self.categories.get_indexer(target.categories)
codes = take_1d(code_indexer, target.codes, fill_value=-1)
else:
codes = self.categories.get_indexer(target)
indexer, _ = self._engine.get_indexer_non_unique(codes)
return _ensure_platform_int(indexer)
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = ibase._ensure_index(target)
if isinstance(target, CategoricalIndex):
target = target.categories
codes = self.categories.get_indexer(target)
return self._engine.get_indexer_non_unique(codes)
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
if self.categories._defer_to_indexing:
return self.categories._convert_scalar_indexer(key, kind=kind)
return super(CategoricalIndex, self)._convert_scalar_indexer(
key, kind=kind)
@Appender(_index_shared_docs['_convert_list_indexer'])
def _convert_list_indexer(self, keyarr, kind=None):
# Return our indexer or raise if all of the values are not included in
# the categories
if self.categories._defer_to_indexing:
indexer = self.categories._convert_list_indexer(keyarr, kind=kind)
return Index(self.codes).get_indexer_for(indexer)
indexer = self.categories.get_indexer(np.asarray(keyarr))
if (indexer == -1).any():
raise KeyError(
"a list-indexer must only "
"include values that are "
"in the categories")
return self.get_indexer(keyarr)
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
keyarr = _asarray_tuplesafe(keyarr)
if self.categories._defer_to_indexing:
return keyarr
return self._shallow_copy(keyarr)
@Appender(_index_shared_docs['_convert_index_indexer'])
def _convert_index_indexer(self, keyarr):
return self._shallow_copy(keyarr)
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = _ensure_platform_int(indices)
taken = self._assert_take_fillable(self.codes, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=-1)
return self._create_from_codes(taken)
def is_dtype_equal(self, other):
return self._data.is_dtype_equal(other)
take_nd = take
def map(self, mapper):
"""Apply mapper function to its categories (not codes).
Parameters
----------
mapper : callable
Function to be applied. When all categories are mapped
to different categories, the result will be a CategoricalIndex
which has the same order property as the original. Otherwise,
the result will be a Index.
Returns
-------
applied : CategoricalIndex or Index
"""
return self._shallow_copy_with_infer(self.values.map(mapper))
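    # Illustrative sketch of map (hypothetical callables; assumes
    # ``import pandas as pd``):
    #
    #   >>> ci = pd.CategoricalIndex(['a', 'b', 'c'])
    #   >>> ci.map(lambda x: x.upper())  # distinct results -> CategoricalIndex
    #   >>> ci.map(lambda x: 'x')        # collapsed results -> plain Index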
def delete(self, loc):
"""
Make new Index with passed location(-s) deleted
Returns
-------
new_index : Index
"""
return self._create_from_codes(np.delete(self.codes, loc))
def insert(self, loc, item):
"""
Make new Index inserting new item at location. Follows
Python list.append semantics for negative values
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
Raises
------
        TypeError if the item is not in the categories
"""
code = self.categories.get_indexer([item])
if (code == -1):
raise TypeError("cannot insert an item into a CategoricalIndex "
"that is not already an existing category")
codes = self.codes
codes = np.concatenate((codes[:loc], code, codes[loc:]))
return self._create_from_codes(codes)
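    # Hedged example of insert (hypothetical values; assumes
    # ``import pandas as pd``):
    #
    #   >>> ci = pd.CategoricalIndex(['a', 'b'], categories=['a', 'b'])
    #   >>> ci.insert(1, 'a')  # -> CategoricalIndex(['a', 'a', 'b'], ...)
    #   >>> ci.insert(1, 'z')  # raises TypeError ('z' is not a category)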
def _append_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class
        TypeError if other is not in the categories
"""
to_concat = [self._is_dtype_compat(c) for c in to_concat]
codes = np.concatenate([c.codes for c in to_concat])
result = self._create_from_codes(codes, name=name)
# if name is None, _create_from_codes sets self.name
result.name = name
return result
def _codes_for_groupby(self, sort):
""" Return a Categorical adjusted for groupby """
return self.values._codes_for_groupby(sort)
@classmethod
def _add_comparison_methods(cls):
""" add in comparison methods """
def _make_compare(op):
def _evaluate_compare(self, other):
# if we have a Categorical type, then must have the same
# categories
if isinstance(other, CategoricalIndex):
other = other._values
elif isinstance(other, Index):
other = self._create_categorical(
self, other._values, categories=self.categories,
ordered=self.ordered)
if isinstance(other, (ABCCategorical, np.ndarray,
ABCSeries)):
if len(self.values) != len(other):
raise ValueError("Lengths must match to compare")
if isinstance(other, ABCCategorical):
if not self.values.is_dtype_equal(other):
raise TypeError("categorical index comparisions must "
"have the same categories and ordered "
"attributes")
return getattr(self.values, op)(other)
return _evaluate_compare
cls.__eq__ = _make_compare('__eq__')
cls.__ne__ = _make_compare('__ne__')
cls.__lt__ = _make_compare('__lt__')
cls.__gt__ = _make_compare('__gt__')
cls.__le__ = _make_compare('__le__')
cls.__ge__ = _make_compare('__ge__')
def _delegate_method(self, name, *args, **kwargs):
""" method delegation to the ._values """
method = getattr(self._values, name)
if 'inplace' in kwargs:
raise ValueError("cannot use inplace with CategoricalIndex")
res = method(*args, **kwargs)
if is_scalar(res):
return res
return CategoricalIndex(res, name=self.name)
@classmethod
def _add_accessors(cls):
""" add in Categorical accessor methods """
from pandas.core.categorical import Categorical
CategoricalIndex._add_delegate_accessors(
delegate=Categorical, accessors=["rename_categories",
"reorder_categories",
"add_categories",
"remove_categories",
"remove_unused_categories",
"set_categories",
"as_ordered", "as_unordered",
"min", "max"],
typ='method', overwrite=True)
CategoricalIndex._add_numeric_methods_add_sub_disabled()
CategoricalIndex._add_numeric_methods_disabled()
CategoricalIndex._add_logical_methods_disabled()
CategoricalIndex._add_comparison_methods()
CategoricalIndex._add_accessors()
|
mit
|
albanie/mcnFasterRCNN
|
misc/python/compute_feats.py
|
1
|
9726
|
#!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
# ----------------------------------
# bugfix
# ----------------------------------
import matplotlib
matplotlib.use('Agg')
# ----------------------------------
# PYTHONPATH hacking
# ----------------------------------
from os.path import join as pjoin
import os, sys
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
base = os.path.expanduser('~/coding/libs/py-faster-rcnn2')
caffe_path = pjoin(base, 'caffe-fast-rcnn/python')
zsvision_path = os.path.expanduser('~/coding/src/zsvision/python')
lib_path = pjoin(base, 'lib')
add_path(caffe_path)
add_path(lib_path)
add_path(zsvision_path)
import ipdb
import caffe, cv2
from fast_rcnn.config import cfg
from utils.blob import im_list_to_blob
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
from fast_rcnn.bbox_transform import clip_boxes, bbox_transform_inv
from zsvision.zs_iterm import zs_dispFig
vl_rootnn = os.path.expanduser('~/coding/libs/matconvnets/contrib-matconvnet')
im_path = pjoin(vl_rootnn, 'contrib/mcnFasterRCNN/python', '000067.jpg')
net_name = 'VGG16' ; fname = 'blobs-{}.mat'.format(net_name)
blob_save_path = pjoin(vl_rootnn, 'contrib/mcnFasterRCNN/feats', fname)
im_minus_path = pjoin(vl_rootnn, 'contrib/mcnFasterRCNN/feats', 'im-minus.mat')
CLASSES = ('__background__',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
def _get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
sio.savemat(im_minus_path, {'im_minus':im_orig}, oned_as='column')
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
def _get_blobs(im, rois):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {'data' : None, 'rois' : None}
blobs['data'], im_scale_factors = _get_image_blob(im)
if not cfg.TEST.HAS_RPN:
blobs['rois'] = _get_rois_blob(rois, im_scale_factors)
return blobs, im_scale_factors
def im_detect(net, im, boxes=None):
"""Detect object classes in an image given object proposals.
Arguments:
net (caffe.Net): Fast R-CNN network to use
im (ndarray): color image to test (in BGR order)
boxes (ndarray): R x 4 array of object proposals or None (for RPN)
Returns:
scores (ndarray): R x K array of object class scores (K includes
background as object category 0)
boxes (ndarray): R x (4*K) array of predicted bounding boxes
"""
blobs, im_scales = _get_blobs(im, boxes)
# When mapping from image ROIs to feature map ROIs, there's some aliasing
# (some distinct image ROIs get mapped to the same feature ROI).
# Here, we identify duplicate feature ROIs, so we only compute features
# on the unique subset.
if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v)
_, index, inv_index = np.unique(hashes, return_index=True,
return_inverse=True)
blobs['rois'] = blobs['rois'][index, :]
boxes = boxes[index, :]
if cfg.TEST.HAS_RPN:
im_blob = blobs['data']
blobs['im_info'] = np.array(
[[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
dtype=np.float32)
# reshape network inputs
net.blobs['data'].reshape(*(blobs['data'].shape))
if cfg.TEST.HAS_RPN:
net.blobs['im_info'].reshape(*(blobs['im_info'].shape))
else:
net.blobs['rois'].reshape(*(blobs['rois'].shape))
# do forward
forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}
if cfg.TEST.HAS_RPN:
forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False)
else:
forward_kwargs['rois'] = blobs['rois'].astype(np.float32, copy=False)
blobs_out = net.forward(**forward_kwargs)
if cfg.TEST.HAS_RPN:
assert len(im_scales) == 1, "Only single-image batch implemented"
rois = net.blobs['rois'].data.copy()
# unscale back to raw image space
boxes = rois[:, 1:5] / im_scales[0]
if cfg.TEST.SVM:
# use the raw scores before softmax under the assumption they
# were trained as linear SVMs
scores = net.blobs['cls_score'].data
else:
# use softmax estimated probabilities
scores = blobs_out['cls_prob']
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = blobs_out['bbox_pred']
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes = clip_boxes(pred_boxes, im.shape)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:
# Map scores and predictions back to the original set of boxes
scores = scores[inv_index, :]
pred_boxes = pred_boxes[inv_index, :]
return scores, pred_boxes, net.blobs
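# Hedged call sketch for im_detect (the image path below is hypothetical):
#
#   im = cv2.imread('example.jpg')
#   scores, pred_boxes, blobs = im_detect(net, im)
#   # scores: (R, K) class probabilities; pred_boxes: (R, 4*K) per-class boxes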
def vis_detections(im, class_name, dets, thresh=0.5):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
ax.set_title(('{} detections with '
'p({} | box) >= {:.1f}').format(class_name, class_name,
thresh),
fontsize=14)
plt.axis('off')
plt.tight_layout()
plt.draw()
def demo(net, im_path, blob_save_path):
"""Detect object classes in an image using pre-computed object proposals."""
im = cv2.imread(im_path)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic() ;
scores, boxes, blobs = im_detect(net, im) ;
timer.toc()
blobs_data = dict()
for key in blobs:
new_key = key.replace('/', '_')
blobs_data[new_key] = blobs[key].data
sio.savemat(blob_save_path, blobs_data, oned_as='column')
msg = 'Detection took {:.3f}s for {:d} object proposals'
print(msg.format(timer.total_time, boxes.shape[0]))
# Visualize detections for each class
CONF_THRESH = 0.8 ; NMS_THRESH = 0.3
for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1 # because we skipped background
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
vis_detections(im, cls, dets, thresh=CONF_THRESH)
cfg.TEST.HAS_RPN = True # Use RPN for proposals
if net_name == 'VGG16':
pt = 'VGG16/faster_rcnn_alt_opt/faster_rcnn_test.pt'
model = 'faster_rcnn_models/VGG16_faster_rcnn_final.caffemodel'
elif net_name == 'ZF':
pt = 'ZF/faster_rcnn_alt_opt/faster_rcnn_test.pt'
model = 'faster_rcnn_models/ZF_faster_rcnn_final.caffemodel'
else:
raise ValueError('{} not recognised'.format(net_name))
prototxt = pjoin(cfg.MODELS_DIR, pt)
caffemodel = pjoin(cfg.DATA_DIR, model)
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\nDid you run ./data/script/'
'fetch_faster_rcnn_models.sh?').format(caffemodel))
caffe.set_mode_cpu()
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
demo(net, im_path, blob_save_path)
plt.show()
zs_dispFig()
|
mit
|
bobmyhill/burnman
|
examples/example_chemical_potentials.py
|
2
|
6809
|
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
example_chemical_potentials
---------------------------
This example shows how to use the chemical potentials library of functions.
*Demonstrates:*
* How to calculate chemical potentials
* How to compute fugacities and relative fugacities
"""
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import burnman_path # adds the local burnman directory to the path
import burnman
import burnman.constants as constants
import burnman.processchemistry as processchemistry
import burnman.chemicalpotentials as chemical_potentials
import burnman.minerals as minerals
assert burnman_path # silence pyflakes warning
if __name__ == "__main__":
'''
Here we initialise the minerals we'll be using
'''
P = 1.e9
T = 1000.
fa = minerals.HP_2011_ds62.fa()
mt = minerals.HP_2011_ds62.mt()
qtz = minerals.HP_2011_ds62.q()
FMQ = [fa, mt, qtz]
for mineral in FMQ:
mineral.set_state(P, T)
'''
Here we find chemical potentials of FeO, SiO2 and O2 for
an assemblage containing fayalite, magnetite and quartz,
    and a second assemblage of magnetite and hematite
at 1 GPa, 1000 K
'''
component_formulae = ['FeO', 'SiO2', 'O2']
component_formulae_dict = [processchemistry.dictionarize_formula(f)
for f in component_formulae]
chem_potentials = chemical_potentials.chemical_potentials(FMQ, component_formulae_dict)
oxygen = minerals.HP_2011_fluids.O2()
oxygen.set_state(P, T)
hem = minerals.HP_2011_ds62.hem()
MH = [mt, hem]
for mineral in MH:
mineral.set_state(P, T)
print('log10(fO2) at the FMQ buffer:', np.log10(chemical_potentials.fugacity(oxygen, FMQ)))
print('log10(fO2) at the mt-hem buffer:', np.log10(chemical_potentials.fugacity(oxygen, MH)))
print('Relative log10(fO2):', np.log10(chemical_potentials.relative_fugacity(oxygen, FMQ, MH)))
'''
Here we find the oxygen fugacity of the
FMQ buffer, and compare it to published values.
    Fugacity is often defined relative to a material at
    some fixed reference pressure (in this case, O2).
    Here we use room pressure, 100 kPa.
'''
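    # For clarity: with mu_O2 measured relative to O2 at the reference
    # pressure, fO2 = exp(mu_O2 / (R*T)), so log10(fO2) = mu_O2 / (R*T*ln(10)),
    # which is what the loop below evaluates.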
# Set up arrays
temperatures = np.linspace(900., 1420., 100)
log10fO2_FMQ_ONeill1987 = np.empty_like(temperatures)
log10fO2_FMQ = np.empty_like(temperatures)
invT = np.empty_like(temperatures)
# Reference and assemblage pressure
Pr = 1.e5
P = 1.e5
for i, T in enumerate(temperatures):
# Set states
oxygen.set_state(Pr, T)
for mineral in FMQ:
mineral.set_state(P, T)
# The chemical potential and fugacity of O2 at the FMQ buffer
# according to O'Neill, 1987
muO2_FMQ_ONeill1987 = -587474. + 1584.427 * \
T - 203.3164 * T * np.log(T) + 0.092710 * T * T
log10fO2_FMQ_ONeill1987[i] = np.log10(
np.exp((muO2_FMQ_ONeill1987) / (constants.gas_constant * T)))
invT[i] = 10000. / (T)
# The calculated chemical potential and fugacity of O2 at the FMQ
# buffer
log10fO2_FMQ[i] = np.log10(chemical_potentials.fugacity(oxygen, FMQ))
# Plot the FMQ log10(fO2) values
plt.plot(temperatures, log10fO2_FMQ_ONeill1987,
             'k', linewidth=1., label='FMQ (O\'Neill, 1987)')
plt.plot(temperatures, log10fO2_FMQ, 'b--',
linewidth=2., label='FMQ (HP 2011 ds62)')
# Do the same for Re-ReO2
'''
Here we define two minerals, Re (rhenium) and
ReO2 (tugarinovite)
'''
class Re (burnman.Mineral):
def __init__(self):
formula = 'Re1.0'
formula = processchemistry.dictionarize_formula(formula)
self.params = {
'name': 'Re',
'formula': formula,
'equation_of_state': 'hp_tmt',
'H_0': 0.0,
'S_0': 36.53,
'V_0': 8.862e-06,
'Cp': [23.7, 0.005448, 68.0, 0.0],
'a_0': 1.9e-05,
'K_0': 3.6e+11,
'Kprime_0': 4.05,
'Kdprime_0': -1.1e-11,
'n': sum(formula.values()),
'molar_mass': processchemistry.formula_mass(formula)}
burnman.Mineral.__init__(self)
class ReO2 (burnman.Mineral):
def __init__(self):
formula = 'Re1.0O2.0'
formula = processchemistry.dictionarize_formula(formula)
self.params = {
'name': 'ReO2',
'formula': formula,
'equation_of_state': 'hp_tmt',
'H_0': -445140.0,
'S_0': 47.82,
'V_0': 1.8779e-05,
'Cp': [76.89, 0.00993, -1207130.0, -208.0],
'a_0': 4.4e-05,
'K_0': 1.8e+11,
'Kprime_0': 4.05,
'Kdprime_0': -2.25e-11,
'n': sum(formula.values()),
'molar_mass': processchemistry.formula_mass(formula)}
burnman.Mineral.__init__(self)
'''
Here we find the oxygen fugacity of the Re-ReO2
buffer, and again compare it to published values.
'''
# Mineral and assemblage definitions
rhenium = Re()
rheniumIVoxide = ReO2()
ReReO2buffer = [rhenium, rheniumIVoxide]
# Set up arrays
temperatures = np.linspace(850., 1250., 100)
log10fO2_Re_PO1994 = np.empty_like(temperatures)
log10fO2_ReReO2buffer = np.empty_like(temperatures)
for i, T in enumerate(temperatures):
# Set states
oxygen.set_state(Pr, T)
for mineral in ReReO2buffer:
mineral.set_state(P, T)
# The chemical potential and fugacity of O2 at the Re-ReO2 buffer
        # according to Pownceby and O'Neill, 1994
muO2_Re_PO1994 = -451020 + 297.595 * T - 14.6585 * T * np.log(T)
log10fO2_Re_PO1994[i] = np.log10(
np.exp((muO2_Re_PO1994) / (constants.gas_constant * T)))
invT[i] = 10000. / (T)
# The chemical potential and fugacity of O2 at the Re-ReO2 buffer
log10fO2_ReReO2buffer[i] = np.log10(chemical_potentials.fugacity(oxygen, ReReO2buffer))
# Plot the Re-ReO2 log10(fO2) values
plt.plot(temperatures, log10fO2_Re_PO1994, 'k',
             linewidth=1., label='Re-ReO2 (Pownceby and O\'Neill, 1994)')
plt.plot(temperatures, log10fO2_ReReO2buffer,
'r--', linewidth=2., label='Re-ReO2 (HP 2011 ds62)')
plt.ylabel("log_10 (fO2)")
plt.xlabel("T (K)")
plt.legend(loc='lower right')
plt.show()
|
gpl-2.0
|
kmaehashi/jubakit
|
example/anomaly_auc.py
|
2
|
2167
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
"""
Using Anomaly and Calculating AUC Score
========================================
This is a simple example that illustrates Anomaly service usage.
"""
from jubakit.anomaly import Anomaly, Schema, Dataset, Config
from jubakit.loader.csv import CSVLoader
# Load a CSV file.
loader = CSVLoader('iris.csv')
# Define a Schema that defines types for each columns of the CSV file.
schema = Schema({
'Species': Schema.FLAG,
}, Schema.NUMBER)
# Define a function to determine if the record is positive or negative.
# In this example we treat `Iris-virginica` as an "anomaly" record.
def is_positive(x):
return x == 'Iris-virginica'
# Create a Dataset.
dataset = Dataset(loader, schema)
# Extract the negative (non-anomaly) dataset.
dataset_neg = dataset.convert(lambda data: filter(lambda x: not is_positive(x['Species']), data))
# Create an Anomaly Service.
cfg = Config(parameter={'nearest_neighbor_num': 3})
anomaly = Anomaly.run(cfg)
# Update the anomaly model using negative dataset.
for (idx, row_id, flag, score) in anomaly.add(dataset_neg):
pass
# Calculate LOF scores for the full dataset.
# It is expected that `Iris-virginica` records get higher scores than others.
y_true = []
y_score = []
for (idx, row_id, flag, score) in anomaly.calc_score(dataset):
y_true.append(is_positive(flag))
y_score.append(score)
print('Score ({0}): {1}'.format(flag, score))
# Stop the Anomaly service.
anomaly.stop()
try:
# If scikit-learn is available, display metrics.
import sklearn.metrics
print('-----------------------------')
print('AUC: {0}'.format(sklearn.metrics.roc_auc_score(y_true, y_score)))
print('-----------------------------')
print('Score Threshold and Precision:')
(fpr, tpr, thresholds) = sklearn.metrics.roc_curve(y_true, y_score)
for i in range(len(thresholds)):
print(' Threshold: {0:10.10f} -> True Positive Rate: {1:1.4f}, False Positive Rate: {2:1.4f}'.format(thresholds[i], tpr[i], fpr[i]))
except ImportError:
print('sklearn is not installed; metrics cannot be calculated')
|
mit
|
lbishal/scikit-learn
|
doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py
|
46
|
2798
|
"""Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
print(grid_search.grid_scores_)
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
|
bsd-3-clause
|
nfsli926/tushare
|
setup.py
|
9
|
2613
|
from setuptools import setup, find_packages
import codecs
import os
import tushare
def read(fname):
return codecs.open(os.path.join(os.path.dirname(__file__), fname)).read()
long_desc = """
TuShare
===============
.. image:: https://api.travis-ci.org/waditu/tushare.png?branch=master
:target: https://travis-ci.org/waditu/tushare
.. image:: https://badge.fury.io/py/tushare.png
:target: http://badge.fury.io/py/tushare
* easy to use as most of the data returned are pandas DataFrame objects
* can be easily saved as csv, excel or json files
* can be inserted into MySQL or Mongodb
Target Users
--------------
* financial market analyst of China
* learners of financial data analysis with pandas/NumPy
* people who are interested in China financial data
Installation
--------------
pip install tushare
Upgrade
---------------
pip install tushare --upgrade
Quick Start
--------------
::
import tushare as ts
ts.get_hist_data('600848')
return::
open high close low volume p_change ma5 \
date
2012-01-11 6.880 7.380 7.060 6.880 14129.96 2.62 7.060
2012-01-12 7.050 7.100 6.980 6.900 7895.19 -1.13 7.020
2012-01-13 6.950 7.000 6.700 6.690 6611.87 -4.01 6.913
2012-01-16 6.680 6.750 6.510 6.480 2941.63 -2.84 6.813
2012-01-17 6.660 6.880 6.860 6.460 8642.57 5.38 6.822
2012-01-18 7.000 7.300 6.890 6.880 13075.40 0.44 6.788
2012-01-19 6.690 6.950 6.890 6.680 6117.32 0.00 6.770
2012-01-20 6.870 7.080 7.010 6.870 6813.09 1.74 6.832
"""
setup(
name='tushare',
version=tushare.__version__,
    description='A utility for crawling historical and real-time quotes data of China stocks',
# long_description=read("READM.rst"),
long_description = long_desc,
author='Jimmy Liu',
author_email='[email protected]',
license='BSD',
url='http://tushare.org',
keywords='China stock data',
classifiers=['Development Status :: 4 - Beta',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'License :: OSI Approved :: BSD License'],
packages=['tushare','tushare.stock', 'tushare.data', 'tushare.util', 'tushare.datayes'],
package_data={'': ['*.csv']},
)
|
bsd-3-clause
|
NWine/trading-with-python
|
spreadApp/makeDist.py
|
77
|
1720
|
from distutils.core import setup
import py2exe
manifest_template = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s Program</description>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
RT_MANIFEST = 24
import matplotlib
opts = {
'py2exe': {
"compressed": 1,
"bundle_files" : 3,
"includes" : ["sip",
"matplotlib.backends",
"matplotlib.backends.backend_qt4agg",
"pylab", "numpy",
"matplotlib.backends.backend_tkagg"],
'excludes': ['_gtkagg', '_tkagg', '_agg2',
'_cairo', '_cocoaagg',
'_fltkagg', '_gtk', '_gtkcairo', ],
'dll_excludes': ['libgdk-win32-2.0-0.dll',
'libgobject-2.0-0.dll']
}
}
setup(name="triton",
version = "0.1",
scripts=["spreadScanner.pyw"],
windows=[{"script": "spreadScanner.pyw"}],
options=opts,
data_files=matplotlib.get_py2exe_datafiles(),
other_resources = [(RT_MANIFEST, 1, manifest_template % dict(prog="spreadDetective"))],
zipfile = None)
|
bsd-3-clause
|
amitibo/cuda-convnet2
|
convdata.py
|
174
|
14675
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from python_util.data import *
import numpy.random as nr
import numpy as n
import random as r
from time import time
from threading import Thread
from math import sqrt
import sys
#from matplotlib import pylab as pl
from PIL import Image
from StringIO import StringIO
from time import time
import itertools as it
class JPEGBatchLoaderThread(Thread):
def __init__(self, dp, batch_num, label_offset, list_out):
Thread.__init__(self)
self.list_out = list_out
self.label_offset = label_offset
self.dp = dp
self.batch_num = batch_num
@staticmethod
def load_jpeg_batch(rawdics, dp, label_offset):
if type(rawdics) != list:
rawdics = [rawdics]
nc_total = sum(len(r['data']) for r in rawdics)
jpeg_strs = list(it.chain.from_iterable(rd['data'] for rd in rawdics))
labels = list(it.chain.from_iterable(rd['labels'] for rd in rawdics))
img_mat = n.empty((nc_total * dp.data_mult, dp.inner_pixels * dp.num_colors), dtype=n.float32)
lab_mat = n.zeros((nc_total, dp.get_num_classes()), dtype=n.float32)
dp.convnet.libmodel.decodeJpeg(jpeg_strs, img_mat, dp.img_size, dp.inner_size, dp.test, dp.multiview)
lab_vec = n.tile(n.asarray([(l[nr.randint(len(l))] if len(l) > 0 else -1) + label_offset for l in labels], dtype=n.single).reshape((nc_total, 1)), (dp.data_mult,1))
for c in xrange(nc_total):
lab_mat[c, [z + label_offset for z in labels[c]]] = 1
lab_mat = n.tile(lab_mat, (dp.data_mult, 1))
return {'data': img_mat[:nc_total * dp.data_mult,:],
'labvec': lab_vec[:nc_total * dp.data_mult,:],
'labmat': lab_mat[:nc_total * dp.data_mult,:]}
def run(self):
rawdics = self.dp.get_batch(self.batch_num)
p = JPEGBatchLoaderThread.load_jpeg_batch(rawdics,
self.dp,
self.label_offset)
self.list_out.append(p)
class ColorNoiseMakerThread(Thread):
def __init__(self, pca_stdevs, pca_vecs, num_noise, list_out):
Thread.__init__(self)
self.pca_stdevs, self.pca_vecs = pca_stdevs, pca_vecs
self.num_noise = num_noise
self.list_out = list_out
def run(self):
noise = n.dot(nr.randn(self.num_noise, 3).astype(n.single) * self.pca_stdevs.T, self.pca_vecs.T)
self.list_out.append(noise)
class ImageDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.data_mean = self.batch_meta['data_mean'].astype(n.single)
self.color_eig = self.batch_meta['color_pca'][1].astype(n.single)
self.color_stdevs = n.c_[self.batch_meta['color_pca'][0].astype(n.single)]
self.color_noise_coeff = dp_params['color_noise']
self.num_colors = 3
self.img_size = int(sqrt(self.batch_meta['num_vis'] / self.num_colors))
self.mini = dp_params['minibatch_size']
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.img_size
self.inner_pixels = self.inner_size **2
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 5*2
self.data_mult = self.num_views if self.multiview else 1
self.batch_size = self.batch_meta['batch_size']
self.label_offset = 0 if 'label_offset' not in self.batch_meta else self.batch_meta['label_offset']
self.scalar_mean = dp_params['scalar_mean']
# Maintain pointers to previously-returned data matrices so they don't get garbage collected.
self.data = [None, None] # These are pointers to previously-returned data matrices
self.loader_thread, self.color_noise_thread = None, None
self.convnet = dp_params['convnet']
self.num_noise = self.batch_size
self.batches_generated, self.loaders_started = 0, 0
self.data_mean_crop = self.data_mean.reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((1,3*self.inner_size**2))
if self.scalar_mean >= 0:
self.data_mean_crop = self.scalar_mean
def showimg(self, img):
from matplotlib import pylab as pl
pixels = img.shape[0] / 3
size = int(sqrt(pixels))
img = img.reshape((3,size,size)).swapaxes(0,2).swapaxes(0,1)
pl.imshow(img, interpolation='nearest')
pl.show()
def get_data_dims(self, idx=0):
if idx == 0:
return self.inner_size**2 * 3
if idx == 2:
return self.get_num_classes()
return 1
def start_loader(self, batch_idx):
self.load_data = []
self.loader_thread = JPEGBatchLoaderThread(self,
self.batch_range[batch_idx],
self.label_offset,
self.load_data)
self.loader_thread.start()
def start_color_noise_maker(self):
color_noise_list = []
self.color_noise_thread = ColorNoiseMakerThread(self.color_stdevs, self.color_eig, self.num_noise, color_noise_list)
self.color_noise_thread.start()
return color_noise_list
def set_labels(self, datadic):
pass
def get_data_from_loader(self):
if self.loader_thread is None:
self.start_loader(self.batch_idx)
self.loader_thread.join()
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
else:
# Set the argument to join to 0 to re-enable batch reuse
self.loader_thread.join()
if not self.loader_thread.is_alive():
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
#else:
# print "Re-using batch"
self.advance_batch()
def add_color_noise(self):
# At this point the data already has 0 mean.
# So I'm going to add noise to it, but I'm also going to scale down
# the original data. This is so that the overall scale of the training
# data doesn't become too different from the test data.
s = self.data[self.d_idx]['data'].shape
cropped_size = self.get_data_dims(0) / 3
ncases = s[0]
if self.color_noise_thread is None:
self.color_noise_list = self.start_color_noise_maker()
self.color_noise_thread.join()
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
else:
self.color_noise_thread.join(0)
if not self.color_noise_thread.is_alive():
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases*3, cropped_size))
self.color_noise = self.color_noise[:ncases,:].reshape((3*ncases, 1))
self.data[self.d_idx]['data'] += self.color_noise * self.color_noise_coeff
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases, 3* cropped_size))
self.data[self.d_idx]['data'] *= 1.0 / (1.0 + self.color_noise_coeff) # <--- NOTE: This is the slow line, 0.25sec. Down from 0.75sec when I used division.
def get_next_batch(self):
self.d_idx = self.batches_generated % 2
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.get_data_from_loader()
# Subtract mean
self.data[self.d_idx]['data'] -= self.data_mean_crop
if self.color_noise_coeff > 0 and not self.test:
self.add_color_noise()
self.batches_generated += 1
return epoch, batchnum, [self.data[self.d_idx]['data'].T, self.data[self.d_idx]['labvec'].T, self.data[self.d_idx]['labmat'].T]
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data, add_mean=True):
mean = self.data_mean_crop.reshape((data.shape[0],1)) if data.flags.f_contiguous or self.scalar_mean else self.data_mean_crop.reshape((data.shape[0],1))
return n.require((data + (mean if add_mean else 0)).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
class CIFARDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.img_size = 32
self.num_colors = 3
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.batch_meta['img_size']
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 9
self.scalar_mean = dp_params['scalar_mean']
self.data_mult = self.num_views if self.multiview else 1
self.data_dic = []
for i in batch_range:
self.data_dic += [unpickle(self.get_data_file_name(i))]
self.data_dic[-1]["labels"] = n.require(self.data_dic[-1]['labels'], dtype=n.single)
self.data_dic[-1]["labels"] = n.require(n.tile(self.data_dic[-1]["labels"].reshape((1, n.prod(self.data_dic[-1]["labels"].shape))), (1, self.data_mult)), requirements='C')
self.data_dic[-1]['data'] = n.require(self.data_dic[-1]['data'] - self.scalar_mean, dtype=n.single, requirements='C')
self.cropped_data = [n.zeros((self.get_data_dims(), self.data_dic[0]['data'].shape[1]*self.data_mult), dtype=n.single) for x in xrange(2)]
self.batches_generated = 0
self.data_mean = self.batch_meta['data_mean'].reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((self.get_data_dims(), 1))
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
bidx = batchnum - self.batch_range[0]
cropped = self.cropped_data[self.batches_generated % 2]
self.__trim_borders(self.data_dic[bidx]['data'], cropped)
cropped -= self.data_mean
self.batches_generated += 1
return epoch, batchnum, [cropped, self.data_dic[bidx]['labels']]
def get_data_dims(self, idx=0):
return self.inner_size**2 * self.num_colors if idx == 0 else 1
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data):
return n.require((data + self.data_mean).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
def __trim_borders(self, x, target):
y = x.reshape(self.num_colors, self.img_size, self.img_size, x.shape[1])
if self.test: # don't need to loop over cases
if self.multiview:
start_positions = [(0,0), (0, self.border_size), (0, self.border_size*2),
(self.border_size, 0), (self.border_size, self.border_size), (self.border_size, self.border_size*2),
(self.border_size*2, 0), (self.border_size*2, self.border_size), (self.border_size*2, self.border_size*2)]
end_positions = [(sy+self.inner_size, sx+self.inner_size) for (sy,sx) in start_positions]
for i in xrange(self.num_views):
target[:,i * x.shape[1]:(i+1)* x.shape[1]] = y[:,start_positions[i][0]:end_positions[i][0],start_positions[i][1]:end_positions[i][1],:].reshape((self.get_data_dims(),x.shape[1]))
else:
pic = y[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size, :] # just take the center for now
target[:,:] = pic.reshape((self.get_data_dims(), x.shape[1]))
else:
for c in xrange(x.shape[1]): # loop over cases
startY, startX = nr.randint(0,self.border_size*2 + 1), nr.randint(0,self.border_size*2 + 1)
endY, endX = startY + self.inner_size, startX + self.inner_size
pic = y[:,startY:endY,startX:endX, c]
if nr.randint(2) == 0: # also flip the image with 50% probability
pic = pic[:,:,::-1]
target[:,c] = pic.reshape((self.get_data_dims(),))
class DummyConvNetLogRegDataProvider(LabeledDummyDataProvider):
def __init__(self, data_dim):
LabeledDummyDataProvider.__init__(self, data_dim)
self.img_size = int(sqrt(data_dim/3))
def get_next_batch(self):
epoch, batchnum, dic = LabeledDummyDataProvider.get_next_batch(self)
dic = {'data': dic[0], 'labels': dic[1]}
print dic['data'].shape, dic['labels'].shape
return epoch, batchnum, [dic['data'], dic['labels']]
# Returns the dimensionality of the two data matrices returned by get_next_batch
def get_data_dims(self, idx=0):
return self.batch_meta['num_vis'] if idx == 0 else 1
|
apache-2.0
|
jayflo/scikit-learn
|
sklearn/decomposition/tests/test_sparse_pca.py
|
142
|
5990
|
# Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_not_mac_os
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_not_mac_os()
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
# Test that SparsePCA won't return NaN when there is 0 feature in all
# samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
|
bsd-3-clause
|
empireryan/director
|
src/python/ddapp/callbacks.py
|
11
|
6474
|
from weakref import ref
import new
'''
CallbackRegistry is a class taken from matplotlib.cbook.
http://sourceforge.net/p/matplotlib/code/HEAD/tree/trunk/matplotlib/lib/matplotlib/cbook.py
'''
class CallbackRegistry:
"""
Handle registering and disconnecting for a set of signals and
callbacks::
signals = 'eat', 'drink', 'be merry'
def oneat(x):
print 'eat', x
def ondrink(x):
print 'drink', x
callbacks = CallbackRegistry(signals)
ideat = callbacks.connect('eat', oneat)
iddrink = callbacks.connect('drink', ondrink)
#tmp = callbacks.connect('drunk', ondrink) # this will raise a ValueError
callbacks.process('drink', 123) # will call ondrink
callbacks.process('eat', 456) # will call oneat
callbacks.process('be merry', 456) # nothing will be called
callbacks.disconnect(ideat) # disconnect oneat
callbacks.process('eat', 456) # nothing will be called
In practice, one should always disconnect all callbacks when they
are no longer needed to avoid dangling references (and thus memory
leaks). However, real code in matplotlib rarely does so, and due
to its design, it is rather difficult to place this kind of code.
To get around this, and prevent this class of memory leaks, we
instead store weak references to bound methods only, so when the
destination object needs to die, the CallbackRegistry won't keep
it alive. The Python stdlib weakref module cannot create weak
references to bound methods directly, so we need to create a proxy
object to handle weak references to bound methods (or regular free
functions). This technique was shared by Peter Parente on his
`"Mindtrove" blog
<http://mindtrove.info/articles/python-weak-references/>`_.
"""
def __init__(self, signals):
'*signals* is a sequence of valid signals'
self.signals = set()
self.callbacks = dict()
for s in signals:
self.addSignal(s)
self._cid = 0
def _check_signal(self, s):
'make sure *s* is a valid signal or raise a ValueError'
if s not in self.signals:
signals = list(self.signals)
signals.sort()
raise ValueError('Unknown signal "%s"; valid signals are %s'%(s, signals))
def addSignal(self, sig):
if sig not in self.signals:
self.signals.add(sig)
self.callbacks[sig] = dict()
def connect(self, s, func):
"""
register *func* to be called when a signal *s* is generated;
returns a callback id that can be used with *disconnect*
"""
self._check_signal(s)
proxy = BoundMethodProxy(func)
for cid, callback in self.callbacks[s].items():
# Clean out dead references
if callback.inst is not None and callback.inst() is None:
del self.callbacks[s][cid]
elif callback == proxy:
return cid
self._cid += 1
self.callbacks[s][self._cid] = proxy
return self._cid
def disconnect(self, cid):
"""
disconnect the callback registered with callback id *cid*
"""
for eventname, callbackd in self.callbacks.items():
try:
del callbackd[cid]
except KeyError:
continue
else:
return
def process(self, s, *args, **kwargs):
"""
process signal *s*. All of the functions registered to receive
callbacks on *s* will be called with *\*args* and *\*\*kwargs*
"""
self._check_signal(s)
for cid, proxy in self.callbacks[s].items():
# Clean out dead references
if proxy.inst is not None and proxy.inst() is None:
del self.callbacks[s][cid]
else:
proxy(*args, **kwargs)
def getCallbacks(self, s):
"""
return callbacks registered to signal *s*.
"""
self._check_signal(s)
callbacks = []
for cid, proxy in self.callbacks[s].items():
# Clean out dead references
if proxy.inst is not None and proxy.inst() is None:
del self.callbacks[s][cid]
else:
callbacks.append(proxy)
return callbacks
class BoundMethodProxy(object):
'''
Our own proxy object which enables weak references to bound and unbound
methods and arbitrary callables. Pulls information about the function,
class, and instance out of a bound method. Stores a weak reference to the
instance to support garbage collection.
@organization: IBM Corporation
@copyright: Copyright (c) 2005, 2006 IBM Corporation
@license: The BSD License
Minor bugfixes by Michael Droettboom
'''
def __init__(self, cb):
try:
try:
self.inst = ref(cb.im_self)
except TypeError:
self.inst = None
self.func = cb.im_func
self.klass = cb.im_class
except AttributeError:
self.inst = None
self.func = cb
self.klass = None
def __call__(self, *args, **kwargs):
'''
Proxy for a call to the weak referenced object. Take
arbitrary params to pass to the callable.
Raises `ReferenceError`: When the weak reference refers to
a dead object
'''
if self.inst is not None and self.inst() is None:
raise ReferenceError
elif self.inst is not None:
# build a new instance method with a strong reference to the instance
mtd = new.instancemethod(self.func, self.inst(), self.klass)
else:
# not a bound method, just return the func
mtd = self.func
# invoke the callable and return the result
return mtd(*args, **kwargs)
def __eq__(self, other):
'''
Compare the held function and instance with that held by
another proxy.
'''
try:
if self.inst is None:
return self.func == other.func and other.inst is None
else:
return self.func == other.func and self.inst() == other.inst()
except Exception:
return False
def __ne__(self, other):
'''
Inverse of __eq__.
'''
return not self.__eq__(other)
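# Illustrative usage sketch (an addition, not part of the original module):
# exercises the registry and shows that callbacks held only through bound
# methods do not keep their instances alive. The Listener class and the
# 'resized' signal are made-up names for this demo.
if __name__ == '__main__':
    class Listener(object):
        def on_resized(self, size):
            print('resized to %s' % (size,))

    registry = CallbackRegistry(['resized'])
    listener = Listener()
    cid = registry.connect('resized', listener.on_resized)
    registry.process('resized', (640, 480))   # calls listener.on_resized
    del listener                              # only the weak proxy remains
    registry.process('resized', (800, 600))   # dead reference is cleaned out
    registry.disconnect(cid)                  # no-op; the cid is already gone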
|
bsd-3-clause
|
nhuntwalker/astroML
|
book_figures/chapter9/fig_photoz_basic.py
|
3
|
5824
|
"""
Photometric Redshifts via Linear Regression
-------------------------------------------
Linear regression for photometric redshifts. We use
sklearn.linear_model.LinearRegression on several feature expansions:
the r-band alone, all ugriz bands, 3rd-order polynomial features,
and Gaussian RBF features.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import itertools
import numpy as np
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.metrics.pairwise import euclidean_distances
from astroML.datasets import fetch_sdss_specgals
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
np.random.seed(0)
data = fetch_sdss_specgals()
# put magnitudes in a matrix
# with a constant (for the intercept) at position zero
mag = np.vstack([np.ones(data.shape)]
+ [data['modelMag_%s' % f] for f in 'ugriz']).T
z = data['z']
# train on ~60,000 points
mag_train = mag[::10]
z_train = z[::10]
# test on ~6,000 distinct points
mag_test = mag[1::100]
z_test = z[1::100]
def plot_results(z, z_fit, plotlabel=None,
xlabel=True, ylabel=True):
plt.scatter(z, z_fit, s=1, lw=0, c='k')
plt.plot([-0.1, 0.4], [-0.1, 0.4], ':k')
plt.xlim(-0.05, 0.4001)
plt.ylim(-0.05, 0.4001)
plt.gca().xaxis.set_major_locator(plt.MultipleLocator(0.1))
plt.gca().yaxis.set_major_locator(plt.MultipleLocator(0.1))
if plotlabel:
plt.text(0.03, 0.97, plotlabel,
ha='left', va='top', transform=ax.transAxes)
if xlabel:
plt.xlabel(r'$\rm z_{true}$')
else:
plt.gca().xaxis.set_major_formatter(plt.NullFormatter())
if ylabel:
plt.ylabel(r'$\rm z_{fit}$')
else:
plt.gca().yaxis.set_major_formatter(plt.NullFormatter())
def combinations_with_replacement(iterable, r):
pool = tuple(iterable)
n = len(pool)
for indices in itertools.product(range(n), repeat=r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def poly_features(X, p):
"""Compute polynomial features
Parameters
----------
X: array_like
shape (n_samples, n_features)
p: int
degree of polynomial
Returns
-------
X_p: array
polynomial feature matrix
"""
X = np.asarray(X)
N, D = X.shape
ind = list(combinations_with_replacement(range(D), p))
X_poly = np.empty((X.shape[0], len(ind)))
for i in range(len(ind)):
X_poly[:, i] = X[:, ind[i]].prod(1)
return X_poly
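# Quick illustrative check (an addition, not in the original script): for a
# single sample [2, 3] the degree-2 features are the monomials
# [x0*x0, x0*x1, x1*x1] = [4, 6, 9].
assert np.allclose(poly_features(np.array([[2.0, 3.0]]), 2), [[4.0, 6.0, 9.0]])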
def gaussian_RBF_features(X, centers, widths):
"""Compute gaussian Radial Basis Function features
Parameters
----------
X: array_like
shape (n_samples, n_features)
centers: array_like
shape (n_centers, n_features)
widths: array_like
shape (n_centers, n_features) or (n_centers,)
Returns
-------
X_RBF: array
RBF feature matrix, shape=(n_samples, n_centers)
"""
X, centers, widths = map(np.asarray, (X, centers, widths))
if widths.ndim == 1:
widths = widths[:, np.newaxis]
return np.exp(-0.5 * ((X[:, np.newaxis, :]
- centers) / widths) ** 2).sum(-1)
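# Quick illustrative check (an addition, not in the original script):
# 4 samples and 3 centers in a 2-D feature space yield an RBF feature
# matrix of shape (4, 3).
assert gaussian_RBF_features(np.zeros((4, 2)), np.ones((3, 2)),
                             np.ones(3)).shape == (4, 3)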
plt.figure(figsize=(5, 5))
plt.subplots_adjust(hspace=0.05, wspace=0.05,
left=0.1, right=0.95,
bottom=0.1, top=0.95)
#----------------------------------------------------------------------
# first do a simple linear regression between the r-band and redshift,
# ignoring uncertainties
ax = plt.subplot(221)
X_train = mag_train[:, [0, 3]]
X_test = mag_test[:, [0, 3]]
z_fit = LinearRegression().fit(X_train, z_train).predict(X_test)
plot_results(z_test, z_fit,
plotlabel='Linear Regression:\n r-band',
xlabel=False)
#----------------------------------------------------------------------
# next do a linear regression with all bands
ax = plt.subplot(222)
z_fit = LinearRegression().fit(mag_train, z_train).predict(mag_test)
plot_results(z_test, z_fit, plotlabel="Linear Regression:\n ugriz bands",
xlabel=False, ylabel=False)
#----------------------------------------------------------------------
# next do a 3rd-order polynomial regression with all bands
ax = plt.subplot(223)
X_train = poly_features(mag_train, 3)
X_test = poly_features(mag_test, 3)
z_fit = LinearRegression().fit(X_train, z_train).predict(X_test)
plot_results(z_test, z_fit, plotlabel="3rd order Polynomial\nRegression")
#----------------------------------------------------------------------
# next do a radial basis function regression with all bands
ax = plt.subplot(224)
# remove bias term
mag = mag[:, 1:]
mag_train = mag_train[:, 1:]
mag_test = mag_test[:, 1:]
centers = mag[np.random.randint(mag.shape[0], size=100)]
centers_dist = euclidean_distances(centers, centers, squared=True)
widths = np.sqrt(centers_dist[:, :10].mean(1))
X_train = gaussian_RBF_features(mag_train, centers, widths)
X_test = gaussian_RBF_features(mag_test, centers, widths)
z_fit = LinearRegression().fit(X_train, z_train).predict(X_test)
plot_results(z_test, z_fit, plotlabel="Gaussian Basis Function\nRegression",
ylabel=False)
plt.show()
|
bsd-2-clause
|
wjfwzzc/Kaggle_Script
|
walmart_recruiting_trip_type_classification/data/load_data.py
|
1
|
1704
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
import zipfile
import numpy
import pandas
import sklearn.preprocessing
def process_dummy(df, feature):
df = df.join(pandas.get_dummies(df[feature], prefix=feature))
df.drop(feature, axis=1, inplace=True)
return df
def process_scale(df, feature):
df[feature].fillna(df[feature].dropna().median(), inplace=True)
df[feature] = sklearn.preprocessing.scale(df[feature].astype(numpy.float64), copy=False)
return df
z = zipfile.ZipFile("./data/train.csv.zip")
train_df = pandas.read_csv(z.open("train.csv"))
z = zipfile.ZipFile("./data/test.csv.zip")
z.setpassword("Work4WalmarT")
test_df = pandas.read_csv(z.open("test.csv"))
train_df.drop(["Upc", "FinelineNumber"], axis=1, inplace=True)
train_df = process_dummy(train_df, "Weekday")
train_df = process_dummy(train_df, "DepartmentDescription")
train_df = train_df.groupby(["VisitNumber", "TripType"], as_index=False).sum()
test_df.drop(["Upc", "FinelineNumber"], axis=1, inplace=True)
test_df = process_dummy(test_df, "Weekday")
test_df = process_dummy(test_df, "DepartmentDescription")
test_df = test_df.groupby("VisitNumber", as_index=False).sum()
data_df = train_df.append(test_df).reset_index(drop=True)
data_df.drop(["TripType", "VisitNumber"], axis=1, inplace=True)
data_df.fillna(0, inplace=True)
data_df = process_scale(data_df, "ScanCount")
le = sklearn.preprocessing.LabelEncoder()
train_df["TripType"] = le.fit_transform(train_df["TripType"])
target = train_df["TripType"].astype('category')
ids = test_df["VisitNumber"].values
train = data_df[:train_df.shape[0]].values
test = data_df[train_df.shape[0]:].values
|
mit
|
plissonf/scikit-learn
|
examples/cluster/plot_lena_ward_segmentation.py
|
271
|
1998
|
"""
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
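# connectivity is a sparse (n_pixels, n_pixels) adjacency matrix linking each
# pixel to its grid neighbours; Ward merges are only allowed along these edges.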
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
|
bsd-3-clause
|
glouppe/scikit-learn
|
examples/cluster/plot_lena_compress.py
|
271
|
2229
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Vector Quantization Example
=========================================================
The classic image-processing example, Lena, an 8-bit grayscale
512 x 512 pixel image, is used here to illustrate
how `k`-means is used for vector quantization.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn import cluster
n_clusters = 5
np.random.seed(0)
try:
lena = sp.lena()
except AttributeError:
# Newer versions of scipy have lena in misc
from scipy import misc
lena = misc.lena()
X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
# create an array from labels and values
lena_compressed = np.choose(labels, values)
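# Each pixel is replaced by the centroid of its cluster, so the compressed
# image uses only n_clusters distinct gray levels.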
lena_compressed.shape = lena.shape
vmin = lena.min()
vmax = lena.max()
# original lena
plt.figure(1, figsize=(3, 2.2))
plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256)
# compressed lena
plt.figure(2, figsize=(3, 2.2))
plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# equal bins lena
regular_values = np.linspace(0, 256, n_clusters + 1)
regular_labels = np.searchsorted(regular_values, lena) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean
regular_lena = np.choose(regular_labels.ravel(), regular_values)
regular_lena.shape = lena.shape
plt.figure(3, figsize=(3, 2.2))
plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# histogram
plt.figure(4, figsize=(3, 2.2))
plt.clf()
plt.axes([.01, .01, .98, .98])
plt.hist(X, bins=256, color='.5', edgecolor='.5')
plt.yticks(())
plt.xticks(regular_values)
values = np.sort(values)
for center_1, center_2 in zip(values[:-1], values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b')
for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')
plt.show()
|
bsd-3-clause
|
marcsans/cnn-physics-perception
|
phy/lib/python2.7/site-packages/sklearn/cluster/birch.py
|
22
|
22733
|
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, instead of constructing a sparse matrix for every row, which is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
next_leaf_ : _CFNode
next_leaf. Useful only if is_leaf is True; the chain of leaves is
used to retrieve the final subclusters.
init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
manipulate ``init_centroids_`` throughout rather than centroids_ since
the centroids are just a view of the ``init_centroids_`` .
init_sq_norm_ : ndarray, shape (branching_factor + 1,)
manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
# Keep centroids and squared norms as views, so that updating
# init_centroids_ and init_sq_norm_ in place is sufficient to keep
# centroids_ and squared_norm_ in sync.
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
# The child node had to be split: redistribute the subclusters in
# our child node, and add a new subcluster in the parent
# subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
A CFSubcluster can have a CFNode as its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
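# With new_norm = ||centroid||^2, the expression below simplifies to
# sq_radius = new_ss / new_n - new_norm, i.e. the mean squared distance
# of the merged samples from their centroid (E||x||^2 - ||E[x]||^2).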
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
Every new sample is inserted into the root of the Clustering Feature
Tree. It is then clubbed together with the subcluster that has the
centroid closest to the new sample. This is done recursively till it
ends up at the leaf subcluster whose centroid is closest to the new sample.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
closest subcluster should be less than the threshold. Otherwise a new
subcluster is started.
branching_factor : int, default 50
Maximum number of CF subclusters in each node. If a new sample enters
such that the number of subclusters exceeds the branching_factor, then
the node has to be split. The corresponding parent also has to be
split and if the number of subclusters in the parent is greater than
the branching factor, then it has to be split recursively.
n_clusters : int, instance of sklearn.cluster model, default 3
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples. If None, this final
clustering step is not performed and the subclusters are returned
as they are. If a model is provided, the model is fit treating
the subclusters as new samples and the initial data is mapped to the
label of the closest subcluster. If an int is provided, the model
fit is AgglomerativeClustering with n_clusters set to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
* Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/archive/p/jbirch
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
# Cannot vectorize. Enough to convince to use cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves: array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels: ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X, y=None):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
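# Illustrative usage sketch (an addition, not part of scikit-learn): builds a
# small CF tree incrementally with partial_fit and then runs the global
# clustering step. The demo data and parameter choices below are made up.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = np.concatenate([rng.normal(loc, 0.1, size=(20, 2))
                             for loc in (-2.0, 0.0, 2.0)])
    birch = Birch(threshold=0.5, branching_factor=20, n_clusters=3)
    for start in range(0, X_demo.shape[0], 20):
        birch.partial_fit(X_demo[start:start + 20])
    print(birch.predict(X_demo[:5]))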
|
mit
|
michael-hoffman/titanic-revisited
|
titanic_stacking.py
|
1
|
10406
|
#
# General central framework to run stacked model to predict survival on the
# Titanic.
#
# Authors: Charlie Bonfield and Michael Hoffman
# Last Modified: July 2017
## Import statements
# General
#import re
import sys
import scipy
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import preprocessing
from titanic_preprocessing import Useful_Preprocessing # conglomeration of stuff
# used for preprocessing
# Base Models (assorted classification algorithms, may or may not use all of
# these)
from sklearn.svm import SVC
from sklearn.cross_validation import KFold
#from sklearn.grid_search import RandomizedSearchCV # old sklearn
from sklearn.model_selection import RandomizedSearchCV # new sklearn
#from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier, ExtraTreesClassifier
# Second Layer Model
import xgboost as xgb
# Helper function for all sklearn classifiers.
class Sklearn_Helper(object):
def __init__(self, classifier, seed=0, params=None):
params['random_state'] = seed
self.classifier = classifier(**params)
def train(self, x_train, y_train):
self.classifier.fit(x_train, y_train)
def predict(self, x):
return self.classifier.predict(x)
def fit(self,x,y):
return self.classifier.fit(x,y)
def feature_importances(self,x,y):
print(self.classifier.fit(x,y).feature_importances_)
def report(results, n_top=3):
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
print("Model with rank: {0}".format(i))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]))
print("Parameters: {0}".format(results['params'][candidate]))
# Perform hyperparameter tuning for given training set (will do this implicitly
# for every fold).
def hyperparameter_tuning(classifier, param_dist, n_iterations, X, y):
clf = classifier()
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iterations, n_jobs=-1)
random_search.fit(X, y)
best_params = random_search.best_params_
return best_params
# Generate out-of-fold predictions for the training set. Per-fold predictions
# for the test set are collected in oof_test_full, but the test-set "meta
# feature" returned here comes from a model retuned and retrained on the full
# training set.
def get_out_of_fold_predictions(classifier, param_dist, kf, n_folds, x_train,
y_train, n_train, x_test, n_test, seed):
oof_train = np.zeros((n_train,))
meta_test = np.zeros((n_test,))
oof_test_full = np.empty((n_folds, n_test))
# Iterate over sets of training/test indices corresponding to each fold.
for i, (train_indices, test_indices) in enumerate(kf):
#print(train_index)
#print(test_index)
x_tr = x_train[train_indices]
y_tr = y_train[train_indices]
x_te = x_train[test_indices]
best_params = hyperparameter_tuning(classifier, param_dist, 50, x_tr, y_tr)
clf = Sklearn_Helper(classifier, seed=seed, params=best_params)
clf.train(x_tr, y_tr)
oof_train[test_indices] = clf.predict(x_te)
oof_test_full[i, :] = clf.predict(x_test)
# Generate predictions for actual test set (use entire training set).
best_params = hyperparameter_tuning(classifier, param_dist, 10000, x_train, y_train)
clf = Sklearn_Helper(classifier, seed=seed, params=best_params)
clf.train(x_train, y_train)
meta_test[:] = clf.predict(x_test)
return oof_train.reshape(-1, 1), meta_test.reshape(-1, 1)
# Load in data.
train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
# Impute missing 'Fare' values with median.
train_data['Fare'] = train_data['Fare'].fillna(train_data['Fare'].median())
test_data['Fare'] = test_data['Fare'].fillna(test_data['Fare'].median())
# Combine data for preprocessing (should not be an issue, as most of this is
# just casting categorical features as numbers and dropping things we do not
# wish to use).
pp = Useful_Preprocessing()
combined = pd.concat([train_data, test_data], ignore_index=True)
combined = pp.transform_all(combined)
# Split back out into training/test sets.
train_data = combined[:891]
test_data = combined[891:].drop('Survived', axis=1)
# Split passenger IDs from training/test sets.
train_ids = train_data['PassengerId']
train_data.drop(['PassengerId'], axis=1, inplace=True)
test_ids = test_data['PassengerId']
test_data.drop(['PassengerId'], axis=1, inplace=True)
# Impute ages (was doing this previously with the combined train/test set,
# now doing separately).
train_data = pp.impute_ages(train_data)
test_data = pp.impute_ages(test_data)
# Standardize age/fare features.
scaler = preprocessing.StandardScaler()
select = 'Age Fare Parch SibSp Family_Size'.split()
train_data[select] = scaler.fit_transform(train_data[select])
test_data[select] = scaler.transform(test_data[select])
#sys.exit(0)
# Prepare for stacking (these parameters will be needed to generate out-of-fold
# predictions).
n_train = train_data.shape[0]
n_test = test_data.shape[0]
SEED = 42
NFOLDS = 5 # set the number of folds for out-of-fold prediction
kf = KFold(n_train, n_folds= NFOLDS, random_state=SEED)
# Split out target feature for training set, rename train/test data for convenience.
y_train = train_data['Survived'].ravel()
train_data = train_data.drop(['Survived'], axis=1)
x_train = train_data.values
x_test = test_data.values
# The block below generates and saves the first-level predictions; if they have
# already been saved, comment it out and use the loading block further down.
# Provide set of parameter distributions to be searched by RandomSearchCV
# for each classifer (needed for tuning, can be customized).
#
# SAMPLE: param_dist = {'C': scipy.stats.uniform(0.1, 1000),
# 'gamma': scipy.stats.uniform(.001, 1.0),
# 'kernel': ['rbf'], 'class_weight':['balanced', None]}
#
svc_dist = {'C': scipy.stats.uniform(0.1,1000),
'gamma': scipy.stats.uniform(.001,1.0),
'kernel': ['rbf'], 'class_weight':['balanced', None]}
ada_dist = {'n_estimators': scipy.stats.randint(1,101),
'learning_rate': scipy.stats.uniform(.001,1.0)}
rf_dist = {'n_estimators': scipy.stats.randint(1,101), 'warm_start': [True],
'max_depth': scipy.stats.randint(2,7),
'min_samples_leaf': scipy.stats.randint(1,4)}
gb_dist = {'n_estimators': scipy.stats.randint(1,101), 'warm_start': [True],
'max_depth': scipy.stats.randint(2,7),
'min_samples_leaf': scipy.stats.randint(1,4)}
et_dist = {'n_estimators': scipy.stats.randint(1,101), 'warm_start': [True],
'max_depth': scipy.stats.randint(2,7),
'min_samples_leaf': scipy.stats.randint(1,4)}
# Generate first-level predictions.
# Arguments: (classifer, param_dist, kf, n_folds, x_train, y_train, n_train,
# x_test, n_test, seed)
print('Generating first-level predictions:')
svc_fl_train, svc_fl_test = get_out_of_fold_predictions(SVC,svc_dist,kf,NFOLDS,
x_train, y_train, n_train,
x_test, n_test, SEED)
ada_fl_train, ada_fl_test = get_out_of_fold_predictions(AdaBoostClassifier,
ada_dist, kf, NFOLDS,
x_train, y_train, n_train,
x_test, n_test, SEED)
rf_fl_train, rf_fl_test = get_out_of_fold_predictions(RandomForestClassifier,
rf_dist,kf,NFOLDS,x_train,
y_train, n_train, x_test,
n_test, SEED)
gb_fl_train, gb_fl_test = get_out_of_fold_predictions(GradientBoostingClassifier,
gb_dist,kf,NFOLDS,x_train,
y_train, n_train, x_test,
n_test, SEED)
et_fl_train, et_fl_test = get_out_of_fold_predictions(ExtraTreesClassifier,
et_dist,kf,NFOLDS,x_train,
y_train, n_train, x_test,
n_test, SEED)
# Save results, will be fed into second level.
x_train_meta = np.concatenate((svc_fl_train,ada_fl_train,rf_fl_train,gb_fl_train,
et_fl_train), axis=1)
x_test_meta = np.concatenate((svc_fl_test,ada_fl_test,rf_fl_test,gb_fl_test,
et_fl_test), axis=1)
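# Each column is one base model's predictions, so the meta feature matrices
# have shapes (n_train, 5) and (n_test, 5).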
np.savetxt('meta_train.txt', x_train_meta)
np.savetxt('meta_test.txt', x_test_meta)
"""
# Load in first-level predictions for train/test sets.
x_train_meta = np.loadtxt('meta_train.txt')
x_test_meta = np.loadtxt('meta_test.txt')
"""
# Provide set of parameter distributions to be searched for the second-level
# xgboost model.
xgb_dist = {'learning_rate': scipy.stats.uniform(0.1,0.9), 'objective': ['reg:linear'],
'max_depth': scipy.stats.randint(2,7),
'subsample': [0.8], 'colsample_bytree': [0.8],
#'subsample': scipy.stats.uniform(0.5,0.9),
#'colsample_bytree': scipy.stats.uniform(0.5,0.9),
'min_child_weight': scipy.stats.randint(1,5),
'n_estimators': scipy.stats.randint(1,101)}
# Generate second-level predictions using meta features.
xgb_params = hyperparameter_tuning(xgb.XGBClassifier,xgb_dist,50,x_train_meta,
y_train)
xgb_clf = xgb.XGBClassifier(**xgb_params)
xgb_clf.fit(x_train_meta, y_train)
test_preds = xgb_clf.predict(x_test_meta)
# Spit out predictions to submission file.
submission = pd.DataFrame({"PassengerId": test_ids.astype(int),
"Survived": test_preds.astype(int)})
submission.to_csv('mdh_submission_v1.csv', index=False)
|
gpl-3.0
|
imaculate/scikit-learn
|
examples/model_selection/plot_confusion_matrix.py
|
47
|
2495
|
"""
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
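# Sanity check (an addition to the original example): after row-wise
# normalization every row of the confusion matrix sums to 1.
assert np.allclose(cm_normalized.sum(axis=1), 1.0)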
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
|
bsd-3-clause
|
SteveDiamond/cvxpy
|
cvxpy/cvxcore/tests/python/364A_scripts/fir_filter.py
|
4
|
1478
|
import numpy as np
from cvxpy import *
import matplotlib.pyplot as plt
import time
np.random.seed(1)
N = 100
ANSWERS = []
TIME = 0
# create an increasing input signal
xtrue = np.zeros(N)
xtrue[0:40] = 0.1
xtrue[49] = 2
xtrue[69:80] = 0.15
xtrue[79] = 1
xtrue = np.cumsum(xtrue)
# pass the increasing input through a moving-average filter
# and add Gaussian noise
h = np.array([1, -0.85, 0.7, -0.3])
k = h.size
yhat = np.convolve(h,xtrue)
y = yhat[0:-3].reshape(N,1) + np.random.randn(N,1)
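# np.convolve(h, xtrue) has length N + k - 1 = 103; dropping the last
# k - 1 = 3 samples truncates the filtered signal back to N observations
# before the Gaussian noise is added.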
xtrue = np.asmatrix(xtrue.reshape(N,1))
y = np.asmatrix(y)
v = Variable(N)
x = Variable(N)
constraints = [x >= 0]
for i in range(N - 1):
constraints.append( x[i+1] >= x[i] )
constraints.append( y == ( conv(h,x)[0:-3] + v ) )
obj = Minimize( norm(v) )
prob = Problem(obj, constraints)
tic = time.time()
ANSWERS.append(prob.solve())
toc = time.time()
TIME += toc - tic
pass #print x.value
pass #print v.value
pass #plt.figure(1)
pass #plt.plot(xtrue)
pass #plt.plot(x.value)
pass #plt.legend(["True signal", "MLE signal"])
pass #plt.title("Maximum likelihood reconstruction of FIR filter with constraints")
pass #plt.show()
constraints.append( y == ( conv(h,x)[0:-3] + v ) )
obj = Minimize( norm(v) )
prob = Problem(obj, constraints)
ANSWERS.append(prob.solve())
pass #plt.figure(1)
pass #plt.plot(xtrue)
pass #plt.plot(x.value)
pass #plt.legend(["True signal", "MLE signal"])
pass #plt.title("Maximum likelihood reconstruction of FIR filter no constraints")
pass #plt.show()
|
gpl-3.0
|
glouppe/scikit-optimize
|
skopt/learning/tests/test_gbrt.py
|
1
|
3305
|
import numpy as np
from scipy import stats
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from skopt.learning import GradientBoostingQuantileRegressor
def truth(X):
return 0.5 * np.sin(1.75*X[:, 0])
def constant_noise(X):
return np.ones_like(X)
def sample_noise(X, std=0.2, noise=constant_noise,
random_state=None):
"""Uncertainty inherent to the process
The regressor should try and model this.
"""
rng = check_random_state(random_state)
return np.array([rng.normal(0, std*noise(x)) for x in X])
def test_gbrt_gaussian():
# estimate quantiles of the normal distribution
rng = np.random.RandomState(1)
N = 10000
X = np.ones((N, 1))
y = rng.normal(size=N)
rgr = GradientBoostingQuantileRegressor()
rgr.fit(X, y)
estimates = rgr.predict(X)
assert_almost_equal(stats.norm.ppf(rgr.quantiles),
np.mean(estimates, axis=0),
decimal=2)
def test_gbrt_base_estimator():
rng = np.random.RandomState(1)
N = 10000
X = np.ones((N, 1))
y = rng.normal(size=N)
base = RandomForestRegressor()
rgr = GradientBoostingQuantileRegressor(base_estimator=base)
assert_raise_message(ValueError, 'type GradientBoostingRegressor',
rgr.fit, X, y)
base = GradientBoostingRegressor()
rgr = GradientBoostingQuantileRegressor(base_estimator=base)
assert_raise_message(ValueError, 'quantile loss', rgr.fit, X, y)
base = GradientBoostingRegressor(loss='quantile', n_estimators=20)
rgr = GradientBoostingQuantileRegressor(base_estimator=base)
rgr.fit(X, y)
estimates = rgr.predict(X)
assert_almost_equal(stats.norm.ppf(rgr.quantiles),
np.mean(estimates, axis=0),
decimal=2)
def test_gbrt_with_std():
# simple test of the interface
rng = np.random.RandomState(1)
X = rng.uniform(0, 5, 500)[:, np.newaxis]
noise_level = 0.5
y = truth(X) + sample_noise(X, noise_level, random_state=rng)
X_ = np.linspace(0, 5, 1000)[:, np.newaxis]
model = GradientBoostingQuantileRegressor()
model.fit(X, y)
assert_array_equal(model.predict(X_).shape, (len(X_), 3))
l, c, h = model.predict(X_).T
assert_equal(l.shape, c.shape)
assert_equal(c.shape, h.shape)
assert_equal(l.shape[0], X_.shape[0])
mean, std = model.predict(X_, return_std=True)
assert_array_equal(mean, c)
assert_array_equal(std, (h - l) / 2.0)
def test_gbrt_in_parallel():
# check estimate quantiles with parallel
rng = np.random.RandomState(1)
N = 10000
X = np.ones((N, 1))
y = rng.normal(size=N)
rgr = GradientBoostingQuantileRegressor(
n_jobs=1, random_state=np.random.RandomState(1))
rgr.fit(X, y)
estimates = rgr.predict(X)
rgr.set_params(n_jobs=2, random_state=np.random.RandomState(1))
rgr.fit(X, y)
estimates_parallel = rgr.predict(X)
assert_array_equal(estimates, estimates_parallel)
|
bsd-3-clause
|
wangyou/XX-Net
|
code/default/gae_proxy/server/lib/google/appengine/api/appinfo.py
|
5
|
73693
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""AppInfo tools.
Library for working with AppInfo records in memory, store and load from
configuration files.
"""
import logging
import os
import re
import string
import sys
import wsgiref.util
if os.environ.get('APPENGINE_RUNTIME') == 'python27':
from google.appengine.api import pagespeedinfo
from google.appengine.api import validation
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_object
else:
from google.appengine.api import pagespeedinfo
from google.appengine.api import validation
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_object
from google.appengine.api import appinfo_errors
from google.appengine.api import backendinfo
_URL_REGEX = r'(?!\^)/.*|\..*|(\(.).*(?!\$).'
_FILES_REGEX = r'.+'
_URL_ROOT_REGEX = r'/.*'
_DELTA_REGEX = r'([0-9]+)([DdHhMm]|[sS]?)'
_EXPIRATION_REGEX = r'\s*(%s)(\s+%s)*\s*' % (_DELTA_REGEX, _DELTA_REGEX)
_START_PATH = '/_ah/start'
_ALLOWED_SERVICES = ['mail', 'mail_bounce', 'xmpp_message', 'xmpp_subscribe',
'xmpp_presence', 'xmpp_error', 'channel_presence', 'rest',
'warmup']
_SERVICE_RE_STRING = '(' + '|'.join(_ALLOWED_SERVICES) + ')'
_PAGE_NAME_REGEX = r'^.+$'
_EXPIRATION_CONVERSIONS = {
'd': 60 * 60 * 24,
'h': 60 * 60,
'm': 60,
's': 1,
}
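# For example, an expiration string such as '4d 5h' converts to
# 4 * 86400 + 5 * 3600 seconds. (Illustrative note; per _DELTA_REGEX above,
# a number without a suffix is treated as seconds.)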
APP_ID_MAX_LEN = 100
MODULE_ID_MAX_LEN = 63
MODULE_VERSION_ID_MAX_LEN = 63
MAX_URL_MAPS = 100
PARTITION_SEPARATOR = '~'
DOMAIN_SEPARATOR = ':'
VERSION_SEPARATOR = '.'
MODULE_SEPARATOR = ':'
DEFAULT_MODULE = 'default'
PARTITION_RE_STRING_WITHOUT_SEPARATOR = (r'[a-z\d\-]{1,%d}' % APP_ID_MAX_LEN)
PARTITION_RE_STRING = (r'%s\%s' %
(PARTITION_RE_STRING_WITHOUT_SEPARATOR,
PARTITION_SEPARATOR))
DOMAIN_RE_STRING_WITHOUT_SEPARATOR = (r'(?!\-)[a-z\d\-\.]{1,%d}' %
APP_ID_MAX_LEN)
DOMAIN_RE_STRING = (r'%s%s' %
(DOMAIN_RE_STRING_WITHOUT_SEPARATOR, DOMAIN_SEPARATOR))
DISPLAY_APP_ID_RE_STRING = r'(?!-)[a-z\d\-]{0,%d}[a-z\d]' % (APP_ID_MAX_LEN - 1)
APPLICATION_RE_STRING = (r'(?:%s)?(?:%s)?%s' %
(PARTITION_RE_STRING,
DOMAIN_RE_STRING,
DISPLAY_APP_ID_RE_STRING))
MODULE_ID_RE_STRING = r'^(?!-)[a-z\d\-]{0,%d}[a-z\d]$' % (MODULE_ID_MAX_LEN - 1)
MODULE_VERSION_ID_RE_STRING = (r'^(?!-)[a-z\d\-]{0,%d}[a-z\d]$' %
(MODULE_VERSION_ID_MAX_LEN - 1))
_IDLE_INSTANCES_REGEX = r'^([\d]+|automatic)$'
_INSTANCES_REGEX = r'^[1-9][\d]*$'
_INSTANCE_CLASS_REGEX = r'^([fF](1|2|4|4_1G)|[bB](1|2|4|8|4_1G))$'
_CONCURRENT_REQUESTS_REGEX = r'^([1-9]\d*)$'
_PENDING_LATENCY_REGEX = r'^(\d+((\.\d{1,3})?s|ms)|automatic)$'
_IDLE_TIMEOUT_REGEX = r'^[\d]+(s|m)$'
GCE_RESOURCE_NAME_REGEX = r'^[a-z]([a-z\d-]{0,61}[a-z\d])?$'
ALTERNATE_HOSTNAME_SEPARATOR = '-dot-'
BUILTIN_NAME_PREFIX = 'ah-builtin'
RUNTIME_RE_STRING = r'[a-z][a-z0-9\-]{0,29}'
API_VERSION_RE_STRING = r'[\w.]{1,32}'
ENV_RE_STRING = r'[\w.]{1,32}'
SOURCE_LANGUAGE_RE_STRING = r'[\w.\-]{1,32}'
HANDLER_STATIC_FILES = 'static_files'
HANDLER_STATIC_DIR = 'static_dir'
HANDLER_SCRIPT = 'script'
HANDLER_API_ENDPOINT = 'api_endpoint'
LOGIN_OPTIONAL = 'optional'
LOGIN_REQUIRED = 'required'
LOGIN_ADMIN = 'admin'
AUTH_FAIL_ACTION_REDIRECT = 'redirect'
AUTH_FAIL_ACTION_UNAUTHORIZED = 'unauthorized'
DATASTORE_ID_POLICY_LEGACY = 'legacy'
DATASTORE_ID_POLICY_DEFAULT = 'default'
SECURE_HTTP = 'never'
SECURE_HTTPS = 'always'
SECURE_HTTP_OR_HTTPS = 'optional'
SECURE_DEFAULT = 'default'
REQUIRE_MATCHING_FILE = 'require_matching_file'
DEFAULT_SKIP_FILES = (r'^(.*/)?('
r'(#.*#)|'
r'(.*~)|'
r'(.*\.py[co])|'
r'(.*/RCS/.*)|'
r'(\..*)|'
r')$')
SKIP_NO_FILES = r'(?!)'
DEFAULT_NOBUILD_FILES = (r'^$')
LOGIN = 'login'
AUTH_FAIL_ACTION = 'auth_fail_action'
SECURE = 'secure'
URL = 'url'
POSITION = 'position'
POSITION_HEAD = 'head'
POSITION_TAIL = 'tail'
STATIC_FILES = 'static_files'
UPLOAD = 'upload'
STATIC_DIR = 'static_dir'
MIME_TYPE = 'mime_type'
SCRIPT = 'script'
EXPIRATION = 'expiration'
API_ENDPOINT = 'api_endpoint'
HTTP_HEADERS = 'http_headers'
APPLICATION_READABLE = 'application_readable'
REDIRECT_HTTP_RESPONSE_CODE = 'redirect_http_response_code'
APPLICATION = 'application'
PROJECT = 'project'
MODULE = 'module'
SERVICE = 'service'
AUTOMATIC_SCALING = 'automatic_scaling'
MANUAL_SCALING = 'manual_scaling'
BASIC_SCALING = 'basic_scaling'
VM = 'vm'
VM_SETTINGS = 'vm_settings'
BETA_SETTINGS = 'beta_settings'
VM_HEALTH_CHECK = 'vm_health_check'
HEALTH_CHECK = 'health_check'
RESOURCES = 'resources'
NETWORK = 'network'
VERSION = 'version'
MAJOR_VERSION = 'major_version'
MINOR_VERSION = 'minor_version'
RUNTIME = 'runtime'
API_VERSION = 'api_version'
ENV = 'env'
ENTRYPOINT = 'entrypoint'
RUNTIME_CONFIG = 'runtime_config'
SOURCE_LANGUAGE = 'source_language'
BUILTINS = 'builtins'
INCLUDES = 'includes'
HANDLERS = 'handlers'
LIBRARIES = 'libraries'
DEFAULT_EXPIRATION = 'default_expiration'
SKIP_FILES = 'skip_files'
NOBUILD_FILES = 'nobuild_files'
SERVICES = 'inbound_services'
DERIVED_FILE_TYPE = 'derived_file_type'
JAVA_PRECOMPILED = 'java_precompiled'
PYTHON_PRECOMPILED = 'python_precompiled'
ADMIN_CONSOLE = 'admin_console'
ERROR_HANDLERS = 'error_handlers'
BACKENDS = 'backends'
THREADSAFE = 'threadsafe'
DATASTORE_AUTO_ID_POLICY = 'auto_id_policy'
API_CONFIG = 'api_config'
CODE_LOCK = 'code_lock'
ENV_VARIABLES = 'env_variables'
PAGESPEED = 'pagespeed'
SOURCE_REPO_RE_STRING = r'^[a-z][a-z0-9\-\+\.]*:[^#]*$'
SOURCE_REVISION_RE_STRING = r'^[0-9a-fA-F]+$'
SOURCE_REFERENCES_MAX_SIZE = 2048
INSTANCE_CLASS = 'instance_class'
MINIMUM_PENDING_LATENCY = 'min_pending_latency'
MAXIMUM_PENDING_LATENCY = 'max_pending_latency'
MINIMUM_IDLE_INSTANCES = 'min_idle_instances'
MAXIMUM_IDLE_INSTANCES = 'max_idle_instances'
MAXIMUM_CONCURRENT_REQUEST = 'max_concurrent_requests'
MIN_NUM_INSTANCES = 'min_num_instances'
MAX_NUM_INSTANCES = 'max_num_instances'
COOL_DOWN_PERIOD_SEC = 'cool_down_period_sec'
CPU_UTILIZATION = 'cpu_utilization'
CPU_UTILIZATION_UTILIZATION = 'target_utilization'
CPU_UTILIZATION_AGGREGATION_WINDOW_LENGTH_SEC = 'aggregation_window_length_sec'
TARGET_NETWORK_SENT_BYTES_PER_SEC = 'target_network_sent_bytes_per_sec'
TARGET_NETWORK_SENT_PACKETS_PER_SEC = 'target_network_sent_packets_per_sec'
TARGET_NETWORK_RECEIVED_BYTES_PER_SEC = 'target_network_received_bytes_per_sec'
TARGET_NETWORK_RECEIVED_PACKETS_PER_SEC = (
'target_network_received_packets_per_sec')
TARGET_DISK_WRITE_BYTES_PER_SEC = 'target_disk_write_bytes_per_sec'
TARGET_DISK_WRITE_OPS_PER_SEC = 'target_disk_write_ops_per_sec'
TARGET_DISK_READ_BYTES_PER_SEC = 'target_disk_read_bytes_per_sec'
TARGET_DISK_READ_OPS_PER_SEC = 'target_disk_read_ops_per_sec'
TARGET_REQUEST_COUNT_PER_SEC = 'target_request_count_per_sec'
TARGET_CONCURRENT_REQUESTS = 'target_concurrent_requests'
INSTANCES = 'instances'
MAX_INSTANCES = 'max_instances'
IDLE_TIMEOUT = 'idle_timeout'
PAGES = 'pages'
NAME = 'name'
ERROR_CODE = 'error_code'
FILE = 'file'
_ERROR_CODE_REGEX = r'(default|over_quota|dos_api_denial|timeout)'
ON = 'on'
ON_ALIASES = ['yes', 'y', 'True', 't', '1', 'true']
OFF = 'off'
OFF_ALIASES = ['no', 'n', 'False', 'f', '0', 'false']
ENABLE_HEALTH_CHECK = 'enable_health_check'
CHECK_INTERVAL_SEC = 'check_interval_sec'
TIMEOUT_SEC = 'timeout_sec'
UNHEALTHY_THRESHOLD = 'unhealthy_threshold'
HEALTHY_THRESHOLD = 'healthy_threshold'
RESTART_THRESHOLD = 'restart_threshold'
HOST = 'host'
CPU = 'cpu'
MEMORY_GB = 'memory_gb'
DISK_SIZE_GB = 'disk_size_gb'
FORWARDED_PORTS = 'forwarded_ports'
INSTANCE_TAG = 'instance_tag'
NETWORK_NAME = 'name'
class _VersionedLibrary(object):
"""A versioned library supported by App Engine."""
def __init__(self,
name,
url,
description,
supported_versions,
latest_version,
default_version=None,
deprecated_versions=None,
experimental_versions=None):
"""Initializer for _VersionedLibrary.
Args:
name: The name of the library e.g. "django".
url: The URL for the library's project page e.g.
"http://www.djangoproject.com/".
description: A short description of the library e.g. "A framework...".
supported_versions: A list of supported version names ordered by release
date e.g. ["v1", "v2", "v3"].
latest_version: The version of the library that will be used when
customers specify "latest." The rule of thumb is that this should
be the newest version that is neither deprecated nor experimental
(although may be experimental if all supported versions are either
deprecated or experimental).
default_version: The version of the library that is enabled by default
in the Python 2.7 runtime or None if the library is not available by
default e.g. "v1".
deprecated_versions: A list of the versions of the library that have been
deprecated e.g. ["v1", "v2"].
experimental_versions: A list of the versions of the library that are
currently experimental e.g. ["v1"].
"""
self.name = name
self.url = url
self.description = description
self.supported_versions = supported_versions
self.latest_version = latest_version
self.default_version = default_version
self.deprecated_versions = deprecated_versions or []
self.experimental_versions = experimental_versions or []
@property
def non_deprecated_versions(self):
return [version for version in self.supported_versions
if version not in self.deprecated_versions]
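# Editor's illustration (hypothetical, not part of the original SDK): shows how
# non_deprecated_versions filters supported_versions; the library name and
# versions below are made up for the example.
def _example_versioned_library():  # hypothetical helper, never called by the SDK
  lib = _VersionedLibrary(
      'example-lib',
      'http://example.com/',
      'A library that exists only for this illustration.',
      supported_versions=['1.0', '2.0', '3.0'],
      latest_version='3.0',
      deprecated_versions=['1.0'])
  return lib.non_deprecated_versions  # ['2.0', '3.0']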
_SUPPORTED_LIBRARIES = [
_VersionedLibrary(
'django',
'http://www.djangoproject.com/',
'A full-featured web application framework for Python.',
['1.2', '1.3', '1.4', '1.5'],
latest_version='1.4',
experimental_versions=['1.5'],
),
_VersionedLibrary(
'endpoints',
'https://developers.google.com/appengine/docs/python/endpoints/',
'Libraries for building APIs in an App Engine application.',
['1.0'],
latest_version='1.0',
),
_VersionedLibrary(
'jinja2',
'http://jinja.pocoo.org/docs/',
'A modern and designer friendly templating language for Python.',
['2.6'],
latest_version='2.6',
),
_VersionedLibrary(
'lxml',
'http://lxml.de/',
'A Pythonic binding for the C libraries libxml2 and libxslt.',
['2.3', '2.3.5'],
latest_version='2.3',
experimental_versions=['2.3.5'],
),
_VersionedLibrary(
'markupsafe',
'http://pypi.python.org/pypi/MarkupSafe',
'An XML/HTML/XHTML markup safe string for Python.',
['0.15'],
latest_version='0.15',
),
_VersionedLibrary(
'matplotlib',
'http://matplotlib.org/',
'A 2D plotting library which produces publication-quality figures.',
['1.2.0'],
latest_version='1.2.0',
),
_VersionedLibrary(
'MySQLdb',
'http://mysql-python.sourceforge.net/',
'A Python DB API v2.0 compatible interface to MySQL.',
['1.2.4b4', '1.2.4', '1.2.5'],
latest_version='1.2.4b4',
experimental_versions=['1.2.4b4', '1.2.4', '1.2.5']
),
_VersionedLibrary(
'numpy',
'http://numpy.scipy.org/',
'A general-purpose library for array-processing.',
['1.6.1'],
latest_version='1.6.1',
),
_VersionedLibrary(
'PIL',
'http://www.pythonware.com/library/pil/handbook/',
'A library for creating and transforming images.',
['1.1.7'],
latest_version='1.1.7',
),
_VersionedLibrary(
'protorpc',
'https://code.google.com/p/google-protorpc/',
'A framework for implementing HTTP-based remote procedure call (RPC) '
'services.',
['1.0'],
latest_version='1.0',
default_version='1.0',
),
_VersionedLibrary(
'crcmod',
'http://crcmod.sourceforge.net/',
'A library for generating Cyclic Redundancy Checks (CRC).',
['1.7'],
latest_version='1.7',
),
_VersionedLibrary(
'PyAMF',
'http://pyamf.appspot.com/index.html',
'A library that provides (AMF) Action Message Format functionality.',
['0.6.1', '0.7.2'],
latest_version='0.6.1',
experimental_versions=['0.7.2'],
),
_VersionedLibrary(
'pycrypto',
'https://www.dlitz.net/software/pycrypto/',
'A library of cryptography functions such as random number generation.',
['2.3', '2.6'],
latest_version='2.6',
),
_VersionedLibrary(
'setuptools',
'http://pypi.python.org/pypi/setuptools',
'A library that provides package and module discovery capabilities.',
['0.6c11'],
latest_version='0.6c11',
),
_VersionedLibrary(
'ssl',
'http://docs.python.org/dev/library/ssl.html',
'The SSL socket wrapper built-in module.',
['2.7'],
latest_version='2.7',
),
_VersionedLibrary(
'webapp2',
'http://webapp-improved.appspot.com/',
'A lightweight Python web framework.',
['2.3', '2.5.1', '2.5.2'],
latest_version='2.5.2',
default_version='2.3',
deprecated_versions=['2.3']
),
_VersionedLibrary(
'webob',
'http://www.webob.org/',
'A library that provides wrappers around the WSGI request environment.',
['1.1.1', '1.2.3'],
latest_version='1.2.3',
default_version='1.1.1',
),
_VersionedLibrary(
'yaml',
'http://www.yaml.org/',
'A library for YAML serialization and deserialization.',
['3.10'],
latest_version='3.10',
default_version='3.10'
),
]
_NAME_TO_SUPPORTED_LIBRARY = dict((library.name, library)
for library in _SUPPORTED_LIBRARIES)
REQUIRED_LIBRARIES = {
('jinja2', '2.6'): [('markupsafe', '0.15'), ('setuptools', '0.6c11')],
('jinja2', 'latest'): [('markupsafe', 'latest'), ('setuptools', 'latest')],
('matplotlib', '1.2.0'): [('numpy', '1.6.1')],
('matplotlib', 'latest'): [('numpy', 'latest')],
}
_USE_VERSION_FORMAT = ('use one of: "%s"')
_HTTP_SEPARATOR_CHARS = frozenset('()<>@,;:\\"/[]?={} \t')
_HTTP_TOKEN_CHARS = frozenset(string.printable[:-5]) - _HTTP_SEPARATOR_CHARS
_HTTP_TOKEN_RE = re.compile('[%s]+$' % re.escape(''.join(_HTTP_TOKEN_CHARS)))
_HTTP_REQUEST_HEADERS = frozenset([
'accept',
'accept-charset',
'accept-encoding',
'accept-language',
'authorization',
'expect',
'from',
'host',
'if-match',
'if-modified-since',
'if-none-match',
'if-range',
'if-unmodified-since',
'max-forwards',
'proxy-authorization',
'range',
'referer',
'te',
'user-agent',
])
_MAX_COOKIE_LENGTH = 4096
_MAX_URL_LENGTH = 2047
_MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS = 10240
_CANNED_RUNTIMES = ('contrib-dart', 'dart', 'go', 'php', 'php55', 'python',
'python27', 'python-compat', 'java', 'java7', 'vm',
'custom', 'nodejs', 'ruby')
_all_runtimes = _CANNED_RUNTIMES
def GetAllRuntimes():
"""Returns the list of all valid runtimes.
This can include third-party runtimes as well as canned runtimes.
Returns:
Tuple of strings.
"""
return _all_runtimes
class HandlerBase(validation.Validated):
"""Base class for URLMap and ApiConfigHandler."""
ATTRIBUTES = {
URL: validation.Optional(_URL_REGEX),
LOGIN: validation.Options(LOGIN_OPTIONAL,
LOGIN_REQUIRED,
LOGIN_ADMIN,
default=LOGIN_OPTIONAL),
AUTH_FAIL_ACTION: validation.Options(AUTH_FAIL_ACTION_REDIRECT,
AUTH_FAIL_ACTION_UNAUTHORIZED,
default=AUTH_FAIL_ACTION_REDIRECT),
SECURE: validation.Options(SECURE_HTTP,
SECURE_HTTPS,
SECURE_HTTP_OR_HTTPS,
SECURE_DEFAULT,
default=SECURE_DEFAULT),
HANDLER_SCRIPT: validation.Optional(_FILES_REGEX)
}
class HttpHeadersDict(validation.ValidatedDict):
"""A dict that limits keys and values what http_headers allows.
http_headers is an static handler key i.e. it applies to handlers with
static_dir or static_files keys. An example of how http_headers is used is
handlers:
- url: /static
static_dir: static
http_headers:
X-Foo-Header: foo value
X-Bar-Header: bar value
"""
DISALLOWED_HEADERS = frozenset([
'content-encoding',
'content-length',
'date',
'server'
])
MAX_HEADER_LENGTH = 500
MAX_HEADER_VALUE_LENGTHS = {
'content-security-policy': _MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS,
'x-content-security-policy': _MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS,
'x-webkit-csp': _MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS,
'content-security-policy-report-only':
_MAX_HEADER_SIZE_FOR_EXEMPTED_HEADERS,
'set-cookie': _MAX_COOKIE_LENGTH,
'set-cookie2': _MAX_COOKIE_LENGTH,
'location': _MAX_URL_LENGTH}
MAX_LEN = 500
class KeyValidator(validation.Validator):
"""Ensures that keys in HttpHeadersDict i.e. header names are valid.
An instance is used as HttpHeadersDict's KEY_VALIDATOR.
"""
def Validate(self, name, unused_key=None):
"""Returns argument, or raises an exception if it is invalid.
HTTP header names are defined by RFC 2616 section 4.2.
Args:
name: HTTP header field name.
unused_key: Unused.
Returns:
name argument, unchanged.
Raises:
appinfo_errors.InvalidHttpHeaderName: argument cannot be used as an HTTP
header name.
"""
original_name = name
if isinstance(name, unicode):
try:
name = name.encode('ascii')
except UnicodeEncodeError:
raise appinfo_errors.InvalidHttpHeaderName(
'HTTP header names must not contain non-ASCII data')
name = name.lower()
if not _HTTP_TOKEN_RE.match(name):
raise appinfo_errors.InvalidHttpHeaderName(
'An HTTP header must be a non-empty RFC 2616 token.')
if name in _HTTP_REQUEST_HEADERS:
raise appinfo_errors.InvalidHttpHeaderName(
'%r can only be used in HTTP requests, not responses.'
% original_name)
if name.startswith('x-appengine'):
raise appinfo_errors.InvalidHttpHeaderName(
'HTTP header names that begin with X-Appengine are reserved.')
if wsgiref.util.is_hop_by_hop(name):
raise appinfo_errors.InvalidHttpHeaderName(
'Only end-to-end headers may be used. See RFC 2616 section'
' 13.5.1.')
if name in HttpHeadersDict.DISALLOWED_HEADERS:
raise appinfo_errors.InvalidHttpHeaderName(
'%s is a disallowed header.' % name)
return original_name
class ValueValidator(validation.Validator):
"""Ensures that values in HttpHeadersDict i.e. header values are valid.
An instance is used as HttpHeadersDict's VALUE_VALIDATOR.
"""
def Validate(self, value, key=None):
"""Returns value, or raises an exception if it is invalid.
According to RFC 2616 section 4.2, header field values must consist "of
either *TEXT or combinations of token, separators, and quoted-string".
TEXT = <any OCTET except CTLs, but including LWS>
Args:
value: HTTP header field value.
key: HTTP header field name.
Returns:
value argument.
Raises:
appinfo_errors.InvalidHttpHeaderValue: argument cannot be used as an
HTTP header value.
"""
if isinstance(value, unicode):
try:
value = value.encode('ascii')
except UnicodeEncodeError:
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header values must not contain non-ASCII data')
key = key.lower()
printable = set(string.printable[:-5])
if not all(char in printable for char in value):
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header field values must consist of printable characters.')
HttpHeadersDict.ValueValidator.AssertHeaderNotTooLong(key, value)
return value
@staticmethod
def AssertHeaderNotTooLong(name, value):
header_length = len('%s: %s\r\n' % (name, value))
if header_length >= HttpHeadersDict.MAX_HEADER_LENGTH:
try:
max_len = HttpHeadersDict.MAX_HEADER_VALUE_LENGTHS[name]
except KeyError:
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header (name + value) is too long.')
if len(value) > max_len:
insert = name, len(value), max_len
raise appinfo_errors.InvalidHttpHeaderValue(
'%r header value has length %d, which exceeds the maximum allowed,'
' %d.' % insert)
KEY_VALIDATOR = KeyValidator()
VALUE_VALIDATOR = ValueValidator()
def Get(self, header_name):
"""Gets a header value.
Args:
header_name: HTTP header name to look for.
Returns:
A header value that corresponds to header_name. If more than one such
value is in self, one of the values is selected arbitrarily, and
returned. The selection is not deterministic.
"""
for name in self:
if name.lower() == header_name.lower():
return self[name]
def __setitem__(self, key, value):
is_addition = self.Get(key) is None
if is_addition and len(self) >= self.MAX_LEN:
raise appinfo_errors.TooManyHttpHeaders(
'Tried to add another header when the current set of HTTP headers'
' already has the maximum allowed number of headers, %d.'
% HttpHeadersDict.MAX_LEN)
super(HttpHeadersDict, self).__setitem__(key, value)
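# Editor's sketch (hypothetical, not part of the original SDK): rough usage of
# HttpHeadersDict, assuming validation.ValidatedDict applies KEY_VALIDATOR and
# VALUE_VALIDATOR when items are assigned.
def _example_http_headers_dict():  # hypothetical helper, never called by the SDK
  headers = HttpHeadersDict()
  headers['X-Foo-Header'] = 'foo value'  # ordinary response header: accepted
  try:
    headers['Content-Length'] = '42'  # disallowed response header
  except appinfo_errors.InvalidHttpHeaderName:
    pass
  return headers.Get('x-foo-header')  # lookup via Get() is case-insensitive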
class URLMap(HandlerBase):
"""Mapping from URLs to handlers.
This class acts like something of a union type. Its purpose is to
describe a mapping between a set of URLs and their handlers. What
handler type a given instance has is determined by which handler-id
attribute is used.
Each mapping can have one and only one handler type. Attempting to
use more than one handler-id attribute will cause an UnknownHandlerType
to be raised during validation. Failure to provide any handler-id
attributes will cause MissingHandlerType to be raised during validation.
The regular expression used by the url field will be used to match against
the entire URL path and query string of the request. This means that
partial maps will not be matched. Specifying a url, say /admin, is the
same as matching against the regular expression '^/admin$'. Don't begin
your matching url with ^ or end it with $. These regular expressions
won't be accepted and will raise ValueError.
Attributes:
login: Whether or not login is required to access URL. Defaults to
'optional'.
secure: Restriction on the protocol which can be used to serve
this URL/handler (HTTP, HTTPS or either).
url: Regular expression used to fully match against the request URLs path.
See Special Cases for using static_dir.
static_files: Handler id attribute that maps URL to the appropriate
file. Can use regex back-references to the string matched to url.
upload: Regular expression used by the application configuration
program to know which files are uploaded as blobs. It's very
difficult to determine this using just the url and static_files
so this attribute must be included. Required when defining a
static_files mapping.
A matching file name must fully match against the upload regex, similar
to how url is matched against the request path. Do not begin upload
with ^ or end it with $.
static_dir: Handler id that maps the provided url to a sub-directory
within the application directory. See Special Cases.
mime_type: When used with static_files and static_dir the mime-type
of files served from those directories are overridden with this
value.
script: Handler id that maps URLs to a script handler within the application
directory that will run using CGI.
position: Used in AppInclude objects to specify whether a handler
should be inserted at the beginning of the primary handler list or at the
end. If 'tail' is specified, the handler is inserted at the end,
otherwise, the handler is inserted at the beginning. This means that
'head' is the effective default.
expiration: When used with static files and directories, the time delta to
use for cache expiration. Has the form '4d 5h 30m 15s', where each letter
signifies days, hours, minutes, and seconds, respectively. The 's' for
seconds may be omitted. At least one amount must be specified; combining
multiple amounts is optional. Example good values: '10', '1d 6h',
'1h 30m', '7d 7d 7d', '5m 30'.
api_endpoint: Handler id that identifies endpoint as an API endpoint,
calls that terminate here will be handled by the api serving framework.
Special cases:
When defining a static_dir handler, do not use a regular expression
in the url attribute. Both the url and static_dir attributes are
automatically mapped to these equivalents:
<url>/(.*)
<static_dir>/\1
For example:
url: /images
static_dir: images_folder
Is the same as this static_files declaration:
url: /images/(.*)
static_files: images_folder/\1
upload: images_folder/(.*)
"""
ATTRIBUTES = {
HANDLER_STATIC_FILES: validation.Optional(_FILES_REGEX),
UPLOAD: validation.Optional(_FILES_REGEX),
APPLICATION_READABLE: validation.Optional(bool),
HANDLER_STATIC_DIR: validation.Optional(_FILES_REGEX),
MIME_TYPE: validation.Optional(str),
EXPIRATION: validation.Optional(_EXPIRATION_REGEX),
REQUIRE_MATCHING_FILE: validation.Optional(bool),
HTTP_HEADERS: validation.Optional(HttpHeadersDict),
POSITION: validation.Optional(validation.Options(POSITION_HEAD,
POSITION_TAIL)),
HANDLER_API_ENDPOINT: validation.Optional(validation.Options(
(ON, ON_ALIASES),
(OFF, OFF_ALIASES))),
REDIRECT_HTTP_RESPONSE_CODE: validation.Optional(validation.Options(
'301', '302', '303', '307')),
}
ATTRIBUTES.update(HandlerBase.ATTRIBUTES)
COMMON_FIELDS = set([
URL, LOGIN, AUTH_FAIL_ACTION, SECURE, REDIRECT_HTTP_RESPONSE_CODE])
ALLOWED_FIELDS = {
HANDLER_STATIC_FILES: (MIME_TYPE, UPLOAD, EXPIRATION,
REQUIRE_MATCHING_FILE, HTTP_HEADERS,
APPLICATION_READABLE),
HANDLER_STATIC_DIR: (MIME_TYPE, EXPIRATION, REQUIRE_MATCHING_FILE,
HTTP_HEADERS, APPLICATION_READABLE),
HANDLER_SCRIPT: (POSITION),
HANDLER_API_ENDPOINT: (POSITION, SCRIPT),
}
def GetHandler(self):
"""Get handler for mapping.
Returns:
Value of the handler (determined by handler id attribute).
"""
return getattr(self, self.GetHandlerType())
def GetHandlerType(self):
"""Get handler type of mapping.
Returns:
Handler type determined by which handler id attribute is set.
Raises:
UnknownHandlerType: when none of the handler id attributes are set.
UnexpectedHandlerAttribute: when an unexpected attribute is set for the
discovered handler type.
HandlerTypeMissingAttribute: when the handler is missing a
required attribute for its handler type.
MissingHandlerAttribute: when a URL handler is missing an attribute
"""
if getattr(self, HANDLER_API_ENDPOINT) is not None:
mapping_type = HANDLER_API_ENDPOINT
else:
for id_field in URLMap.ALLOWED_FIELDS.iterkeys():
if getattr(self, id_field) is not None:
mapping_type = id_field
break
else:
raise appinfo_errors.UnknownHandlerType(
'Unknown url handler type.\n%s' % str(self))
allowed_fields = URLMap.ALLOWED_FIELDS[mapping_type]
for attribute in self.ATTRIBUTES.iterkeys():
if (getattr(self, attribute) is not None and
not (attribute in allowed_fields or
attribute in URLMap.COMMON_FIELDS or
attribute == mapping_type)):
raise appinfo_errors.UnexpectedHandlerAttribute(
'Unexpected attribute "%s" for mapping type %s.' %
(attribute, mapping_type))
if mapping_type == HANDLER_STATIC_FILES and not self.upload:
raise appinfo_errors.MissingHandlerAttribute(
'Missing "%s" attribute for URL "%s".' % (UPLOAD, self.url))
return mapping_type
def CheckInitialized(self):
"""Adds additional checking to make sure handler has correct fields.
In addition to the normal Validated checks, this calls GetHandlerType,
which validates that all the handler fields are configured
properly.
Raises:
UnknownHandlerType: when none of the handler id attributes are set.
UnexpectedHandlerAttribute: when an unexpected attribute is set for the
discovered handler type.
HandlerTypeMissingAttribute: when the handler is missing a required
attribute for its handler type.
ContentTypeSpecifiedMultipleTimes: when mime_type is inconsistent with
http_headers.
"""
super(URLMap, self).CheckInitialized()
if self.GetHandlerType() in (STATIC_DIR, STATIC_FILES):
self.AssertUniqueContentType()
def AssertUniqueContentType(self):
"""Makes sure that self.http_headers is consistent with self.mime_type.
Assumes self is a static handler i.e. either self.static_dir or
self.static_files is set (to not None).
Raises:
appinfo_errors.ContentTypeSpecifiedMultipleTimes: Raised when
self.http_headers contains a Content-Type header, and self.mime_type is
set. For example, the following configuration would be rejected:
handlers:
- url: /static
static_dir: static
mime_type: text/html
http_headers:
content-type: text/html
As this example shows, a configuration will be rejected when
http_headers and mime_type specify a content type, even when they
specify the same content type.
"""
used_both_fields = self.mime_type and self.http_headers
if not used_both_fields:
return
content_type = self.http_headers.Get('Content-Type')
if content_type is not None:
raise appinfo_errors.ContentTypeSpecifiedMultipleTimes(
'http_header specified a Content-Type header of %r in a handler that'
' also specified a mime_type of %r.' % (content_type, self.mime_type))
def FixSecureDefaults(self):
"""Force omitted 'secure: ...' handler fields to 'secure: optional'.
The effect is that handler.secure is never equal to the (nominal)
default.
See http://b/issue?id=2073962.
"""
if self.secure == SECURE_DEFAULT:
self.secure = SECURE_HTTP_OR_HTTPS
def WarnReservedURLs(self):
"""Generates a warning for reserved URLs.
See:
https://developers.google.com/appengine/docs/python/config/appconfig#Reserved_URLs
"""
if self.url == '/form':
logging.warning(
'The URL path "/form" is reserved and will not be matched.')
def ErrorOnPositionForAppInfo(self):
"""Raises an error if position is specified outside of AppInclude objects.
Raises:
PositionUsedInAppYamlHandler: when position attribute is specified for an
app.yaml file instead of an include.yaml file.
"""
if self.position:
raise appinfo_errors.PositionUsedInAppYamlHandler(
'The position attribute was specified for this handler, but this is '
'an app.yaml file. Position attribute is only valid for '
'include.yaml files.')
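# Editor's sketch (hypothetical, not part of the original SDK): mirrors the
# static_files example from the class docstring to show how the handler-id
# attribute determines GetHandlerType().
def _example_url_map():  # hypothetical helper, never called by the SDK
  handler = URLMap(url='/images/(.*)',
                   static_files=r'images_folder/\1',
                   upload='images_folder/(.*)')
  return handler.GetHandlerType()  # 'static_files'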
class AdminConsolePage(validation.Validated):
"""Class representing admin console page in AdminConsole object.
"""
ATTRIBUTES = {
URL: _URL_REGEX,
NAME: _PAGE_NAME_REGEX,
}
class AdminConsole(validation.Validated):
"""Class representing admin console directives in application info.
"""
ATTRIBUTES = {
PAGES: validation.Optional(validation.Repeated(AdminConsolePage)),
}
@classmethod
def Merge(cls, adminconsole_one, adminconsole_two):
"""Return the result of merging two AdminConsole objects."""
if not adminconsole_one or not adminconsole_two:
return adminconsole_one or adminconsole_two
if adminconsole_one.pages:
if adminconsole_two.pages:
adminconsole_one.pages.extend(adminconsole_two.pages)
else:
adminconsole_one.pages = adminconsole_two.pages
return adminconsole_one
class ErrorHandlers(validation.Validated):
"""Class representing error handler directives in application info.
"""
ATTRIBUTES = {
ERROR_CODE: validation.Optional(_ERROR_CODE_REGEX),
FILE: _FILES_REGEX,
MIME_TYPE: validation.Optional(str),
}
class BuiltinHandler(validation.Validated):
"""Class representing builtin handler directives in application info.
Permits arbitrary keys but their values must be described by the
validation.Options object returned by ATTRIBUTES.
"""
class DynamicAttributes(dict):
"""Provide a dictionary object that will always claim to have a key.
This dictionary returns a fixed value for any get operation. The fixed
value passed in as a constructor parameter should be a
validation.Validated object.
"""
def __init__(self, return_value, **parameters):
self.__return_value = return_value
dict.__init__(self, parameters)
def __contains__(self, _):
return True
def __getitem__(self, _):
return self.__return_value
ATTRIBUTES = DynamicAttributes(
validation.Optional(validation.Options((ON, ON_ALIASES),
(OFF, OFF_ALIASES))))
def __init__(self, **attributes):
"""Ensure that all BuiltinHandler objects at least have attribute 'default'.
"""
self.builtin_name = ''
super(BuiltinHandler, self).__init__(**attributes)
def __setattr__(self, key, value):
"""Permit ATTRIBUTES.iteritems() to return set of items that have values.
Whenever validate calls iteritems(), it is always called on ATTRIBUTES,
not on __dict__, so this override is important to ensure that functions
such as ToYAML() return the correct set of keys.
Raises:
MultipleBuiltinsSpecified: when more than one builtin is defined in a list
element.
"""
if key == 'builtin_name':
object.__setattr__(self, key, value)
elif not self.builtin_name:
self.ATTRIBUTES[key] = ''
self.builtin_name = key
super(BuiltinHandler, self).__setattr__(key, value)
else:
raise appinfo_errors.MultipleBuiltinsSpecified(
'More than one builtin defined in list element. Each new builtin '
'should be prefixed by "-".')
def __getattr__(self, key):
if key.startswith('_'):
raise AttributeError
return None
def ToDict(self):
"""Convert BuiltinHander object to a dictionary.
Returns:
dictionary of the form: {builtin_handler_name: on/off}
"""
return {self.builtin_name: getattr(self, self.builtin_name)}
@classmethod
def IsDefined(cls, builtins_list, builtin_name):
"""Find if a builtin is defined in a given list of builtin handler objects.
Args:
builtins_list: list of BuiltinHandler objects (typically yaml.builtins)
builtin_name: name of builtin to find whether or not it is defined
Returns:
true if builtin_name is defined by a member of builtins_list,
false otherwise
"""
for b in builtins_list:
if b.builtin_name == builtin_name:
return True
return False
@classmethod
def ListToTuples(cls, builtins_list):
"""Converts a list of BuiltinHandler objects to a list of (name, status)."""
return [(b.builtin_name, getattr(b, b.builtin_name)) for b in builtins_list]
@classmethod
def Validate(cls, builtins_list, runtime=None):
"""Verify that all BuiltinHandler objects are valid and not repeated.
Args:
builtins_list: list of BuiltinHandler objects to validate.
runtime: if set then warnings are generated for builtins that have been
deprecated in the given runtime.
Raises:
InvalidBuiltinFormat: if the name of a BuiltinHandler object
cannot be determined.
DuplicateBuiltinsSpecified: if a builtin handler name is used
more than once in the list.
"""
seen = set()
for b in builtins_list:
if not b.builtin_name:
raise appinfo_errors.InvalidBuiltinFormat(
'Name of builtin for list object %s could not be determined.'
% b)
if b.builtin_name in seen:
raise appinfo_errors.DuplicateBuiltinsSpecified(
'Builtin %s was specified more than once in one yaml file.'
% b.builtin_name)
if b.builtin_name == 'datastore_admin' and runtime == 'python':
logging.warning(
'The datastore_admin builtin is deprecated. You can find '
'information on how to enable it through the Administrative '
'Console here: '
'http://developers.google.com/appengine/docs/adminconsole/'
'datastoreadmin.html')
elif b.builtin_name == 'mapreduce' and runtime == 'python':
logging.warning(
'The mapreduce builtin is deprecated. You can find more '
'information on how to configure and use it here: '
'http://developers.google.com/appengine/docs/python/dataprocessing/'
'overview.html')
seen.add(b.builtin_name)
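# Editor's sketch (hypothetical, not part of the original SDK): each list element
# under "builtins:" becomes a BuiltinHandler with a single dynamic attribute, so
# programmatic construction behaves roughly as follows.
def _example_builtin_handler():  # hypothetical helper, never called by the SDK
  handler = BuiltinHandler(appstats='on')
  handler.ToDict()                                 # {'appstats': 'on'}
  BuiltinHandler.IsDefined([handler], 'appstats')  # True
  return BuiltinHandler.ListToTuples([handler])    # [('appstats', 'on')]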
class ApiConfigHandler(HandlerBase):
"""Class representing api_config handler directives in application info."""
ATTRIBUTES = HandlerBase.ATTRIBUTES
ATTRIBUTES.update({
URL: validation.Regex(_URL_REGEX),
HANDLER_SCRIPT: validation.Regex(_FILES_REGEX)
})
class Library(validation.Validated):
"""Class representing the configuration of a single library."""
ATTRIBUTES = {'name': validation.Type(str),
'version': validation.Type(str)}
def CheckInitialized(self):
"""Raises if the library configuration is not valid."""
super(Library, self).CheckInitialized()
if self.name not in _NAME_TO_SUPPORTED_LIBRARY:
raise appinfo_errors.InvalidLibraryName(
'the library "%s" is not supported' % self.name)
supported_library = _NAME_TO_SUPPORTED_LIBRARY[self.name]
if self.version == 'latest':
self.version = supported_library.latest_version
elif self.version not in supported_library.supported_versions:
raise appinfo_errors.InvalidLibraryVersion(
('%s version "%s" is not supported, ' + _USE_VERSION_FORMAT) % (
self.name,
self.version,
'", "'.join(supported_library.non_deprecated_versions)))
elif self.version in supported_library.deprecated_versions:
use_vers = '", "'.join(supported_library.non_deprecated_versions)
logging.warning(
'%s version "%s" is deprecated, ' + _USE_VERSION_FORMAT,
self.name,
self.version,
use_vers)
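# Editor's sketch (hypothetical, not part of the original SDK): "latest" is
# resolved against _SUPPORTED_LIBRARIES in CheckInitialized(), so a request for
# the latest jinja2 ends up pinned to the registered latest_version.
def _example_library_resolution():  # hypothetical helper, never called by the SDK
  lib = Library(name='jinja2', version='latest')
  lib.CheckInitialized()
  return lib.version  # '2.6', the latest_version registered above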
class CpuUtilization(validation.Validated):
"""Class representing the configuration of VM CPU utilization."""
ATTRIBUTES = {
CPU_UTILIZATION_UTILIZATION: validation.Optional(
validation.Range(1e-6, 1.0, float)),
CPU_UTILIZATION_AGGREGATION_WINDOW_LENGTH_SEC: validation.Optional(
validation.Range(1, sys.maxint)),
}
class AutomaticScaling(validation.Validated):
"""Class representing automatic scaling settings in the AppInfoExternal."""
ATTRIBUTES = {
MINIMUM_IDLE_INSTANCES: validation.Optional(_IDLE_INSTANCES_REGEX),
MAXIMUM_IDLE_INSTANCES: validation.Optional(_IDLE_INSTANCES_REGEX),
MINIMUM_PENDING_LATENCY: validation.Optional(_PENDING_LATENCY_REGEX),
MAXIMUM_PENDING_LATENCY: validation.Optional(_PENDING_LATENCY_REGEX),
MAXIMUM_CONCURRENT_REQUEST: validation.Optional(
_CONCURRENT_REQUESTS_REGEX),
MIN_NUM_INSTANCES: validation.Optional(validation.Range(1, sys.maxint)),
MAX_NUM_INSTANCES: validation.Optional(validation.Range(1, sys.maxint)),
COOL_DOWN_PERIOD_SEC: validation.Optional(
validation.Range(60, sys.maxint, int)),
CPU_UTILIZATION: validation.Optional(CpuUtilization),
TARGET_NETWORK_SENT_BYTES_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_NETWORK_SENT_PACKETS_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_NETWORK_RECEIVED_BYTES_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_NETWORK_RECEIVED_PACKETS_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_DISK_WRITE_BYTES_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_DISK_WRITE_OPS_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_DISK_READ_BYTES_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_DISK_READ_OPS_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_REQUEST_COUNT_PER_SEC:
validation.Optional(validation.Range(1, sys.maxint)),
TARGET_CONCURRENT_REQUESTS:
validation.Optional(validation.Range(1, sys.maxint)),
}
class ManualScaling(validation.Validated):
"""Class representing manual scaling settings in the AppInfoExternal."""
ATTRIBUTES = {
INSTANCES: validation.Regex(_INSTANCES_REGEX),
}
class BasicScaling(validation.Validated):
"""Class representing basic scaling settings in the AppInfoExternal."""
ATTRIBUTES = {
MAX_INSTANCES: validation.Regex(_INSTANCES_REGEX),
IDLE_TIMEOUT: validation.Optional(_IDLE_TIMEOUT_REGEX),
}
class RuntimeConfig(validation.ValidatedDict):
"""Class for "vanilla" runtime configuration.
Fields used vary by runtime, so we delegate validation to the per-runtime
build processes.
These are intended to be used during Dockerfile generation, not after VM boot.
"""
KEY_VALIDATOR = validation.Regex('[a-zA-Z_][a-zA-Z0-9_]*')
VALUE_VALIDATOR = str
class VmSettings(validation.ValidatedDict):
"""Class for VM settings.
We don't validate these further here. They're validated server side.
"""
KEY_VALIDATOR = validation.Regex('[a-zA-Z_][a-zA-Z0-9_]*')
VALUE_VALIDATOR = str
@classmethod
def Merge(cls, vm_settings_one, vm_settings_two):
result_vm_settings = (vm_settings_two or {}).copy()
result_vm_settings.update(vm_settings_one or {})
return VmSettings(**result_vm_settings) if result_vm_settings else None
class BetaSettings(VmSettings):
"""Class for Beta (internal or unreleased) settings.
This class is meant to replace VmSettings eventually.
All new beta settings must be registered in shared_constants.py.
We don't validate these further here. They're validated server side.
"""
@classmethod
def Merge(cls, beta_settings_one, beta_settings_two):
merged = VmSettings.Merge(beta_settings_one, beta_settings_two)
return BetaSettings(**merged.ToDict()) if merged else None
class EnvironmentVariables(validation.ValidatedDict):
"""Class representing a mapping of environment variable key value pairs."""
KEY_VALIDATOR = validation.Regex('[a-zA-Z_][a-zA-Z0-9_]*')
VALUE_VALIDATOR = str
@classmethod
def Merge(cls, env_variables_one, env_variables_two):
"""Merges to EnvironmentVariables instances.
Args:
env_variables_one: The first EnvironmentVariables instance or None.
env_variables_two: The second EnvironmentVariables instance or None.
Returns:
The merged EnvironmentVariables instance, or None if both input instances
are None or empty.
If a variable is specified by both instances, the value from
env_variables_two is used.
"""
result_env_variables = (env_variables_one or {}).copy()
result_env_variables.update(env_variables_two or {})
return (EnvironmentVariables(**result_env_variables)
if result_env_variables else None)
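# Editor's sketch (hypothetical, not part of the original SDK): when both
# mappings define the same key, Merge keeps the value from the second operand.
def _example_env_variables_merge():  # hypothetical helper, never called by the SDK
  base = EnvironmentVariables(FOO='one', BAR='base')
  override = EnvironmentVariables(BAR='override')
  merged = EnvironmentVariables.Merge(base, override)
  return merged['BAR']  # 'override'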
def ValidateSourceReference(ref):
"""Determines if a source reference is valid.
Args:
ref: A source reference in a [repository_uri#]revision form.
Raises:
ValidationError: when the reference is malformed.
"""
repo_revision = ref.split('#', 1)
revision_id = repo_revision[-1]
if not re.match(SOURCE_REVISION_RE_STRING, revision_id):
raise validation.ValidationError('Bad revision identifier: %s' %
revision_id)
if len(repo_revision) == 2:
uri = repo_revision[0]
if not re.match(SOURCE_REPO_RE_STRING, uri):
raise validation.ValidationError('Bad repository URI: %s' % uri)
def ValidateCombinedSourceReferencesString(source_refs):
"""Determines if source_refs contains a valid list of source references.
Args:
source_refs: A multi-line string containing one source reference per line.
Raises:
ValidationError: when the reference is malformed.
"""
if len(source_refs) > SOURCE_REFERENCES_MAX_SIZE:
raise validation.ValidationError(
'Total source reference(s) size exceeds the limit: %d > %d' % (
len(source_refs), SOURCE_REFERENCES_MAX_SIZE))
for ref in source_refs.splitlines():
ValidateSourceReference(ref.strip())
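# Editor's sketch (hypothetical, not part of the original SDK): a reference is
# either a bare hexadecimal revision or "repository_uri#revision"; anything else
# raises ValidationError.
def _example_source_references():  # hypothetical helper, never called by the SDK
  ValidateSourceReference('9a276a6cb16f')                           # bare revision
  ValidateSourceReference('https://example.com/repo.git#9a276a6c')  # uri#revision
  try:
    ValidateSourceReference('not hex!')  # malformed revision id
  except validation.ValidationError:
    pass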
class HealthCheck(validation.Validated):
"""Class representing the health check configuration.
"""
ATTRIBUTES = {
ENABLE_HEALTH_CHECK: validation.Optional(validation.TYPE_BOOL),
CHECK_INTERVAL_SEC: validation.Optional(validation.Range(0, sys.maxint)),
TIMEOUT_SEC: validation.Optional(validation.Range(0, sys.maxint)),
UNHEALTHY_THRESHOLD: validation.Optional(validation.Range(0, sys.maxint)),
HEALTHY_THRESHOLD: validation.Optional(validation.Range(0, sys.maxint)),
RESTART_THRESHOLD: validation.Optional(validation.Range(0, sys.maxint)),
HOST: validation.Optional(validation.TYPE_STR)}
class VmHealthCheck(HealthCheck):
"""Class representing the configuration of VM health check.
This class is deprecated and will be removed (use HealthCheck).
"""
pass
class Resources(validation.Validated):
"""Class representing the configuration of VM resources."""
ATTRIBUTES = {
CPU: validation.Optional(validation.TYPE_FLOAT),
MEMORY_GB: validation.Optional(validation.TYPE_FLOAT),
DISK_SIZE_GB: validation.Optional(validation.TYPE_INT)
}
class Network(validation.Validated):
"""Class representing the VM network configuration."""
ATTRIBUTES = {
FORWARDED_PORTS: validation.Optional(validation.Repeated(validation.Regex(
'[0-9]+(:[0-9]+)?(/(udp|tcp))?'))),
INSTANCE_TAG: validation.Optional(validation.Regex(
GCE_RESOURCE_NAME_REGEX)),
NETWORK_NAME: validation.Optional(validation.Regex(
GCE_RESOURCE_NAME_REGEX)),
}
class AppInclude(validation.Validated):
"""Class representing the contents of an included app.yaml file.
Used for both builtins and includes directives.
"""
ATTRIBUTES = {
BUILTINS: validation.Optional(validation.Repeated(BuiltinHandler)),
INCLUDES: validation.Optional(validation.Type(list)),
HANDLERS: validation.Optional(validation.Repeated(URLMap), default=[]),
ADMIN_CONSOLE: validation.Optional(AdminConsole),
MANUAL_SCALING: validation.Optional(ManualScaling),
VM: validation.Optional(bool),
VM_SETTINGS: validation.Optional(VmSettings),
BETA_SETTINGS: validation.Optional(BetaSettings),
ENV_VARIABLES: validation.Optional(EnvironmentVariables),
SKIP_FILES: validation.RegexStr(default=SKIP_NO_FILES),
}
@classmethod
def MergeManualScaling(cls, appinclude_one, appinclude_two):
"""Takes the greater of <manual_scaling.instances> from the args.
Note that appinclude_one is mutated to be the merged result in this process.
Also, this function needs to be updated if ManualScaling gets additional
fields.
Args:
appinclude_one: object one to merge. Must have a "manual_scaling" field
which contains a ManualScaling().
appinclude_two: object two to merge. Must have a "manual_scaling" field
which contains a ManualScaling().
Returns:
Object that is the result of merging
appinclude_one.manual_scaling.instances and
appinclude_two.manual_scaling.instances. I.e., <appinclude_one>
after the mutations are complete.
"""
def _Instances(appinclude):
if appinclude.manual_scaling:
if appinclude.manual_scaling.instances:
return int(appinclude.manual_scaling.instances)
return None
instances = max(_Instances(appinclude_one), _Instances(appinclude_two))
if instances is not None:
appinclude_one.manual_scaling = ManualScaling(instances=str(instances))
return appinclude_one
@classmethod
def _CommonMergeOps(cls, one, two):
"""This function performs common merge operations."""
AppInclude.MergeManualScaling(one, two)
one.admin_console = AdminConsole.Merge(one.admin_console,
two.admin_console)
one.vm = two.vm or one.vm
one.vm_settings = VmSettings.Merge(one.vm_settings,
two.vm_settings)
if hasattr(one, 'beta_settings'):
one.beta_settings = BetaSettings.Merge(one.beta_settings,
two.beta_settings)
one.env_variables = EnvironmentVariables.Merge(one.env_variables,
two.env_variables)
one.skip_files = cls.MergeSkipFiles(one.skip_files, two.skip_files)
return one
@classmethod
def MergeAppYamlAppInclude(cls, appyaml, appinclude):
"""This function merges an app.yaml file with referenced builtins/includes.
"""
if not appinclude:
return appyaml
if appinclude.handlers:
tail = appyaml.handlers or []
appyaml.handlers = []
for h in appinclude.handlers:
if not h.position or h.position == 'head':
appyaml.handlers.append(h)
else:
tail.append(h)
h.position = None
appyaml.handlers.extend(tail)
appyaml = cls._CommonMergeOps(appyaml, appinclude)
appyaml.NormalizeVmSettings()
return appyaml
@classmethod
def MergeAppIncludes(cls, appinclude_one, appinclude_two):
"""Merges the non-referential state of the provided AppInclude.
That is, builtins and includes directives are not preserved, but
any static objects are copied into an aggregate AppInclude object that
preserves the directives of both provided AppInclude objects.
Note that appinclude_one is mutated to be the merged result in this process.
Args:
appinclude_one: object one to merge
appinclude_two: object two to merge
Returns:
AppInclude object that is the result of merging the static directives of
appinclude_one and appinclude_two. I.e., <appinclude_one> after the
mutations are complete.
"""
if not appinclude_one or not appinclude_two:
return appinclude_one or appinclude_two
if appinclude_one.handlers:
if appinclude_two.handlers:
appinclude_one.handlers.extend(appinclude_two.handlers)
else:
appinclude_one.handlers = appinclude_two.handlers
return cls._CommonMergeOps(appinclude_one, appinclude_two)
@staticmethod
def MergeSkipFiles(skip_files_one, skip_files_two):
if skip_files_one == SKIP_NO_FILES:
return skip_files_two
if skip_files_two == SKIP_NO_FILES:
return skip_files_one
return validation.RegexStr().Validate(
[skip_files_one, skip_files_two], SKIP_FILES)
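# Editor's sketch (hypothetical, not part of the original SDK): MergeManualScaling
# keeps the larger instance count of the two includes, mutating the first operand.
def _example_merge_manual_scaling():  # hypothetical helper, never called by the SDK
  one = AppInclude(manual_scaling=ManualScaling(instances='3'))
  two = AppInclude(manual_scaling=ManualScaling(instances='5'))
  AppInclude.MergeManualScaling(one, two)
  return one.manual_scaling.instances  # '5'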
class AppInfoExternal(validation.Validated):
"""Class representing users application info.
This class is passed to a yaml_object builder to provide the validation
for the application information file format parser.
Attributes:
application: Unique identifier for application.
version: Application's major version.
runtime: Runtime used by application.
api_version: Which version of APIs to use.
source_language: Optional specification of the source language.
For example we specify "php-quercus" if this is a Java app
that was generated from PHP source using Quercus
handlers: List of URL handlers.
default_expiration: Default time delta to use for cache expiration for
all static files, unless they have their own specific 'expiration' set.
See the URLMap.expiration field's documentation for more information.
skip_files: An re object. Files that match this regular expression will
not be uploaded by appcfg.py. For example:
skip_files: |
.svn.*|
#.*#
nobuild_files: An re object. Files that match this regular expression will
not be built into the app. Go only.
api_config: URL root and script/servlet path for enhanced api serving
"""
ATTRIBUTES = {
APPLICATION: validation.Optional(APPLICATION_RE_STRING),
PROJECT: validation.Optional(APPLICATION_RE_STRING),
MODULE: validation.Optional(MODULE_ID_RE_STRING),
SERVICE: validation.Optional(MODULE_ID_RE_STRING),
VERSION: validation.Optional(MODULE_VERSION_ID_RE_STRING),
RUNTIME: validation.Optional(RUNTIME_RE_STRING),
API_VERSION: validation.Optional(API_VERSION_RE_STRING),
ENV: validation.Optional(ENV_RE_STRING),
ENTRYPOINT: validation.Optional(validation.Type(str)),
RUNTIME_CONFIG: validation.Optional(RuntimeConfig),
INSTANCE_CLASS: validation.Optional(_INSTANCE_CLASS_REGEX),
SOURCE_LANGUAGE: validation.Optional(
validation.Regex(SOURCE_LANGUAGE_RE_STRING)),
AUTOMATIC_SCALING: validation.Optional(AutomaticScaling),
MANUAL_SCALING: validation.Optional(ManualScaling),
BASIC_SCALING: validation.Optional(BasicScaling),
VM: validation.Optional(bool),
VM_SETTINGS: validation.Optional(VmSettings),
BETA_SETTINGS: validation.Optional(BetaSettings),
VM_HEALTH_CHECK: validation.Optional(VmHealthCheck),
HEALTH_CHECK: validation.Optional(HealthCheck),
RESOURCES: validation.Optional(Resources),
NETWORK: validation.Optional(Network),
BUILTINS: validation.Optional(validation.Repeated(BuiltinHandler)),
INCLUDES: validation.Optional(validation.Type(list)),
HANDLERS: validation.Optional(validation.Repeated(URLMap), default=[]),
LIBRARIES: validation.Optional(validation.Repeated(Library)),
SERVICES: validation.Optional(validation.Repeated(
validation.Regex(_SERVICE_RE_STRING))),
DEFAULT_EXPIRATION: validation.Optional(_EXPIRATION_REGEX),
SKIP_FILES: validation.RegexStr(default=DEFAULT_SKIP_FILES),
NOBUILD_FILES: validation.RegexStr(default=DEFAULT_NOBUILD_FILES),
DERIVED_FILE_TYPE: validation.Optional(validation.Repeated(
validation.Options(JAVA_PRECOMPILED, PYTHON_PRECOMPILED))),
ADMIN_CONSOLE: validation.Optional(AdminConsole),
ERROR_HANDLERS: validation.Optional(validation.Repeated(ErrorHandlers)),
BACKENDS: validation.Optional(validation.Repeated(
backendinfo.BackendEntry)),
THREADSAFE: validation.Optional(bool),
DATASTORE_AUTO_ID_POLICY: validation.Optional(
validation.Options(DATASTORE_ID_POLICY_LEGACY,
DATASTORE_ID_POLICY_DEFAULT)),
API_CONFIG: validation.Optional(ApiConfigHandler),
CODE_LOCK: validation.Optional(bool),
ENV_VARIABLES: validation.Optional(EnvironmentVariables),
PAGESPEED: validation.Optional(pagespeedinfo.PagespeedEntry),
}
def CheckInitialized(self):
"""Performs non-regex-based validation.
The following are verified:
- At least one url mapping is provided in the URL mappers.
- Number of url mappers doesn't exceed MAX_URL_MAPS.
- Major version does not contain the string -dot-.
- If api_endpoints are defined, an api_config stanza must be defined.
- If the runtime is python27 and threadsafe is set, then no CGI handlers
can be used.
- That the version name doesn't start with BUILTIN_NAME_PREFIX
- If redirect_http_response_code exists, it is in the list of valid 300s.
- That module and service aren't both set
Raises:
DuplicateLibrary: if the same library name is specified more than once.
MissingURLMapping: if no URLMap object is present in the object.
TooManyURLMappings: if there are too many URLMap entries.
MissingApiConfig: if api_endpoints exist without an api_config.
MissingThreadsafe: if threadsafe is not set but the runtime requires it.
ThreadsafeWithCgiHandler: if the runtime is python27, threadsafe is set
and CGI handlers are specified.
TooManyScalingSettingsError: if more than one scaling settings block is
present.
RuntimeDoesNotSupportLibraries: if libraries clause is used for a runtime
that does not support it (e.g. python25).
ModuleAndServiceDefined: if both 'module' and 'service' keywords are used.
"""
super(AppInfoExternal, self).CheckInitialized()
if self.runtime is None and not self.IsVm():
raise appinfo_errors.MissingRuntimeError(
'You must specify a "runtime" field for non-vm applications.')
elif self.runtime is None:
self.runtime = 'custom'
if (not self.handlers and not self.builtins and not self.includes
and not self.IsVm()):
raise appinfo_errors.MissingURLMapping(
'No URLMap entries found in application configuration')
if self.handlers and len(self.handlers) > MAX_URL_MAPS:
raise appinfo_errors.TooManyURLMappings(
'Found more than %d URLMap entries in application configuration' %
MAX_URL_MAPS)
if self.service and self.module:
raise appinfo_errors.ModuleAndServiceDefined(
'Cannot define both "module" and "service" in configuration')
vm_runtime_python27 = (
self.runtime == 'vm' and
(hasattr(self, 'vm_settings') and
self.vm_settings and
self.vm_settings.get('vm_runtime') == 'python27') or
(hasattr(self, 'beta_settings') and
self.beta_settings and
self.beta_settings.get('vm_runtime') == 'python27'))
if (self.threadsafe is None and
(self.runtime == 'python27' or vm_runtime_python27)):
raise appinfo_errors.MissingThreadsafe(
'threadsafe must be present and set to a true or false YAML value')
if self.auto_id_policy == DATASTORE_ID_POLICY_LEGACY:
datastore_auto_ids_url = ('http://developers.google.com/'
'appengine/docs/python/datastore/'
'entities#Kinds_and_Identifiers')
appcfg_auto_ids_url = ('http://developers.google.com/appengine/docs/'
'python/config/appconfig#auto_id_policy')
logging.warning(
"You have set the datastore auto_id_policy to 'legacy'. It is "
"recommended that you select 'default' instead.\n"
"Legacy auto ids are deprecated. You can continue to allocate\n"
"legacy ids manually using the allocate_ids() API functions.\n"
"For more information see:\n"
+ datastore_auto_ids_url + '\n' + appcfg_auto_ids_url + '\n')
if (hasattr(self, 'beta_settings') and self.beta_settings
and self.beta_settings.get('source_reference')):
ValidateCombinedSourceReferencesString(
self.beta_settings.get('source_reference'))
if self.libraries:
if not (vm_runtime_python27 or self.runtime == 'python27'):
raise appinfo_errors.RuntimeDoesNotSupportLibraries(
'libraries entries are only supported by the "python27" runtime')
library_names = [library.name for library in self.libraries]
for library_name in library_names:
if library_names.count(library_name) > 1:
raise appinfo_errors.DuplicateLibrary(
'Duplicate library entry for %s' % library_name)
if self.version and self.version.find(ALTERNATE_HOSTNAME_SEPARATOR) != -1:
raise validation.ValidationError(
'Version "%s" cannot contain the string "%s"' % (
self.version, ALTERNATE_HOSTNAME_SEPARATOR))
if self.version and self.version.startswith(BUILTIN_NAME_PREFIX):
raise validation.ValidationError(
('Version "%s" cannot start with "%s" because it is a '
'reserved version name prefix.') % (self.version,
BUILTIN_NAME_PREFIX))
if self.handlers:
api_endpoints = [handler.url for handler in self.handlers
if handler.GetHandlerType() == HANDLER_API_ENDPOINT]
if api_endpoints and not self.api_config:
raise appinfo_errors.MissingApiConfig(
'An api_endpoint handler was specified, but the required '
'api_config stanza was not configured.')
if self.threadsafe and self.runtime == 'python27':
for handler in self.handlers:
if (handler.script and (handler.script.endswith('.py') or
'/' in handler.script)):
raise appinfo_errors.ThreadsafeWithCgiHandler(
'threadsafe cannot be enabled with CGI handler: %s' %
handler.script)
if sum([bool(self.automatic_scaling),
bool(self.manual_scaling),
bool(self.basic_scaling)]) > 1:
raise appinfo_errors.TooManyScalingSettingsError(
"There may be only one of 'automatic_scaling', 'manual_scaling', "
"or 'basic_scaling'.")
def GetAllLibraries(self):
"""Returns a list of all Library instances active for this configuration.
Returns:
The list of active Library instances for this configuration. This includes
directly-specified libraries as well as any required dependencies.
"""
if not self.libraries:
return []
library_names = set(library.name for library in self.libraries)
required_libraries = []
for library in self.libraries:
for required_name, required_version in REQUIRED_LIBRARIES.get(
(library.name, library.version), []):
if required_name not in library_names:
required_libraries.append(Library(name=required_name,
version=required_version))
return [Library(**library.ToDict())
for library in self.libraries + required_libraries]
def GetNormalizedLibraries(self):
"""Returns a list of normalized Library instances for this configuration.
Returns:
The list of active Library instances for this configuration. This includes
directly-specified libraries, their required dependencies as well as any
libraries enabled by default. Any libraries with "latest" as their version
will be replaced with the latest available version.
"""
libraries = self.GetAllLibraries()
enabled_libraries = set(library.name for library in libraries)
for library in _SUPPORTED_LIBRARIES:
if library.default_version and library.name not in enabled_libraries:
libraries.append(Library(name=library.name,
version=library.default_version))
return libraries
def ApplyBackendSettings(self, backend_name):
"""Applies settings from the indicated backend to the AppInfoExternal.
Backend entries may contain directives that modify other parts of the
app.yaml, such as the 'start' directive, which adds a handler for the start
request. This method performs those modifications.
Args:
backend_name: The name of a backend defined in 'backends'.
Raises:
BackendNotFound: if the indicated backend was not listed in 'backends'.
DuplicateBackend: if backend is found more than once in 'backends'.
"""
if backend_name is None:
return
if self.backends is None:
raise appinfo_errors.BackendNotFound
self.version = backend_name
match = None
for backend in self.backends:
if backend.name != backend_name:
continue
if match:
raise appinfo_errors.DuplicateBackend
else:
match = backend
if match is None:
raise appinfo_errors.BackendNotFound
if match.start is None:
return
start_handler = URLMap(url=_START_PATH, script=match.start)
self.handlers.insert(0, start_handler)
def GetEffectiveRuntime(self):
"""Returns the app's runtime, resolving VMs to the underlying vm_runtime.
Returns:
The effective runtime: the value of beta/vm_settings.vm_runtime if
runtime is "vm", or runtime otherwise.
"""
if (self.runtime == 'vm' and hasattr(self, 'vm_settings')
and self.vm_settings is not None):
return self.vm_settings.get('vm_runtime')
if (self.runtime == 'vm' and hasattr(self, 'beta_settings')
and self.beta_settings is not None):
return self.beta_settings.get('vm_runtime')
return self.runtime
def SetEffectiveRuntime(self, runtime):
"""Sets the runtime while respecting vm runtimes rules for runtime settings.
Args:
runtime: The runtime to use.
"""
if self.IsVm():
if not self.vm_settings:
self.vm_settings = VmSettings()
self.vm_settings['vm_runtime'] = runtime
self.runtime = 'vm'
else:
self.runtime = runtime
def NormalizeVmSettings(self):
"""Normalize Vm settings.
"""
if self.IsVm():
if not self.vm_settings:
self.vm_settings = VmSettings()
if 'vm_runtime' not in self.vm_settings:
self.SetEffectiveRuntime(self.runtime)
if hasattr(self, 'beta_settings') and self.beta_settings:
for field in ['vm_runtime',
'has_docker_image',
'image',
'module_yaml_path']:
if field not in self.beta_settings and field in self.vm_settings:
self.beta_settings[field] = self.vm_settings[field]
def IsVm(self):
return self.vm or self.env == '2'
def ValidateHandlers(handlers, is_include_file=False):
"""Validates a list of handler (URLMap) objects.
Args:
handlers: A list of a handler (URLMap) objects.
is_include_file: If true, indicates that we are performing validation
for handlers in an AppInclude file, which may contain special directives.
"""
if not handlers:
return
for handler in handlers:
handler.FixSecureDefaults()
handler.WarnReservedURLs()
if not is_include_file:
handler.ErrorOnPositionForAppInfo()
def LoadSingleAppInfo(app_info):
"""Load a single AppInfo object where one and only one is expected.
Validates that the values in the AppInfo match the validators defined
in this file. (in particular, in AppInfoExternal.ATTRIBUTES)
Args:
app_info: A file-like object or string. If it is a string, parse it as
a configuration file. If it is a file-like object, read in data and
parse.
Returns:
An instance of AppInfoExternal as loaded from a YAML file.
Raises:
ValueError: if a specified service is not valid.
EmptyConfigurationFile: when there are no documents in the YAML file.
MultipleConfigurationFile: when there is more than one document in the YAML
file.
DuplicateBackend: if backend is found more than once in 'backends'.
yaml_errors.EventError: if the app.yaml fails validation.
appinfo_errors.MultipleProjectNames: if the app.yaml has both 'application'
and 'project'.
"""
builder = yaml_object.ObjectBuilder(AppInfoExternal)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(app_info)
app_infos = handler.GetResults()
if len(app_infos) < 1:
raise appinfo_errors.EmptyConfigurationFile()
if len(app_infos) > 1:
raise appinfo_errors.MultipleConfigurationFile()
appyaml = app_infos[0]
ValidateHandlers(appyaml.handlers)
if appyaml.builtins:
BuiltinHandler.Validate(appyaml.builtins, appyaml.runtime)
if appyaml.application and appyaml.project:
raise appinfo_errors.MultipleProjectNames(
'Specify one of "application: name" or "project: name"')
elif appyaml.project:
appyaml.application = appyaml.project
appyaml.project = None
appyaml.NormalizeVmSettings()
return appyaml
class AppInfoSummary(validation.Validated):
"""This class contains only basic summary information about an app.
It is used to pass back information about the newly created app to users
after a new version has been created.
"""
ATTRIBUTES = {
APPLICATION: APPLICATION_RE_STRING,
MAJOR_VERSION: MODULE_VERSION_ID_RE_STRING,
MINOR_VERSION: validation.TYPE_LONG
}
def LoadAppInclude(app_include):
"""Load a single AppInclude object where one and only one is expected.
Args:
app_include: A file-like object or string. If it is a string, parse it as
a configuration file. If it is a file-like object, read in data and
parse.
Returns:
An instance of AppInclude as loaded from a YAML file.
Raises:
EmptyConfigurationFile: when there are no documents in the YAML file.
MultipleConfigurationFile: when there is more than one document in the YAML
file.
"""
builder = yaml_object.ObjectBuilder(AppInclude)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(app_include)
includes = handler.GetResults()
if len(includes) < 1:
raise appinfo_errors.EmptyConfigurationFile()
if len(includes) > 1:
raise appinfo_errors.MultipleConfigurationFile()
includeyaml = includes[0]
if includeyaml.handlers:
for handler in includeyaml.handlers:
handler.FixSecureDefaults()
handler.WarnReservedURLs()
if includeyaml.builtins:
BuiltinHandler.Validate(includeyaml.builtins)
return includeyaml
def ParseExpiration(expiration):
"""Parses an expiration delta string.
Args:
expiration: String that matches _DELTA_REGEX.
Returns:
Time delta in seconds.
"""
delta = 0
for match in re.finditer(_DELTA_REGEX, expiration):
amount = int(match.group(1))
units = _EXPIRATION_CONVERSIONS.get(match.group(2).lower(), 1)
delta += amount * units
return delta
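# Editor's illustration (hypothetical, not part of the original SDK): assuming
# _EXPIRATION_CONVERSIONS maps d/h/m/s to 86400/3600/60/1 seconds, the delta
# arithmetic above gives, e.g., '4d 5h' -> 4*86400 + 5*3600 = 363600.
def _example_parse_expiration():  # hypothetical helper, never called by the SDK
  return ParseExpiration('4d 5h')  # 363600 seconds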
_file_path_positive_re = re.compile(r'^[ 0-9a-zA-Z\._\+/@\$-]{1,256}$')
_file_path_negative_1_re = re.compile(r'\.\.|^\./|\.$|/\./|^-|^_ah/|^/')
_file_path_negative_2_re = re.compile(r'//|/$')
_file_path_negative_3_re = re.compile(r'^ | $|/ | /')
def ValidFilename(filename):
"""Determines if filename is valid.
filename must be a valid pathname.
- It must contain only letters, numbers, @, _, +, /, $, ., and -.
- It must be no longer than 256 chars.
- It must not contain "/./", "/../", or "//".
- It must not end in "/".
- All spaces must be in the middle of a directory or file name.
Args:
filename: The filename to validate.
Returns:
An error string if the filename is invalid. Returns '' if the filename
is valid.
"""
if _file_path_positive_re.match(filename) is None:
return 'Invalid character in filename: %s' % filename
if _file_path_negative_1_re.search(filename) is not None:
return ('Filename cannot contain "." or ".." '
'or start with "-" or "_ah/": %s' %
filename)
if _file_path_negative_2_re.search(filename) is not None:
return 'Filename cannot have trailing / or contain //: %s' % filename
if _file_path_negative_3_re.search(filename) is not None:
return 'Any spaces must be in the middle of a filename: %s' % filename
return ''
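# Illustrative examples (added for clarity; not part of the original module), based
# on the regular expressions defined above:
#   ValidFilename('static/images/logo.png')  -> ''     (valid)
#   ValidFilename('static//logo.png')        -> error  (contains "//")
#   ValidFilename('../secrets.yaml')         -> error  (contains "..")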
|
bsd-2-clause
|
nooperpudd/trading-with-python
|
lib/functions.py
|
76
|
11627
|
# -*- coding: utf-8 -*-
"""
twp support functions
@author: Jev Kuznetsov
Licence: GPL v2
"""
from scipy import polyfit, polyval
import datetime as dt
#from datetime import datetime, date
from pandas import DataFrame, Index, Series
import csv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def nans(shape, dtype=float):
''' create a nan numpy array '''
a = np.empty(shape, dtype)
a.fill(np.nan)
return a
def plotCorrelationMatrix(price, thresh = None):
''' plot a correlation matrix as a heatmap image
inputs:
price: prices DataFrame
thresh: correlation threshold to use for checking, default None
'''
symbols = price.columns.tolist()
R = price.pct_change()
correlationMatrix = R.corr()
if thresh is not None:
correlationMatrix = correlationMatrix > thresh
plt.imshow(abs(correlationMatrix.values),interpolation='none')
plt.xticks(range(len(symbols)),symbols)
plt.yticks(range(len(symbols)),symbols)
plt.colorbar()
plt.title('Correlation matrix')
return correlationMatrix
def pca(A):
""" performs principal components analysis
(PCA) on the n-by-p DataFrame A
Rows of A correspond to observations, columns to variables.
Returns :
coeff : principal components, column-wise
transform: A in principal component space
latent : eigenvalues
"""
# computing eigenvalues and eigenvectors of covariance matrix
M = (A - A.mean()).T # subtract the mean (along columns)
[latent,coeff] = np.linalg.eig(np.cov(M)) # attention:not always sorted
idx = np.argsort(latent) # sort eigenvalues
    idx = idx[::-1] # reverse: descending order (largest eigenvalue first)
coeff = coeff[:,idx]
latent = latent[idx]
score = np.dot(coeff.T,A.T) # projection of the data in the new space
transform = DataFrame(index = A.index, data = score.T)
return coeff,transform,latent
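# Illustrative usage sketch (added for clarity; not part of the original library):
# run pca() on a small random DataFrame and compute the explained variance ratio.
def _pcaExample():
    np.random.seed(0)
    data = DataFrame(np.random.randn(100, 3), columns=['a', 'b', 'c'])
    coeff, transform, latent = pca(data)
    explained = latent / latent.sum()   # eigenvalues are sorted largest-first
    return coeff, transform, explained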
def pos2pnl(price,position , ibTransactionCost=False ):
"""
calculate pnl based on price and position
Inputs:
---------
price: series or dataframe of price
position: number of shares at each time. Column names must be same as in price
ibTransactionCost: use bundled Interactive Brokers transaction cost of 0.005$/share
Returns a portfolio DataFrame
"""
delta=position.diff()
port = DataFrame(index=price.index)
if isinstance(price,Series): # no need to sum along 1 for series
port['cash'] = (-delta*price).cumsum()
port['stock'] = (position*price)
else: # dealing with DataFrame here
port['cash'] = (-delta*price).sum(axis=1).cumsum()
port['stock'] = (position*price).sum(axis=1)
if ibTransactionCost:
tc = -0.005*position.diff().abs() # basic transaction cost
        tc[(tc>-1) & (tc<0)] = -1 # costs below 1$ per bar are raised to the 1$ minimum
if isinstance(price,DataFrame):
tc = tc.sum(axis=1)
port['tc'] = tc.cumsum()
else:
port['tc'] = 0.
port['total'] = port['stock']+port['cash']+port['tc']
return port
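# Illustrative usage sketch (added; not part of the original library): buy 10 shares
# at 11$ and mark them at 12$, giving a cumulative profit of 10$ on the last bar.
def _pos2pnlExample():
    price = Series([10.0, 11.0, 12.0])
    position = Series([0.0, 10.0, 10.0])
    port = pos2pnl(price, position)
    return port   # port['total'] ends at 10.0 (no transaction costs)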
def tradeBracket(price,entryBar,maxTradeLength,bracket):
'''
trade a symmetrical bracket on price series, return price delta and exit bar #
Input
------
price : series of price values
entryBar: entry bar number
maxTradeLength : max trade duration in bars
bracket : allowed price deviation
'''
lastBar = min(entryBar+maxTradeLength,len(price)-1)
p = price[entryBar:lastBar]-price[entryBar]
idxOutOfBound = np.nonzero(abs(p)>bracket) # find indices where price comes out of bracket
if idxOutOfBound[0].any(): # found match
priceDelta = p[idxOutOfBound[0][0]]
exitBar = idxOutOfBound[0][0]+entryBar
else: # all in bracket, exiting based on time
priceDelta = p[-1]
exitBar = lastBar
return priceDelta, exitBar
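# Illustrative usage sketch (added; not part of the original library): enter at bar 0
# with a 2.5 bracket; the price first leaves the bracket at bar 2 (delta = +3.0).
def _tradeBracketExample():
    price = np.array([100.0, 101.0, 103.0, 104.0, 100.0])
    priceDelta, exitBar = tradeBracket(price, 0, 4, 2.5)
    return priceDelta, exitBar   # (3.0, 2)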
def estimateBeta(priceY,priceX,algo = 'standard'):
'''
    estimate the beta of stock Y relative to stock X. With algo='returns' an
    iterative linear regression is used and outliers outside a 3 sigma boundary
    are filtered out; 'standard' uses log-return covariance and 'log' fits log prices.
Parameters
--------
priceX : price series of x (usually market)
priceY : price series of y (estimate beta of this price)
Returns
--------
beta : stockY beta relative to stock X
'''
X = DataFrame({'x':priceX,'y':priceY})
if algo=='returns':
ret = (X/X.shift(1)-1).dropna().values
#print len(ret)
x = ret[:,0]
y = ret[:,1]
# filter high values
low = np.percentile(x,20)
high = np.percentile(x,80)
iValid = (x>low) & (x<high)
x = x[iValid]
y = y[iValid]
iteration = 1
nrOutliers = 1
while iteration < 10 and nrOutliers > 0 :
(a,b) = polyfit(x,y,1)
yf = polyval([a,b],x)
#plot(x,y,'x',x,yf,'r-')
err = yf-y
idxOutlier = abs(err) > 3*np.std(err)
nrOutliers =sum(idxOutlier)
beta = a
#print 'Iteration: %i beta: %.2f outliers: %i' % (iteration,beta, nrOutliers)
x = x[~idxOutlier]
y = y[~idxOutlier]
iteration += 1
elif algo=='log':
x = np.log(X['x'])
y = np.log(X['y'])
(a,b) = polyfit(x,y,1)
beta = a
elif algo=='standard':
ret =np.log(X).diff().dropna()
beta = ret['x'].cov(ret['y'])/ret['x'].var()
else:
raise TypeError("unknown algorithm type, use 'standard', 'log' or 'returns'")
return beta
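# Illustrative usage sketch (added; not part of the original library): a synthetic
# stock whose log-returns are exactly twice the market's gives a beta of ~2.
def _estimateBetaExample():
    np.random.seed(1)
    r = 0.01 * np.random.randn(500)
    priceX = Series(100 * np.exp(np.cumsum(r)))        # "market"
    priceY = Series(50 * np.exp(np.cumsum(2 * r)))     # stock with beta 2
    return estimateBeta(priceY, priceX, algo='standard')   # ~2.0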
def estimateVolatility(ohlc, N=10, algo='YangZhang'):
"""
Volatility estimation
Possible algorithms: ['YangZhang', 'CC']
"""
cc = np.log(ohlc.close/ohlc.close.shift(1))
if algo == 'YangZhang': # Yang-zhang volatility
ho = np.log(ohlc.high/ohlc.open)
lo = np.log(ohlc.low/ohlc.open)
co = np.log(ohlc.close/ohlc.open)
oc = np.log(ohlc.open/ohlc.close.shift(1))
oc_sq = oc**2
cc_sq = cc**2
rs = ho*(ho-co)+lo*(lo-co)
close_vol = pd.rolling_sum(cc_sq, window=N) * (1.0 / (N - 1.0))
open_vol = pd.rolling_sum(oc_sq, window=N) * (1.0 / (N - 1.0))
window_rs = pd.rolling_sum(rs, window=N) * (1.0 / (N - 1.0))
result = (open_vol + 0.164333 * close_vol + 0.835667 * window_rs).apply(np.sqrt) * np.sqrt(252)
result[:N-1] = np.nan
elif algo == 'CC': # standard close-close estimator
result = np.sqrt(252)*np.sqrt(((pd.rolling_sum(cc**2,N))/N))
else:
raise ValueError('Unknown algo type.')
return result*100
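# Added note: pd.rolling_sum used above has been removed from recent pandas
# releases. A minimal compatibility sketch (assuming pandas >= 0.18) that the
# three rolling sums could use instead:
def _rollingSum(series, window):
    ''' rolling-window sum, equivalent to the removed pd.rolling_sum '''
    return series.rolling(window=window).sum()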
def rank(current,past):
''' calculate a relative rank 0..1 for a value against series '''
return (current>past).sum()/float(past.count())
def returns(df):
return (df/df.shift(1)-1)
def logReturns(df):
t = np.log(df)
return t-t.shift(1)
def dateTimeToDate(idx):
''' convert datetime index to date '''
dates = []
for dtm in idx:
dates.append(dtm.date())
return dates
def readBiggerScreener(fName):
''' import data from Bigger Capital screener '''
with open(fName,'rb') as f:
reader = csv.reader(f)
rows = [row for row in reader]
header = rows[0]
data = [[] for i in range(len(header))]
for row in rows[1:]:
for i,elm in enumerate(row):
try:
data[i].append(float(elm))
except Exception:
data[i].append(str(elm))
return DataFrame(dict(zip(header,data)),index=Index(range(len(data[0]))))[header]
def sharpe(pnl):
return np.sqrt(250)*pnl.mean()/pnl.std()
def drawdown(s):
"""
calculate max drawdown and duration
Input:
s, price or cumulative pnl curve $
Returns:
drawdown : vector of drawdwon values
duration : vector of drawdown duration
"""
# convert to array if got pandas series, 10x speedup
if isinstance(s,pd.Series):
idx = s.index
s = s.values
returnSeries = True
else:
returnSeries = False
if s.min() < 0: # offset if signal minimum is less than zero
s = s-s.min()
highwatermark = np.zeros(len(s))
drawdown = np.zeros(len(s))
drawdowndur = np.zeros(len(s))
for t in range(1,len(s)):
highwatermark[t] = max(highwatermark[t-1], s[t])
drawdown[t] = (highwatermark[t]-s[t])
drawdowndur[t]= (0 if drawdown[t] == 0 else drawdowndur[t-1]+1)
if returnSeries:
return pd.Series(index=idx,data=drawdown), pd.Series(index=idx,data=drawdowndur)
else:
return drawdown , drawdowndur
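# Illustrative usage sketch (added; not part of the original library): an equity
# curve that peaks at 3, dips to 1 and recovers has a maximum drawdown of 2.
def _drawdownExample():
    equity = pd.Series([1.0, 2.0, 3.0, 1.0, 2.0, 4.0])
    dd, ddDur = drawdown(equity)
    return dd.max(), ddDur.max()   # (2.0, 2.0)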
def profitRatio(pnl):
'''
calculate profit ratio as sum(pnl)/drawdown
Input: pnl - daily pnl, Series or DataFrame
'''
def processVector(pnl): # process a single column
s = pnl.fillna(0)
dd = drawdown(s)[0]
p = s.sum()/dd.max()
return p
if isinstance(pnl,Series):
return processVector(pnl)
elif isinstance(pnl,DataFrame):
p = Series(index = pnl.columns)
for col in pnl.columns:
p[col] = processVector(pnl[col])
return p
else:
raise TypeError("Input must be DataFrame or Series, not "+str(type(pnl)))
def candlestick(df,width=0.5, colorup='b', colordown='r'):
''' plot a candlestick chart of a dataframe '''
O = df['open'].values
H = df['high'].values
L = df['low'].values
C = df['close'].values
fig = plt.gcf()
ax = plt.axes()
#ax.hold(True)
X = df.index
#plot high and low
ax.bar(X,height=H-L,bottom=L,width=0.1,color='k')
idxUp = C>O
ax.bar(X[idxUp],height=(C-O)[idxUp],bottom=O[idxUp],width=width,color=colorup)
idxDown = C<=O
ax.bar(X[idxDown],height=(O-C)[idxDown],bottom=C[idxDown],width=width,color=colordown)
try:
fig.autofmt_xdate()
except Exception: # pragma: no cover
pass
ax.grid(True)
#ax.bar(x,height=H-L,bottom=L,width=0.01,color='k')
def datetime2matlab(t):
''' convert datetime timestamp to matlab numeric timestamp '''
mdn = t + dt.timedelta(days = 366)
frac = (t-dt.datetime(t.year,t.month,t.day,0,0,0)).seconds / (24.0 * 60.0 * 60.0)
return mdn.toordinal() + frac
def getDataSources(fName = None):
''' return data sources directories for this machine.
directories are defined in datasources.ini or provided filepath'''
import socket
from ConfigParser import ConfigParser
pcName = socket.gethostname()
p = ConfigParser()
p.optionxform = str
if fName is None:
fName = 'datasources.ini'
p.read(fName)
if pcName not in p.sections():
raise NameError('Host name section %s not found in file %s' %(pcName,fName))
dataSources = {}
for option in p.options(pcName):
dataSources[option] = p.get(pcName,option)
return dataSources
if __name__ == '__main__':
df = DataFrame({'open':[1,2,3],'high':[5,6,7],'low':[-2,-1,0],'close':[2,1,4]})
plt.clf()
candlestick(df)
|
bsd-3-clause
|
davisincubator/seal_the_deal
|
notebooks/gen_preds_3.py
|
1
|
7880
|
import numpy as np
import pandas as pd
import os
import cv2
from PIL import Image
from scipy.misc import imread
import matplotlib.pyplot as plt
import skimage.feature
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
import keras
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D, Lambda, Cropping2D
from keras.utils import np_utils
import tensorflow as tf
from collections import Counter
from keras.models import load_model
import datetime
from tqdm import tnrange, tqdm_notebook, tqdm
# %matplotlib inline
class_names = ['adult_females', 'adult_males', 'juveniles', 'pups', 'subadult_males']
# my_dir = "/Volumes/dax/seals/Kaggle-NOAA-SeaLions/"
my_dir = "/seal_the_data/"
mismatch_id = [3, 7, 9, 21, 30, 34, 71, 81, 89, 97, 151, 184, 215, 234, 242, 268, 290, 311, 331, 344, 380, 384, 406,
421, 469, 475, 490, 499, 507, 530, 531, 605, 607, 614, 621, 638, 644, 687, 712, 721, 767, 779, 781, 794,
800, 811, 839, 840, 869, 882, 901, 903, 905, 909, 913, 927, 946]
blacklist = []
for i in mismatch_id:
blacklist.append(str(i) + '.jpg')
print(blacklist[:5])
blacklist.append('train.csv')
print(blacklist)
file_names = os.listdir(my_dir + "Train/")
file_names = sorted(file_names,
                    key=lambda item: (int(item.partition('.')[0]) if item[0].isdigit() else float('inf'), item))
# select a subset of files to run on
file_names = file_names[0:1]
# dataframe to store results in
coordinates_df = pd.DataFrame(index=file_names, columns=class_names)
# print(file_names[:])
for filename in file_names[:]:  # iterate over a copy so the removal below is safe
if filename in blacklist:
file_names.remove(filename)
else:
# read the Train and Train Dotted images
image_1 = cv2.imread(my_dir + "/TrainDotted/" + filename)
image_2 = cv2.imread(my_dir + "/Train/" + filename)
cut = np.copy(image_2)
# absolute difference between Train and Train Dotted
image_3 = cv2.absdiff(image_1, image_2)
# mask out blackened regions from Train Dotted
mask_1 = cv2.cvtColor(image_1, cv2.COLOR_BGR2GRAY)
mask_1[mask_1 < 20] = 0
mask_1[mask_1 > 0] = 255
mask_2 = cv2.cvtColor(image_2, cv2.COLOR_BGR2GRAY)
mask_2[mask_2 < 20] = 0
mask_2[mask_2 > 0] = 255
image_3 = cv2.bitwise_or(image_3, image_3, mask=mask_1)
image_3 = cv2.bitwise_or(image_3, image_3, mask=mask_2)
# convert to grayscale to be accepted by skimage.feature.blob_log
image_3 = cv2.cvtColor(image_3, cv2.COLOR_BGR2GRAY)
# detect blobs
blobs = skimage.feature.blob_log(image_3, min_sigma=3, max_sigma=4, num_sigma=1, threshold=0.02)
adult_males = []
subadult_males = []
pups = []
juveniles = []
adult_females = []
image_circles = image_1
for blob in blobs:
# get the coordinates for each blob
y, x, s = blob
# get the color of the pixel from Train Dotted in the center of the blob
g, b, r = image_1[int(y)][int(x)][:]
# decision tree to pick the class of the blob by looking at the color in Train Dotted
if r > 200 and g < 50 and b < 50: # RED
adult_males.append((int(x), int(y)))
cv2.circle(image_circles, (int(x), int(y)), 20, (0, 0, 255), 10)
elif r > 200 and g > 200 and b < 50: # MAGENTA
subadult_males.append((int(x), int(y)))
cv2.circle(image_circles, (int(x), int(y)), 20, (250, 10, 250), 10)
elif r < 100 and g < 100 and 150 < b < 200: # GREEN
pups.append((int(x), int(y)))
cv2.circle(image_circles, (int(x), int(y)), 20, (20, 180, 35), 10)
elif r < 100 and 100 < g and b < 100: # BLUE
juveniles.append((int(x), int(y)))
cv2.circle(image_circles, (int(x), int(y)), 20, (180, 60, 30), 10)
elif r < 150 and g < 50 and b < 100: # BROWN
adult_females.append((int(x), int(y)))
cv2.circle(image_circles, (int(x), int(y)), 20, (0, 42, 84), 10)
cv2.rectangle(cut, (int(x) - 112, int(y) - 112), (int(x) + 112, int(y) + 112), 0, -1)
coordinates_df["adult_males"][filename] = adult_males
coordinates_df["subadult_males"][filename] = subadult_males
coordinates_df["adult_females"][filename] = adult_females
coordinates_df["juveniles"][filename] = juveniles
coordinates_df["pups"][filename] = pups
x = []
y = []
for filename in tqdm(file_names):
image = cv2.imread(my_dir + "/Train/" + filename)
for lion_class in class_names:
try:
for coordinates in coordinates_df[lion_class][filename]:
thumb = image[coordinates[1] - 32:coordinates[1] + 32, coordinates[0] - 32:coordinates[0] + 32, :]
if np.shape(thumb) == (64, 64, 3):
x.append(thumb)
y.append(lion_class)
except:
pass
for i in range(0, np.shape(cut)[0], 224):
for j in range(0, np.shape(cut)[1], 224):
thumb = cut[i:i + 64, j:j + 64, :]
if np.amin(cv2.cvtColor(thumb, cv2.COLOR_BGR2GRAY)) != 0:
if np.shape(thumb) == (64, 64, 3):
x.append(thumb)
y.append("negative")
class_names.append("negative")
x = np.array(x)
y = np.array(y)
encoder = LabelBinarizer()
encoder.fit(y)
y = encoder.transform(y).astype(float)
my_model = '2017-06-25_model.h5' # what is the model file named?
model = load_model(my_dir + my_model)
test_file_names = os.listdir(my_dir + "Test/")
test_file_names = sorted(test_file_names,
                         key=lambda item: (int(item.partition('.')[0]) if item[0].isdigit() else float('inf'), item))
# select a subset of files to run on
# test_file_names = test_file_names[0:7]
print(len(test_file_names)) # 18636
#test_file_names = test_file_names[0:2000]
#test_file_names = test_file_names[2000:4000]
test_file_names = test_file_names[4000:6000]
# test_file_names = test_file_names[6000:8000]
# test_file_names = test_file_names[8000:10000]
# test_file_names = test_file_names[10000:12000]
# test_file_names = test_file_names[12000:14000]
# test_file_names = test_file_names[14000:]
print(len(test_file_names)) #
# dataframe to store results in
test_coordinates_df = pd.DataFrame(0, index=test_file_names, columns=class_names)
# print(test_file_names[:5])
# print(test_coordinates_df)
# GPU 3
with tf.device('/gpu:2'):
for filename in tqdm(test_file_names):
file_int = int(filename[:-4])
current_time = datetime.datetime.now().time().isoformat()[:5]
if file_int % 500 == 0:
print('completed %d images at %s' % (file_int, current_time))
img = cv2.imread(my_dir + "Test/" + filename)
x_test = []
for i in range(0, np.shape(img)[0], 64):
for j in range(0, np.shape(img)[1], 64):
thumb = img[i:i + 64, j:j + 64, :]
if np.shape(thumb) == (64, 64, 3):
x_test.append(thumb)
x_test = np.array(x_test)
y_predicted = model.predict(x_test, verbose=0)
y_predicted = encoder.inverse_transform(y_predicted)
the_counter = Counter(y_predicted)
# print(the_counter)
for key in the_counter:
test_coordinates_df.set_value(index=filename, col=key, value=the_counter[key])
protect_df = test_coordinates_df.copy()  # keep an untouched copy before columns are dropped below
# print(test_coordinates_df)
del test_coordinates_df['negative']
test_coordinates_df = test_coordinates_df[['adult_males', 'subadult_males', 'adult_females', 'juveniles', 'pups']]
print(test_coordinates_df)
test_coordinates_df.to_csv(my_dir + datetime.date.today().isoformat() + '_submission_pt3.csv')
|
mit
|
alfayez/gnuradio
|
gr-filter/examples/fir_filter_ccc.py
|
13
|
3154
|
#!/usr/bin/env python
from gnuradio import gr, filter
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fir_filter_ccc(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
print "Num. Taps: ", len(taps)
self.src = gr.noise_source_c(gr.GR_GAUSSIAN, 1)
self.head = gr.head(gr.sizeof_gr_complex, self._nsamps)
self.filt0 = filter.fir_filter_ccc(self._decim, taps)
self.vsnk_src = gr.vector_sink_c()
self.vsnk_out = gr.vector_sink_c()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-B", "--bandwidth", type="eng_float", default=1000,
help="Filter bandwidth [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
                      help="Decimation factor [default=%default]")
(options, args) = parser.parse_args ()
put = example_fir_filter_ccc(options.nsamples,
options.samplerate,
options.bandwidth,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
    # Plot the PSDs of the source and filtered signals
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
gpl-3.0
|
bwpriest/rossmannsales
|
src/testing_chassis.py
|
1
|
2910
|
import numpy as np
import pandas as pd
import data_manipulation as dm
from sklearn import cross_validation
from baseline_simple_median import MedianEstimator
"""
A basic example of a mean-squared-error k-fold cross validation approach
using the dumb MedianEstimator. We can use this same approach for any
estimator that implements 'fit(self, X, y)' and 'predict(self, X)',
assuming that we handle the data correctly.
NOTE [BWP]: I'll create a more general framework for doing this in the
near future, such that all you need to do is pass your estimator to a
function. It isn't too complicated right now, but I'd like to abstract
away the data handling bit.
"""
def cross_val(train, estimator, features, id, kwargs={}, pivot='Store', target='Sales', num_folds=10):
return cross_validation.cross_val_score(estimator(**kwargs),
train.loc[train[pivot] == id][features],
train.loc[train[pivot] == id][target],
scoring='mean_squared_error',
cv=num_folds)
def sample_cross_validation(num_folds=10, path=dm.enriched_path, fname=dm.train_fname ):
# load the labeled training data
train_features = pd.read_csv(path + fname)
# Identify the targets
train_targets = train_features['Sales']
# drop unhelpful columns
train_features = train_features.drop(['Customers','Sales'], axis=1)
# auto-cross-validate the estimator by training it on cuts of the
# training data and testing it on the rest.
sc = cross_validation.cross_val_score(MedianEstimator(),
train_features,
train_targets,
scoring = 'mean_squared_error',
cv=num_folds)
print(sc)
def sample_differences(holdout_size=0.2):
# load the labeled training data
train_features = pd.read_csv(dm.enriched_path + dm.train_fname)
# Identify the targets
train_targets = train_features['Sales']
# drop unhelpful columns
train_features = train_features.drop(['Customers','Sales'], axis=1)
# Split the labeled data into a notional 'training' set X_train and
# a label set y_train, as well as holdouts X_test and y_test.
X_train, X_test, y_train, y_test = cross_validation.train_test_split(train_features,
train_targets,
test_size=holdout_size,
random_state=0)
    # fit an estimator on the training split
    me = MedianEstimator()
    me.fit(X_train, y_train)
    # produce predictions for the holdout set so they can be compared with y_test
    pr = me.predict(X_test)
    return pr, y_test
|
apache-2.0
|
shikhardb/scikit-learn
|
examples/decomposition/plot_sparse_coding.py
|
247
|
3846
|
"""
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size; heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
    x = ((2 / ((np.sqrt(3 * width) * np.pi ** 0.25)))  # pi**(1/4) normalization
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling  # integer division so shapes stay ints
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
                                    n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
|
bsd-3-clause
|
ilanfri/StatsML
|
circle_through_point.py
|
1
|
1459
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
# Given any 2D Cartesian input coordinate (xinput, yinput) compute and plot
# a semi-circle which goes both through the point and the origin
# in the (+,+) quadrant
def yfunc(x, x0):
return np.sqrt(x0 ** 2 - (x - x0) ** 2)
def radius(xinput, yinput):
xinput = float(xinput)
yinput = float(yinput)
return 1 / (2 * xinput) * (xinput ** 2 + yinput ** 2)
xinput = 2
yinput = 3
r = radius(xinput, yinput)
print "Radius:", r
xvalues = np.linspace(0, 2 * r, 100)
fig = plt.figure()
plt.plot(xvalues, yfunc(xvalues, r))
plt.plot(xinput, yinput, 'o')
plt.show()
# DERIVATION OF RELEVANT EQUATIONS:
# The equation of a circle with centre at (x0,y0) is
# (x-x0)**2 + (y-y0)**2 = r**2
# But our circle is always centred on the x-axis, so y0 = 0
# (x-x0)**2 + y**2 = r**2 (***)
# Applying the constraint that the circle go through the origin (0,0) gives
# r = x0
# Using the substitution r=x0 on equation (***) and applying the constraint that the circle must
# also go through our specified point X = (x1,y1) gives
# (x1 - x0)**2 + y1**2 = x0**2
# and solving this for x0 gives
# x0 = 1/(2*x1) * (x1**2 + y1**2)
# This is both our x-coordinate centre for the circle, as well as its radius (since r = x0)
# Inserting this result for x0 and r into equation (***) and solving for y gives us
# the desired equation for the circle
|
bsd-3-clause
|
zhmxu/nyu_ml_lectures
|
notebooks/figures/plot_2d_separator.py
|
41
|
1513
|
import numpy as np
import matplotlib.pyplot as plt
def plot_2d_separator(classifier, X, fill=False, ax=None, eps=None):
if eps is None:
eps = X.std() / 2.
x_min, x_max = X[:, 0].min() - eps, X[:, 0].max() + eps
y_min, y_max = X[:, 1].min() - eps, X[:, 1].max() + eps
xx = np.linspace(x_min, x_max, 100)
yy = np.linspace(y_min, y_max, 100)
X1, X2 = np.meshgrid(xx, yy)
X_grid = np.c_[X1.ravel(), X2.ravel()]
try:
decision_values = classifier.decision_function(X_grid)
levels = [0]
fill_levels = [decision_values.min(), 0, decision_values.max()]
except AttributeError:
# no decision_function
decision_values = classifier.predict_proba(X_grid)[:, 1]
levels = [.5]
fill_levels = [0, .5, 1]
if ax is None:
ax = plt.gca()
if fill:
ax.contourf(X1, X2, decision_values.reshape(X1.shape),
levels=fill_levels, colors=['blue', 'red'])
else:
ax.contour(X1, X2, decision_values.reshape(X1.shape), levels=levels,
colors="black")
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
ax.set_xticks(())
ax.set_yticks(())
if __name__ == '__main__':
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
X, y = make_blobs(centers=2, random_state=42)
clf = LogisticRegression().fit(X, y)
plot_2d_separator(clf, X, fill=True)
plt.scatter(X[:, 0], X[:, 1], c=y)
plt.show()
|
cc0-1.0
|
trungnt13/scikit-learn
|
benchmarks/bench_plot_lasso_path.py
|
301
|
4003
|
"""Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but has a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features / 10,
'effective_rank': min(n_samples, n_features) / 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
    # dummy point plot to stick the legend to since surface plots do not
# support legends (yet?)
#ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
#ax.legend()
i += 1
plt.show()
|
bsd-3-clause
|
abhishekkr/tutorials_as_code
|
talks-articles/machine-learning/toolbox/numpy/singular-value-decomposition.py
|
1
|
1790
|
#!/usr/bin/env python3
## source: https://machinelearningmastery.com/singular-value-decomposition-for-machine-learning/
from numpy import array
from numpy import diag
from numpy import dot
from numpy import zeros
from scipy.linalg import svd
# Singular-value decomposition
def convert_to_svd(matrix):
U, s, VT = svd(matrix)
return U,s,VT
# Reconstruct SVD
def reconstruct_source_from_svd(U,s,VT, row_size, col_size, truncate=False, n_elements=2):
"""
## truncate,n_elements flow is also available via TruncatedSVD
from sklearn.decomposition import TruncatedSVD
# svd
svd = TruncatedSVD(n_components=2)
svd.fit(A)
result = svd.transform(A)
print(result)
"""
# create m x n Sigma matrix
Sigma = zeros((row_size, col_size))
if row_size == col_size:
Sigma = diag(s)
elif row_size < col_size:
# populate Sigma with m x m diagonal matrix
Sigma[:row_size, :row_size] = diag(s)
elif row_size > col_size:
# populate Sigma with n x n diagonal matrix
Sigma[:col_size, :col_size] = diag(s)
if truncate == True:
Sigma = Sigma[:, :n_elements]
VT = VT[:n_elements, :]
# transform
T = U.dot(Sigma)
print(T)
T = A.dot(VT.T)
print(T)
# reconstruct matrix
return U.dot(Sigma.dot(VT))
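# Example (added for clarity; not part of the original tutorial): for the 3x4
# matrix A defined in the __main__ block below,
#     reconstruct_source_from_svd(U, s, VT, 3, 4, truncate=True, n_elements=2)
# keeps only the two largest singular values and returns a rank-2 approximation
# of A. Note that the transform step above references the module-level matrix A
# (the source matrix is not passed in), so the function is intended to be called
# from this script only.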
if __name__ == "__main__":
# define main data matrix
A = array([
[1, 200, 3, 4],
[3, 400, 500, 6],
[500, 6, 7, 800],
])
print("Main Matrix:\n%s" % A)
print("rows: %s, cols: %s" % (A.shape[0], A.shape[1]))
U,s,VT = convert_to_svd(A)
print("\nSVD:\n%s" % s)
print("%s singular-values" % len(s))
ReA = reconstruct_source_from_svd(U,s,VT, 3, 4)
    print("\nReconstructed Matrix:\n %s" % ReA)
|
mit
|
trafferty/utils
|
python/realtime_plotting/lineplot.py
|
1
|
2290
|
#from PyQt4.QtCore import *
#from PyQt4.QtGui import *
from PySide.QtCore import *
from PySide.QtGui import *
from numpy import nan
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
class LinePlot(FigureCanvas):
def __init__(self, parent):
self.figure = Figure()
super(LinePlot, self).__init__(self.figure)
self.setParent(parent)
self.axes = self.figure.add_axes([0.1, 0.1, .9, .8])
self.axes.clear()
self.axes.grid()
[self.line_plt] = self.axes.plot([])
self.draw()
class LinePlotWidget(QWidget):
def __init__(self, parent=None, title=None):
super(LinePlotWidget, self).__init__(parent)
if title:
self.setWindowTitle(title)
self._layout = QHBoxLayout(self)
self._line_plot = LinePlot(self.parentWidget())
self._layout.addWidget(self._line_plot)
def set_data(self, data):
self._line_plot.line_plt.set_data(*data)
self._line_plot.axes.relim()
self._line_plot.axes.autoscale_view()
self._line_plot.axes.draw_artist(self._line_plot.line_plt)
self._line_plot.draw()
@property
def ylim(self):
        return self._line_plot.axes.get_ylim()
@ylim.setter
def ylim(self, ylim):
self._line_plot.axes.set_ylim(*ylim)
@property
def xlim(self):
        return self._line_plot.axes.get_xlim()
@xlim.setter
def xlim(self, xlim):
self._line_plot.axes.set_xlim(*xlim)
@property
def xticks(self):
        return self._line_plot.axes.get_xticks()
@xticks.setter
def xticks(self, xticks):
self._line_plot.axes.set_xticks(xticks)
@property
def xlabel(self):
        return self._line_plot.axes.get_xlabel()
@xlabel.setter
def xlabel(self, xlabel):
self._line_plot.axes.set_xlabel(xlabel)
@property
def ylabel(self):
        return self._line_plot.axes.get_ylabel()
@ylabel.setter
def ylabel(self, ylabel):
self._line_plot.axes.set_ylabel(ylabel)
@property
def title(self):
        return self._line_plot.axes.get_title()
@title.setter
def title(self, title):
self._line_plot.axes.set_title(title)
def annotate(self, str, coords):
self._line_plot.axes.annotate(str, coords, xycoords='axes fraction')
self._line_plot.draw()
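# Illustrative usage sketch (added; not part of the original module), assuming a
# working PySide/Qt4 installation:
def _lineplot_demo():
    import sys
    import numpy as np
    app = QApplication(sys.argv)
    widget = LinePlotWidget(title='LinePlot demo')
    widget.xlabel = 'x'
    widget.ylabel = 'sin(x)'
    x = np.linspace(0, 2 * np.pi, 200)
    widget.set_data((x, np.sin(x)))
    widget.show()
    app.exec_()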
|
gpl-2.0
|
JPFrancoia/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
14
|
67163
|
"""Kernels for Gaussian process regression and classification.
The kernels in this module allow kernel-engineering, i.e., they can be
combined via the "+" and "*" operators or be exponentiated with a scalar
via "**". These sum and product expressions can also contain scalar values,
which are automatically converted to a constant kernel.
All kernels allow (analytic) gradient-based hyperparameter optimization.
The space of hyperparameters can be specified by giving lower and upper
boundaries for the value of each hyperparameter (the search space is thus
rectangular). Instead of specifying bounds, hyperparameters can also be
declared to be "fixed", which causes these hyperparameters to be excluded from
optimization.
"""
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
# Note: this module is strongly inspired by the kernel module of the george
# package.
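# Illustrative sketch (added for clarity; not part of the original module). With an
# installed scikit-learn, the kernel composition described in the module docstring
# behaves roughly as follows (exact reprs may differ between versions):
#
#     from sklearn.gaussian_process.kernels import RBF, ConstantKernel
#     k = ConstantKernel(2.0) * RBF(length_scale=1.5) + 3.0
#     # the scalar 3.0 is wrapped in a ConstantKernel and combined via Sum
#     k.theta   # log-transformed free hyperparameters of the composite kernel
#     k.bounds  # log-transformed optimization bounds, one row per hyperparameter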
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import math
import numpy as np
from scipy.special import kv, gamma
from scipy.spatial.distance import pdist, cdist, squareform
from ..metrics.pairwise import pairwise_kernels
from ..externals import six
from ..base import clone
from sklearn.externals.funcsigs import signature
def _check_length_scale(X, length_scale):
length_scale = np.squeeze(length_scale).astype(float)
if np.ndim(length_scale) > 1:
raise ValueError("length_scale cannot be of dimension greater than 1")
if np.ndim(length_scale) == 1 and X.shape[1] != length_scale.shape[0]:
raise ValueError("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (length_scale.shape[0], X.shape[1]))
return length_scale
class Hyperparameter(namedtuple('Hyperparameter',
('name', 'value_type', 'bounds',
'n_elements', 'fixed'))):
"""A kernel hyperparameter's specification in form of a namedtuple.
.. versionadded:: 0.18
Attributes
----------
name : string
The name of the hyperparameter. Note that a kernel using a
hyperparameter with name "x" must have the attributes self.x and
self.x_bounds
value_type : string
The type of the hyperparameter. Currently, only "numeric"
hyperparameters are supported.
bounds : pair of floats >= 0 or "fixed"
The lower and upper bound on the parameter. If n_elements>1, a pair
        of 1d arrays with n_elements each may be given alternatively. If
the string "fixed" is passed as bounds, the hyperparameter's value
cannot be changed.
n_elements : int, default=1
The number of elements of the hyperparameter value. Defaults to 1,
which corresponds to a scalar hyperparameter. n_elements > 1
corresponds to a hyperparameter which is vector-valued,
such as, e.g., anisotropic length-scales.
fixed : bool, default: None
Whether the value of this hyperparameter is fixed, i.e., cannot be
        changed during hyperparameter tuning. If None is passed, the value of "fixed" is
derived based on the given bounds.
"""
    # A raw namedtuple is very memory efficient as it packs the attributes
    # in a struct to get rid of the __dict__ of attributes; in particular it
    # does not copy the string for the keys on each instance.
    # Deriving a namedtuple subclass just to introduce an __init__ method would
    # reintroduce the __dict__ on each instance. We avoid that by telling the
    # Python interpreter that this subclass uses static __slots__ instead of
    # dynamic attributes. Furthermore, we don't need any additional slot in the
    # subclass, so we set __slots__ to the empty tuple.
__slots__ = ()
def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):
if not isinstance(bounds, six.string_types) or bounds != "fixed":
bounds = np.atleast_2d(bounds)
if n_elements > 1: # vector-valued parameter
if bounds.shape[0] == 1:
bounds = np.repeat(bounds, n_elements, 0)
elif bounds.shape[0] != n_elements:
raise ValueError("Bounds on %s should have either 1 or "
"%d dimensions. Given are %d"
% (name, n_elements, bounds.shape[0]))
if fixed is None:
fixed = isinstance(bounds, six.string_types) and bounds == "fixed"
return super(Hyperparameter, cls).__new__(
cls, name, value_type, bounds, n_elements, fixed)
# This is mainly a testing utility to check that two hyperparameters
# are equal.
def __eq__(self, other):
return (self.name == other.name and
self.value_type == other.value_type and
np.all(self.bounds == other.bounds) and
self.n_elements == other.n_elements and
self.fixed == other.fixed)
class Kernel(six.with_metaclass(ABCMeta)):
"""Base class for all kernels.
.. versionadded:: 0.18
"""
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict()
# introspect the constructor arguments to find the model parameters
# to represent
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
init_sign = signature(init)
args, varargs = [], []
for parameter in init_sign.parameters.values():
if (parameter.kind != parameter.VAR_KEYWORD and
parameter.name != 'self'):
args.append(parameter.name)
if parameter.kind == parameter.VAR_POSITIONAL:
varargs.append(parameter.name)
if len(varargs) != 0:
raise RuntimeError("scikit-learn kernels should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
for arg in args:
params[arg] = getattr(self, arg, None)
return params
def set_params(self, **params):
"""Set the parameters of this kernel.
The method works on simple kernels as well as on nested kernels.
The latter have parameters of the form ``<component>__<parameter>``
so that it's possible to update each component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def clone_with_theta(self, theta):
"""Returns a clone of self with given hyperparameters theta. """
cloned = clone(self)
cloned.theta = theta
return cloned
@property
def n_dims(self):
"""Returns the number of non-fixed hyperparameters of the kernel."""
return self.theta.shape[0]
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter specifications."""
r = []
for attr in dir(self):
if attr.startswith("hyperparameter_"):
r.append(getattr(self, attr))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
theta = []
params = self.get_params()
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
theta.append(params[hyperparameter.name])
if len(theta) > 0:
return np.log(np.hstack(theta))
else:
return np.array([])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
params = self.get_params()
i = 0
for hyperparameter in self.hyperparameters:
if hyperparameter.fixed:
continue
if hyperparameter.n_elements > 1:
# vector-valued parameter
params[hyperparameter.name] = np.exp(
theta[i:i + hyperparameter.n_elements])
i += hyperparameter.n_elements
else:
params[hyperparameter.name] = np.exp(theta[i])
i += 1
if i != len(theta):
            raise ValueError("theta does not have the correct number of entries."
" Should be %d; given are %d"
% (i, len(theta)))
self.set_params(**params)
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
bounds = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
bounds.append(hyperparameter.bounds)
if len(bounds) > 0:
return np.log(np.vstack(bounds))
else:
return np.array([])
def __add__(self, b):
if not isinstance(b, Kernel):
return Sum(self, ConstantKernel(b))
return Sum(self, b)
def __radd__(self, b):
if not isinstance(b, Kernel):
return Sum(ConstantKernel(b), self)
return Sum(b, self)
def __mul__(self, b):
if not isinstance(b, Kernel):
return Product(self, ConstantKernel(b))
return Product(self, b)
def __rmul__(self, b):
if not isinstance(b, Kernel):
return Product(ConstantKernel(b), self)
return Product(b, self)
def __pow__(self, b):
return Exponentiation(self, b)
def __eq__(self, b):
if type(self) != type(b):
return False
params_a = self.get_params()
params_b = b.get_params()
for key in set(list(params_a.keys()) + list(params_b.keys())):
if np.any(params_a.get(key, None) != params_b.get(key, None)):
return False
return True
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.theta)))
@abstractmethod
def __call__(self, X, Y=None, eval_gradient=False):
"""Evaluate the kernel."""
@abstractmethod
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
@abstractmethod
def is_stationary(self):
"""Returns whether the kernel is stationary. """
class NormalizedKernelMixin(object):
"""Mixin for kernels which are normalized: k(X, X)=1.
.. versionadded:: 0.18
"""
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.ones(X.shape[0])
class StationaryKernelMixin(object):
"""Mixin for kernels which are stationary: k(X, Y)= f(X-Y).
.. versionadded:: 0.18
"""
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return True
class CompoundKernel(Kernel):
"""Kernel which is composed of a set of other kernels.
.. versionadded:: 0.18
"""
def __init__(self, kernels):
self.kernels = kernels
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return dict(kernels=self.kernels)
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.hstack([kernel.theta for kernel in self.kernels])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
        k_dims = self.kernels[0].n_dims  # CompoundKernel stores its parts in self.kernels, not k1/k2
for i, kernel in enumerate(self.kernels):
kernel.theta = theta[i * k_dims:(i + 1) * k_dims]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return np.vstack([kernel.bounds for kernel in self.kernels])
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
        Note that this compound kernel returns the results of all simple kernels
stacked along an additional axis.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y, n_kernels)
Kernel k(X, Y)
K_gradient : array, shape (n_samples_X, n_samples_X, n_dims, n_kernels)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K = []
K_grad = []
for kernel in self.kernels:
K_single, K_grad_single = kernel(X, Y, eval_gradient)
K.append(K_single)
K_grad.append(K_grad_single[..., np.newaxis])
return np.dstack(K), np.concatenate(K_grad, 3)
else:
return np.dstack([kernel(X, Y, eval_gradient)
for kernel in self.kernels])
def __eq__(self, b):
if type(self) != type(b) or len(self.kernels) != len(b.kernels):
return False
return np.all([self.kernels[i] == b.kernels[i]
for i in range(len(self.kernels))])
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return np.all([kernel.is_stationary() for kernel in self.kernels])
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X, n_kernels)
Diagonal of kernel k(X, X)
"""
return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
class KernelOperator(Kernel):
"""Base class for all kernel operators.
.. versionadded:: 0.18
"""
def __init__(self, k1, k2):
self.k1 = k1
self.k2 = k2
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(k1=self.k1, k2=self.k2)
if deep:
deep_items = self.k1.get_params().items()
params.update(('k1__' + k, val) for k, val in deep_items)
deep_items = self.k2.get_params().items()
params.update(('k2__' + k, val) for k, val in deep_items)
return params
@property
    def hyperparameters(self):
        """Returns a list of all hyperparameter specifications."""
r = []
for hyperparameter in self.k1.hyperparameters:
r.append(Hyperparameter("k1__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
for hyperparameter in self.k2.hyperparameters:
r.append(Hyperparameter("k2__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.append(self.k1.theta, self.k2.theta)
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k1_dims = self.k1.n_dims
self.k1.theta = theta[:k1_dims]
self.k2.theta = theta[k1_dims:]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
if self.k1.bounds.size == 0:
return self.k2.bounds
if self.k2.bounds.size == 0:
return self.k1.bounds
return np.vstack((self.k1.bounds, self.k2.bounds))
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.k1 == b.k1 and self.k2 == b.k2) \
or (self.k1 == b.k2 and self.k2 == b.k1)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.k1.is_stationary() and self.k2.is_stationary()
class Sum(KernelOperator):
"""Sum-kernel k1 + k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_sum(X, Y) = k1(X, Y) + k2(X, Y)
.. versionadded:: 0.18
Parameters
----------
k1 : Kernel object
The first base-kernel of the sum-kernel
k2 : Kernel object
The second base-kernel of the sum-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 + K2, np.dstack((K1_gradient, K2_gradient))
else:
return self.k1(X, Y) + self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) + self.k2.diag(X)
def __repr__(self):
return "{0} + {1}".format(self.k1, self.k2)
class Product(KernelOperator):
"""Product-kernel k1 * k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_prod(X, Y) = k1(X, Y) * k2(X, Y)
.. versionadded:: 0.18
Parameters
----------
k1 : Kernel object
The first base-kernel of the product-kernel
k2 : Kernel object
The second base-kernel of the product-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 * K2, np.dstack((K1_gradient * K2[:, :, np.newaxis],
K2_gradient * K1[:, :, np.newaxis]))
else:
return self.k1(X, Y) * self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) * self.k2.diag(X)
def __repr__(self):
return "{0} * {1}".format(self.k1, self.k2)
class Exponentiation(Kernel):
"""Exponentiate kernel by given exponent.
The resulting kernel is defined as
k_exp(X, Y) = k(X, Y) ** exponent
.. versionadded:: 0.18
Parameters
----------
kernel : Kernel object
The base kernel
exponent : float
The exponent for the base kernel
"""
def __init__(self, kernel, exponent):
self.kernel = kernel
self.exponent = exponent
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(kernel=self.kernel, exponent=self.exponent)
if deep:
deep_items = self.kernel.get_params().items()
params.update(('kernel__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.kernel.hyperparameters:
r.append(Hyperparameter("kernel__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
        kernel's hyperparameters, as this representation of the search space
        is more amenable to hyperparameter search, since hyperparameters like
        length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return self.kernel.theta
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
self.kernel.theta = theta
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return self.kernel.bounds
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.kernel == b.kernel and self.exponent == b.exponent)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K, K_gradient = self.kernel(X, Y, eval_gradient=True)
K_gradient *= \
self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
return K ** self.exponent, K_gradient
else:
K = self.kernel(X, Y, eval_gradient=False)
return K ** self.exponent
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.kernel.diag(X) ** self.exponent
def __repr__(self):
return "{0} ** {1}".format(self.kernel, self.exponent)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.kernel.is_stationary()
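# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original scikit-learn module): the
# Exponentiation wrapper simply raises a base kernel element-wise to a power,
# which is also what ``kernel ** exponent`` resolves to. The helper name
# ``_demo_exponentiation`` is hypothetical.
def _demo_exponentiation():
    """Minimal sketch: Exponentiation(k, 2) equals the element-wise square."""
    X = np.random.RandomState(1).randn(4, 3)
    base = RBF(length_scale=1.5)  # RBF is defined further below in this module
    squared = Exponentiation(base, exponent=2)
    assert np.allclose(squared(X), base(X) ** 2)
    return squared.diag(X)  # equals base.diag(X) ** 2, i.e. all ones here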
class ConstantKernel(StationaryKernelMixin, Kernel):
"""Constant kernel.
Can be used as part of a product-kernel where it scales the magnitude of
the other factor (kernel) or as part of a sum-kernel, where it modifies
the mean of the Gaussian process.
k(x_1, x_2) = constant_value for all x_1, x_2
.. versionadded:: 0.18
Parameters
----------
constant_value : float, default: 1.0
The constant value which defines the covariance:
k(x_1, x_2) = constant_value
constant_value_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on constant_value
"""
def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
self.constant_value = constant_value
self.constant_value_bounds = constant_value_bounds
@property
def hyperparameter_constant_value(self):
return Hyperparameter(
"constant_value", "numeric", self.constant_value_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
Y = X
elif eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = self.constant_value * np.ones((X.shape[0], Y.shape[0]))
if eval_gradient:
if not self.hyperparameter_constant_value.fixed:
return (K, self.constant_value
* np.ones((X.shape[0], X.shape[0], 1)))
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.constant_value * np.ones(X.shape[0])
def __repr__(self):
return "{0:.3g}**2".format(np.sqrt(self.constant_value))
class WhiteKernel(StationaryKernelMixin, Kernel):
"""White kernel.
The main use-case of this kernel is as part of a sum-kernel where it
explains the noise-component of the signal. Tuning its parameter
corresponds to estimating the noise-level.
k(x_1, x_2) = noise_level if x_1 == x_2 else 0
.. versionadded:: 0.18
Parameters
----------
noise_level : float, default: 1.0
Parameter controlling the noise level
noise_level_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on noise_level
"""
def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
self.noise_level = noise_level
self.noise_level_bounds = noise_level_bounds
@property
def hyperparameter_noise_level(self):
return Hyperparameter(
"noise_level", "numeric", self.noise_level_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is not None and eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
if Y is None:
K = self.noise_level * np.eye(X.shape[0])
if eval_gradient:
if not self.hyperparameter_noise_level.fixed:
return (K, self.noise_level
* np.eye(X.shape[0])[:, :, np.newaxis])
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
else:
return np.zeros((X.shape[0], Y.shape[0]))
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.noise_level * np.ones(X.shape[0])
def __repr__(self):
return "{0}(noise_level={1:.3g})".format(self.__class__.__name__,
self.noise_level)
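# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original scikit-learn module):
# ConstantKernel fills the whole covariance matrix with one value, while
# WhiteKernel only contributes noise on the diagonal of k(X, X). The helper
# name ``_demo_constant_and_white`` is hypothetical.
def _demo_constant_and_white():
    """Minimal sketch of the two 'trivial' kernels."""
    X = np.random.RandomState(2).randn(3, 2)
    const = ConstantKernel(constant_value=4.0)
    white = WhiteKernel(noise_level=0.5)
    assert np.allclose(const(X), 4.0 * np.ones((3, 3)))
    assert np.allclose(white(X), 0.5 * np.eye(3))
    # Cross-evaluation k(X, Y) of the white kernel is identically zero.
    assert np.allclose(white(X, X), np.zeros((3, 3)))
    return const.diag(X), white.diag(X)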
class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Radial-basis function kernel (aka squared-exponential kernel).
The RBF kernel is a stationary kernel. It is also known as the
"squared exponential" kernel. It is parameterized by a length-scale
parameter length_scale>0, which can either be a scalar (isotropic variant
of the kernel) or a vector with the same number of dimensions as the inputs
X (anisotropic variant of the kernel). The kernel is given by:
k(x_i, x_j) = exp(-1 / 2 d(x_i / length_scale, x_j / length_scale)^2)
This kernel is infinitely differentiable, which implies that GPs with this
kernel as covariance function have mean square derivatives of all orders,
and are thus very smooth.
.. versionadded:: 0.18
Parameters
    ----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.length_scale_bounds = length_scale_bounds
@property
def anisotropic(self):
return np.iterable(self.length_scale) and len(self.length_scale) > 1
@property
def hyperparameter_length_scale(self):
if self.anisotropic:
return Hyperparameter("length_scale", "numeric",
self.length_scale_bounds,
len(self.length_scale))
return Hyperparameter(
"length_scale", "numeric", self.length_scale_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
length_scale = _check_length_scale(X, self.length_scale)
if Y is None:
dists = pdist(X / length_scale, metric='sqeuclidean')
K = np.exp(-.5 * dists)
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / length_scale, Y / length_scale,
metric='sqeuclidean')
K = np.exp(-.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
return K, np.empty((X.shape[0], X.shape[0], 0))
elif not self.anisotropic or length_scale.shape[0] == 1:
K_gradient = \
(K * squareform(dists))[:, :, np.newaxis]
return K, K_gradient
elif self.anisotropic:
# We need to recompute the pairwise dimension-wise distances
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 \
/ (length_scale ** 2)
K_gradient *= K[..., np.newaxis]
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}])".format(
self.__class__.__name__, ", ".join(map("{0:.3g}".format,
self.length_scale)))
else: # isotropic
return "{0}(length_scale={1:.3g})".format(
self.__class__.__name__, np.ravel(self.length_scale)[0])
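# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original scikit-learn module): an RBF
# kernel with a scalar length_scale is isotropic; with a vector of per-feature
# length-scales it becomes anisotropic. The helper name ``_demo_rbf`` is
# hypothetical.
def _demo_rbf():
    """Minimal sketch: isotropic vs. anisotropic RBF evaluation."""
    X = np.random.RandomState(3).randn(6, 2)
    iso = RBF(length_scale=1.0)
    aniso = RBF(length_scale=[1.0, 10.0])
    K_iso, K_aniso = iso(X), aniso(X)
    # Both are unit-diagonal (6, 6) matrices.
    assert np.allclose(np.diag(K_iso), 1.0) and K_aniso.shape == (6, 6)
    # The anisotropic kernel exposes one length-scale entry per feature.
    assert aniso.hyperparameter_length_scale.n_elements == 2
    return K_iso, K_aniso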
class Matern(RBF):
""" Matern kernel.
The class of Matern kernels is a generalization of the RBF and the
absolute exponential kernel parameterized by an additional parameter
nu. The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for nu=0.5
to the absolute exponential kernel. Important intermediate values are
nu=1.5 (once differentiable functions) and nu=2.5 (twice differentiable
functions).
    See Rasmussen and Williams (2006), pp. 84 for details regarding the
different variants of the Matern kernel.
.. versionadded:: 0.18
Parameters
    ----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
    nu : float, default: 1.5
The parameter nu controlling the smoothness of the learned function.
The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for
nu=0.5 to the absolute exponential kernel. Important intermediate
values are nu=1.5 (once differentiable functions) and nu=2.5
        (twice differentiable functions). Note that values of nu not in
        [0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
        (approximately 10 times higher) since they require evaluating the
        modified Bessel function. Furthermore, in contrast to length_scale,
        nu is kept fixed at its initial value and is not optimized.
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5),
nu=1.5):
super(Matern, self).__init__(length_scale, length_scale_bounds)
self.nu = nu
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
length_scale = _check_length_scale(X, self.length_scale)
if Y is None:
dists = pdist(X / length_scale, metric='euclidean')
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / length_scale, Y / length_scale,
metric='euclidean')
if self.nu == 0.5:
K = np.exp(-dists)
elif self.nu == 1.5:
K = dists * math.sqrt(3)
K = (1. + K) * np.exp(-K)
elif self.nu == 2.5:
K = dists * math.sqrt(5)
K = (1. + K + K ** 2 / 3.0) * np.exp(-K)
else: # general case; expensive to evaluate
K = dists
K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan
tmp = (math.sqrt(2 * self.nu) * K)
K.fill((2 ** (1. - self.nu)) / gamma(self.nu))
K *= tmp ** self.nu
K *= kv(self.nu, tmp)
if Y is None:
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
K_gradient = np.empty((X.shape[0], X.shape[0], 0))
return K, K_gradient
# We need to recompute the pairwise dimension-wise distances
if self.anisotropic:
D = (X[:, np.newaxis, :] - X[np.newaxis, :, :])**2 \
/ (length_scale ** 2)
else:
D = squareform(dists**2)[:, :, np.newaxis]
if self.nu == 0.5:
K_gradient = K[..., np.newaxis] * D \
/ np.sqrt(D.sum(2))[:, :, np.newaxis]
K_gradient[~np.isfinite(K_gradient)] = 0
elif self.nu == 1.5:
K_gradient = \
3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
elif self.nu == 2.5:
tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
else:
# approximate gradient numerically
def f(theta): # helper function
return self.clone_with_theta(theta)(X, Y)
return K, _approx_fprime(self.theta, f, 1e-10)
if not self.anisotropic:
return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
else:
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}], nu={2:.3g})".format(
self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.length_scale)),
self.nu)
else:
return "{0}(length_scale={1:.3g}, nu={2:.3g})".format(
self.__class__.__name__, np.ravel(self.length_scale)[0],
self.nu)
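# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original scikit-learn module): the
# closed-form branches of the Matern kernel can be checked directly against
# the formulas quoted in the docstring. The helper name ``_demo_matern`` is
# hypothetical.
def _demo_matern():
    """Minimal sketch: nu=0.5 and nu=1.5 closed forms of the Matern kernel."""
    X = np.random.RandomState(4).randn(5, 2)
    d = squareform(pdist(X, metric='euclidean'))
    K_half = Matern(length_scale=1.0, nu=0.5)(X)
    assert np.allclose(K_half, np.exp(-d))  # absolute-exponential kernel
    K_three_halves = Matern(length_scale=1.0, nu=1.5)(X)
    assert np.allclose(K_three_halves,
                       (1. + np.sqrt(3) * d) * np.exp(-np.sqrt(3) * d))
    return K_half, K_three_halves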
class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Rational Quadratic kernel.
The RationalQuadratic kernel can be seen as a scale mixture (an infinite
sum) of RBF kernels with different characteristic length-scales. It is
parameterized by a length-scale parameter length_scale>0 and a scale
mixture parameter alpha>0. Only the isotropic variant where length_scale is
    a scalar is supported at the moment. The kernel is given by:
k(x_i, x_j) = (1 + d(x_i, x_j)^2 / (2*alpha * length_scale^2))^-alpha
.. versionadded:: 0.18
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
alpha : float > 0, default: 1.0
Scale mixture parameter
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
alpha_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on alpha
"""
def __init__(self, length_scale=1.0, alpha=1.0,
length_scale_bounds=(1e-5, 1e5), alpha_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.alpha = alpha
self.length_scale_bounds = length_scale_bounds
self.alpha_bounds = alpha_bounds
@property
def hyperparameter_length_scale(self):
return Hyperparameter(
"length_scale", "numeric", self.length_scale_bounds)
@property
def hyperparameter_alpha(self):
return Hyperparameter("alpha", "numeric", self.alpha_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='sqeuclidean'))
tmp = dists / (2 * self.alpha * self.length_scale ** 2)
base = (1 + tmp)
K = base ** -self.alpha
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='sqeuclidean')
K = (1 + dists / (2 * self.alpha * self.length_scale ** 2)) \
** -self.alpha
if eval_gradient:
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
dists * K / (self.length_scale ** 2 * base)
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # l is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to alpha
if not self.hyperparameter_alpha.fixed:
alpha_gradient = \
K * (-self.alpha * np.log(base)
+ dists / (2 * self.length_scale ** 2 * base))
alpha_gradient = alpha_gradient[:, :, np.newaxis]
else: # alpha is kept fixed
alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((alpha_gradient, length_scale_gradient))
else:
return K
def __repr__(self):
return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format(
self.__class__.__name__, self.alpha, self.length_scale)
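# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original scikit-learn module): as the
# scale-mixture parameter alpha grows, the RationalQuadratic kernel approaches
# an RBF kernel with the same length_scale. The helper name
# ``_demo_rational_quadratic`` is hypothetical.
def _demo_rational_quadratic():
    """Minimal sketch: RationalQuadratic(alpha=large) is close to RBF."""
    X = np.random.RandomState(5).randn(5, 2)
    K_rq = RationalQuadratic(length_scale=1.0, alpha=1e6)(X)
    K_rbf = RBF(length_scale=1.0)(X)
    assert np.max(np.abs(K_rq - K_rbf)) < 1e-4
    return K_rq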
class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Exp-Sine-Squared kernel.
The ExpSineSquared kernel allows modeling periodic functions. It is
parameterized by a length-scale parameter length_scale>0 and a periodicity
    parameter periodicity>0. Only the isotropic variant where length_scale is
    a scalar is supported at the moment. The kernel is given by:
    k(x_i, x_j) = exp(-2 (sin(\pi / periodicity * d(x_i, x_j)) / length_scale) ^ 2)
.. versionadded:: 0.18
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
periodicity : float > 0, default: 1.0
The periodicity of the kernel.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
periodicity_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on periodicity
"""
def __init__(self, length_scale=1.0, periodicity=1.0,
length_scale_bounds=(1e-5, 1e5),
periodicity_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.periodicity = periodicity
self.length_scale_bounds = length_scale_bounds
self.periodicity_bounds = periodicity_bounds
@property
def hyperparameter_length_scale(self):
return Hyperparameter(
"length_scale", "numeric", self.length_scale_bounds)
@property
def hyperparameter_periodicity(self):
return Hyperparameter(
"periodicity", "numeric", self.periodicity_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='euclidean'))
arg = np.pi * dists / self.periodicity
sin_of_arg = np.sin(arg)
K = np.exp(- 2 * (sin_of_arg / self.length_scale) ** 2)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='euclidean')
K = np.exp(- 2 * (np.sin(np.pi / self.periodicity * dists)
/ self.length_scale) ** 2)
if eval_gradient:
cos_of_arg = np.cos(arg)
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
4 / self.length_scale**2 * sin_of_arg**2 * K
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # length_scale is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to p
if not self.hyperparameter_periodicity.fixed:
periodicity_gradient = \
4 * arg / self.length_scale**2 * cos_of_arg \
* sin_of_arg * K
periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
else: # p is kept fixed
periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((length_scale_gradient, periodicity_gradient))
else:
return K
def __repr__(self):
return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.periodicity)
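# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original scikit-learn module): the
# Exp-Sine-Squared kernel is periodic, so inputs separated by an integer
# multiple of ``periodicity`` are perfectly correlated. The helper name
# ``_demo_exp_sine_squared`` is hypothetical.
def _demo_exp_sine_squared():
    """Minimal sketch: points one period apart have kernel value 1."""
    k = ExpSineSquared(length_scale=1.0, periodicity=2.0)
    X = np.array([[0.0], [2.0], [4.0], [1.0]])
    K = k(X)
    # x=2 and x=4 are whole periods away from x=0 -> kernel value 1.
    assert np.allclose(K[0, 1], 1.0) and np.allclose(K[0, 2], 1.0)
    # x=1 is half a period away from x=0, where sin(pi * d / p) = 1.
    assert np.allclose(K[0, 3], np.exp(-2.0))
    return K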
class DotProduct(Kernel):
"""Dot-Product kernel.
The DotProduct kernel is non-stationary and can be obtained from linear
    regression by putting N(0, 1) priors on the coefficients of x_d
    (d = 1, ..., D) and a prior of N(0, \sigma_0^2) on the bias. The DotProduct
    kernel is invariant to a rotation of the coordinates about the origin, but
    not translations. It is parameterized by the parameter sigma_0^2. For
    sigma_0^2 = 0, the kernel is called the homogeneous linear kernel; otherwise
    it is inhomogeneous. The kernel is given by
k(x_i, x_j) = sigma_0 ^ 2 + x_i \cdot x_j
The DotProduct kernel is commonly combined with exponentiation.
.. versionadded:: 0.18
Parameters
----------
sigma_0 : float >= 0, default: 1.0
        Parameter controlling the inhomogeneity of the kernel. If sigma_0=0,
        the kernel is homogeneous.
    sigma_0_bounds : pair of floats >= 0, default: (1e-5, 1e5)
        The lower and upper bound on sigma_0
"""
def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
self.sigma_0 = sigma_0
self.sigma_0_bounds = sigma_0_bounds
@property
def hyperparameter_sigma_0(self):
return Hyperparameter("sigma_0", "numeric", self.sigma_0_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
K = np.inner(X, X) + self.sigma_0 ** 2
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
K = np.inner(X, Y) + self.sigma_0 ** 2
if eval_gradient:
if not self.hyperparameter_sigma_0.fixed:
K_gradient = np.empty((K.shape[0], K.shape[1], 1))
K_gradient[..., 0] = 2 * self.sigma_0 ** 2
return K, K_gradient
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.einsum('ij,ij->i', X, X) + self.sigma_0 ** 2
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return False
def __repr__(self):
return "{0}(sigma_0={1:.3g})".format(
self.__class__.__name__, self.sigma_0)
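# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original scikit-learn module): the
# DotProduct kernel is just sigma_0^2 + <x_i, x_j>, so it can be compared
# directly with an explicit Gram matrix. The helper name ``_demo_dot_product``
# is hypothetical.
def _demo_dot_product():
    """Minimal sketch: DotProduct equals X X^T plus the sigma_0^2 offset."""
    X = np.random.RandomState(6).randn(4, 3)
    k = DotProduct(sigma_0=2.0)
    assert np.allclose(k(X), X.dot(X.T) + 4.0)
    # Non-stationary: it depends on the points themselves, not just distances.
    assert not k.is_stationary()
    return k.diag(X)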
# adapted from scipy/optimize/optimize.py for functions with 2d output
def _approx_fprime(xk, f, epsilon, args=()):
f0 = f(*((xk,) + args))
grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float)
ei = np.zeros((len(xk), ), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
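# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original scikit-learn module): the
# finite-difference helper above can cross-check an analytic kernel gradient.
# The helper name ``_demo_gradient_check`` is hypothetical.
def _demo_gradient_check():
    """Minimal sketch: numeric vs. analytic gradient of an RBF kernel."""
    X = np.random.RandomState(7).randn(5, 2)
    kernel = RBF(length_scale=1.0)
    K, K_gradient = kernel(X, eval_gradient=True)
    def f(theta):  # kernel matrix as a function of the log-hyperparameters
        return kernel.clone_with_theta(theta)(X)
    K_gradient_approx = _approx_fprime(kernel.theta, f, 1e-8)
    assert np.allclose(K_gradient, K_gradient_approx, atol=1e-4)
    return K_gradient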
class PairwiseKernel(Kernel):
"""Wrapper for kernels in sklearn.metrics.pairwise.
A thin wrapper around the functionality of the kernels in
sklearn.metrics.pairwise.
Note: Evaluation of eval_gradient is not analytic but numeric and all
kernels support only isotropic distances. The parameter gamma is
considered to be a hyperparameter and may be optimized. The other
kernel parameters are set directly at initialization and are kept
fixed.
.. versionadded:: 0.18
Parameters
----------
    gamma : float >= 0, default: 1.0
Parameter gamma of the pairwise kernel specified by metric
gamma_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on gamma
metric : string, or callable, default: "linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
pairwise_kernels_kwargs : dict, default: None
All entries of this dict (if any) are passed as keyword arguments to
the pairwise kernel function.
"""
def __init__(self, gamma=1.0, gamma_bounds=(1e-5, 1e5), metric="linear",
pairwise_kernels_kwargs=None):
self.gamma = gamma
self.gamma_bounds = gamma_bounds
self.metric = metric
self.pairwise_kernels_kwargs = pairwise_kernels_kwargs
@property
def hyperparameter_gamma(self):
return Hyperparameter("gamma", "numeric", self.gamma_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
pairwise_kernels_kwargs = self.pairwise_kernels_kwargs
if self.pairwise_kernels_kwargs is None:
pairwise_kernels_kwargs = {}
X = np.atleast_2d(X)
K = pairwise_kernels(X, Y, metric=self.metric, gamma=self.gamma,
filter_params=True,
**pairwise_kernels_kwargs)
if eval_gradient:
if self.hyperparameter_gamma.fixed:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
# approximate gradient numerically
def f(gamma): # helper function
return pairwise_kernels(
X, Y, metric=self.metric, gamma=np.exp(gamma),
filter_params=True, **pairwise_kernels_kwargs)
return K, _approx_fprime(self.theta, f, 1e-10)
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
# We have to fall back to slow way of computing diagonal
return np.apply_along_axis(self, 1, X)[:, 0]
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.metric in ["rbf"]
def __repr__(self):
return "{0}(gamma={1}, metric={2})".format(
self.__class__.__name__, self.gamma, self.metric)
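# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original scikit-learn module): with
# metric="rbf", PairwiseKernel should agree with the native RBF kernel under
# the reparametrization gamma = 1 / (2 * length_scale ** 2). The helper name
# ``_demo_pairwise_kernel`` is hypothetical.
def _demo_pairwise_kernel():
    """Minimal sketch: PairwiseKernel('rbf') versus the RBF kernel class."""
    X = np.random.RandomState(8).randn(5, 2)
    length_scale = 1.5
    k_native = RBF(length_scale=length_scale)
    k_pairwise = PairwiseKernel(gamma=1.0 / (2 * length_scale ** 2),
                                metric="rbf")
    assert np.allclose(k_native(X), k_pairwise(X))
    return k_pairwise(X)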
|
bsd-3-clause
|
bikong2/scikit-learn
|
sklearn/manifold/setup.py
|
99
|
1243
|
import os
from os.path import join
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("manifold", parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension("_utils",
sources=["_utils.c"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
cblas_libs, blas_info = get_blas_info()
eca = blas_info.pop('extra_compile_args', [])
eca.append("-O4")
config.add_extension("_barnes_hut_tsne",
libraries=cblas_libs,
sources=["_barnes_hut_tsne.c"],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=eca, **blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
|
bsd-3-clause
|
albertaparicio/tfg-voice-conversion
|
seq2seq_plot_curves.py
|
1
|
5888
|
# Created by Albert Aparicio on 6/12/16
# coding: utf-8
# This script takes the results of a training and plots its loss curves
import h5py
import matplotlib.pyplot as plt
import numpy as np
model_description = 'seq2seq_pretrain'
with h5py.File('training_results/' + model_description + '_training_params.h5',
'r') as f:
params_loss = f.attrs.get('params_loss').decode('utf-8')
flags_loss = f.attrs.get('flags_loss').decode('utf-8')
optimizer_name = f.attrs.get('optimizer').decode('utf-8')
nb_epochs = f.attrs.get('epochs')
learning_rate = f.attrs.get('learning_rate')
metrics_names = [name.decode('utf-8') for name in
f.attrs.get('metrics_names')]
f.close()
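# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): every CSV loaded
# below shares the same long basename. A hypothetical helper such as
# ``results_path`` could build those paths in one place; the original
# explicit concatenations underneath are kept unchanged.
def results_path(suffix):
    """Return the path of a training-results CSV for the given suffix."""
    return ('training_results/' + model_description + '_' + params_loss +
            '_' + flags_loss + '_' + optimizer_name + '_epochs_' +
            str(nb_epochs) + '_lr_' + str(learning_rate) + '_' + suffix +
            '.csv')
# e.g. epoch = np.loadtxt(results_path('epochs'), delimiter=',')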
epoch = np.loadtxt('training_results/' + model_description + '_' + params_loss
+ '_' + flags_loss + '_' + optimizer_name + '_epochs_' +
str(nb_epochs) + '_lr_' + str(learning_rate) + '_epochs.csv',
delimiter=',')
losses = np.loadtxt('training_results/' + model_description + '_' + params_loss
+ '_' + flags_loss + '_' + optimizer_name + '_epochs_' +
str(nb_epochs) + '_lr_' + str(learning_rate) +
'_loss.csv', delimiter=',')
val_losses = np.loadtxt(
'training_results/' + model_description + '_' + params_loss + '_' +
flags_loss + '_' + optimizer_name + '_epochs_' + str(nb_epochs) + '_lr_' +
str(learning_rate) + '_val_loss.csv', delimiter=',')
mcd = np.loadtxt(
'training_results/' + model_description + '_' + params_loss + '_' +
flags_loss + '_' + optimizer_name + '_epochs_' + str(nb_epochs) + '_lr_' +
str(learning_rate) + '_mcd.csv', delimiter=',')
rmse = np.loadtxt(
'training_results/' + model_description + '_' + params_loss + '_' +
flags_loss + '_' + optimizer_name + '_epochs_' + str(nb_epochs) + '_lr_' +
str(learning_rate) + '_rmse.csv', delimiter=',')
acc = np.loadtxt(
'training_results/' + model_description + '_' + params_loss + '_' +
flags_loss + '_' + optimizer_name + '_epochs_' + str(nb_epochs) + '_lr_' +
str(learning_rate) + '_acc.csv', delimiter=',')
assert (val_losses.size == losses.size)
# ##############################################
# # TODO Comment after dev
# metrics_names = ['loss', 'params_output_loss', 'flags_output_loss']
#
# ##############################################
# Losses plot
h1 = plt.figure(figsize=(14, 8))
ax1 = h1.add_subplot(111)
plt.plot(epoch, losses, epoch, val_losses, '--', linewidth=2)
# Prepare legend
legend_list = list(metrics_names) # We use list() to make a copy
for name in metrics_names:
legend_list.append('val_' + name)
plt.legend(legend_list, loc='best')
plt.suptitle('Parameters loss: ' + params_loss + ', Flags loss: ' + flags_loss +
', Optimizer: ' + optimizer_name + ', Epochs: ' + str(nb_epochs) +
', Learning rate: ' + str(learning_rate))
ax1.set_xlabel('Epochs')
ax1.set_ylabel('Loss values')
ax1.set_xlim(0, 19)
major_xticks = np.arange(0, 20, 1)
ax1.set_xticks(major_xticks)
ax1.tick_params(which='both', direction='out')
ax1.grid(which='both', ls='-')
plt.savefig('training_results/' + model_description + '_' + params_loss + '_' +
flags_loss + '_' + optimizer_name + '_epochs_' +
str(nb_epochs) + '_lr_' + str(learning_rate) + '_graph.eps',
bbox_inches='tight')
# plt.show()
plt.close(h1)
# Metrics plot
h2 = plt.figure(figsize=(10, 5))
ax2 = h2.add_subplot(111)
plt.plot(epoch, mcd) # , epoch, rmse, epoch, acc)
plt.legend(['MCD (dB)'], loc='best')
# , 'RMSE', 'Accuracy'
plt.suptitle("Cepstral features' MCD", fontsize=12)
# , RMSE and ACC
ax2.set_xlabel('Epochs')
ax2.set_ylabel('MCD (dB)')
ax2.set_xlim(0, 19)
major_xticks = np.arange(0, 20, 1)
major_yticks = np.arange(np.floor(np.min(mcd)), np.ceil(np.max(mcd)), 0.2)
ax2.set_xticks(major_xticks)
ax2.set_yticks(major_yticks)
ax2.tick_params(which='both', direction='out')
ax2.grid(which='both', ls='-')
plt.savefig('training_results/' + model_description + '_' + params_loss + '_' +
flags_loss + '_' + optimizer_name + '_epochs_' +
str(nb_epochs) + '_lr_' + str(learning_rate) + '_mcd.eps',
bbox_inches='tight')
plt.close(h2)
h2 = plt.figure(figsize=(10, 5))
ax2 = h2.add_subplot(111)
plt.plot(epoch, rmse)
plt.legend(['RMSE'], loc='best')
# , 'RMSE', 'Accuracy'
plt.suptitle("Pitch Root Mean Square Error (RMSE)", fontsize=12)
# , RMSE and ACC
ax2.set_xlabel('Epochs')
ax2.set_ylabel('Root Mean Square Error (RMSE)')
ax2.set_xlim(0, 19)
major_xticks = np.arange(0, 20, 1)
major_yticks = np.arange(0, np.ceil(np.max(rmse*100))/100, 0.01)
ax2.set_xticks(major_xticks)
ax2.set_yticks(major_yticks)
ax2.tick_params(which='both', direction='out')
ax2.grid(which='both', ls='-')
plt.savefig('training_results/' + model_description + '_' + params_loss + '_' +
flags_loss + '_' + optimizer_name + '_epochs_' +
str(nb_epochs) + '_lr_' + str(learning_rate) + '_rmse.eps',
bbox_inches='tight')
plt.close(h2)
h2 = plt.figure(figsize=(10, 5))
ax2 = h2.add_subplot(111)
plt.plot(epoch, acc)
plt.legend(['Accuracy'], loc='best')
plt.suptitle("U/V Flag Accuracy", fontsize=12)
# , RMSE and ACC
ax2.set_xlabel('Epochs')
ax2.set_ylabel('Accuracy')
ax2.set_xlim(0, 19)
major_xticks = np.arange(0, 20, 1)
major_yticks = np.arange(
np.floor(np.min(acc*100))/100,
1.005,
0.005
)
ax2.set_xticks(major_xticks)
ax2.set_yticks(major_yticks)
ax2.tick_params(which='both', direction='out')
ax2.grid(which='both', ls='-')
plt.savefig('training_results/' + model_description + '_' + params_loss + '_' +
flags_loss + '_' + optimizer_name + '_epochs_' +
str(nb_epochs) + '_lr_' + str(learning_rate) + '_acc.eps',
bbox_inches='tight')
plt.close(h2)
exit()
|
gpl-3.0
|
SoftwareLiteracyFoundation/BAM
|
shoals.py
|
1
|
5171
|
'''Shoal class for the Bay Assessment Model (BAM)'''
#---------------------------------------------------------------
#
#---------------------------------------------------------------
class Shoal:
"""Variables for each of the 410 shoals in Florida Bay.
Fluxes across the shoal are signed, identifying the 'upstream'
basin (A or B). Water level and concentration differences are gradients
between the adjacent basins. Velocities are given for each depth
on a shoal."""
def __init__( self, model ):
self.model = model
# matplotlib Figure variables
self.line_xy = None # Read from shapefile
self.Axes_plot = None # Created by matplotlib plot() (Line2D)
# Basins for this shoal
self.Basin_A = None # Basin object
self.Basin_B = None # Basin object
self.Basin_A_key = None # Basin number : key in Basins map
self.Basin_B_key = None # Basin number : key in Basins map
# Physical variables
# JP: All of these dictionaries share the same keys
# Some efficiency might be gained with one dictionary using
# depth_ft keys holding dictionaries with the { variable : values }
self.velocity = dict() # { depth(ft) : (m/s) }
self.wet_length = dict() # { depth(ft) : (m) }
self.friction_factor = dict() # { depth(ft) : factor }
self.h_upstream = dict() # { depth(ft) : (m) }
self.h_downstream = dict() # { depth(ft) : (m) }
self.cross_section = dict() # { depth(ft) : (m^2) }
self.hydraulic_radius = dict() # { depth(ft) : (m) }
self.manning_coefficient = None #
self.land_length = None # (m)
self.width = None # (m)
self.cross_section_total = 0 # (m^2)
self.level_difference = 0 # (m)
self.no_flow = False # True if land with 0 shoal width
self.initial_velocity = False # True 1st VelocityHydraulicRadius()
# Volume transports
self.flow_sign = 0 # -1, 0, 1 : B -> A, None, A -> B
self.Q = dict() # { depth(ft) : Q(m^3/s) }
self.Q_total = 0 # (m^3/s)
self.volume_A_B = 0 # (m^3/timestep)
self.volume_B_A = 0 # (m^3/timestep)
self.volume_residual = 0 # (m^3/timestep)
self.volume_total = 0 # (m^3/timestep)
# Solute transports
# self.solute_transport_A_B = None # (mol/time)
# self.solute_transport_B_A = None # (mol/time)
# self.solute_residual_transport = None # (mol/time)
# self.solute_total_transport = None # (mol/time)
#-----------------------------------------------------------
#
#-----------------------------------------------------------
def Print( self, shoal_number = None, print_all = False ) :
'''Display shoal info on the gui msgText box.'''
Basin_A = self.Basin_A
Basin_B = self.Basin_B
shoalInfo = '\nShoal: ' + str( shoal_number ) + ' ' +\
Basin_A.name + ' [' + str( self.Basin_A_key ) + '] ' +\
str( round( Basin_A.water_level, 2 ) ) + ' (m) to ' +\
Basin_B.name + ' [' + str( self.Basin_B_key ) + '] ' +\
                    str( round( Basin_B.water_level, 2 ) ) + ' (m)\n'
shoalInfo = shoalInfo +\
'Width: ' + str( self.width ) + ' (m)' +\
' Manning: ' + str( self.manning_coefficient ) +\
' Land Length: ' + str( self.land_length ) + ' (m)'
shoalInfo = shoalInfo + '\nh_upstream: '
for depth, h in self.h_upstream.items() :
shoalInfo = shoalInfo + str( depth ) + 'ft: ' +\
str( round( h, 3 ) ) + ' '
shoalInfo = shoalInfo + '(m)'
shoalInfo = shoalInfo + '\nh_downstream: '
for depth, h in self.h_downstream.items() :
shoalInfo = shoalInfo + str( depth ) + 'ft: ' +\
str( round( h, 3 ) ) + ' '
shoalInfo = shoalInfo + '(m)'
shoalInfo = shoalInfo + '\nVelocities: '
for depth, velocity in self.velocity.items() :
shoalInfo = shoalInfo + str( depth ) + 'ft: ' +\
str( round( velocity, 3 ) ) + ' '
shoalInfo = shoalInfo + '(m/s)'
shoalInfo = shoalInfo + '\nQ: '
for depth, Q in self.Q.items() :
shoalInfo = shoalInfo + str( depth ) + 'ft: ' +\
str( round( Q, 3 ) ) + ' '
shoalInfo = shoalInfo + '(m^3/s) Q_total: ' +\
str( round( self.Q_total, 1 ) ) + ' (m^3/s)\n'
if print_all :
shoalInfo = shoalInfo + '\nWet Length: '
for depth, length in self.wet_length.items() :
shoalInfo = shoalInfo + str( int( depth ) ) + 'ft: ' +\
str( round( length ) ) + ' '
shoalInfo = shoalInfo + '(m)\n'
self.model.gui.Message( shoalInfo )
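#---------------------------------------------------------------
# Illustrative sketch (not part of the original module): a Shoal can be
# exercised outside the full BAM model by handing it minimal stand-in
# objects. Every name starting with _Stub, and _demo_shoal itself, is
# hypothetical.
#---------------------------------------------------------------
class _StubGui:
    def Message( self, text ):
        print( text )
class _StubModel:
    def __init__( self ):
        self.gui = _StubGui()
class _StubBasin:
    def __init__( self, name, water_level ):
        self.name = name
        self.water_level = water_level
def _demo_shoal():
    '''Minimal sketch: build a Shoal with stub basins and print it.'''
    shoal = Shoal( _StubModel() )
    shoal.Basin_A = _StubBasin( 'Basin A', 0.10 )
    shoal.Basin_B = _StubBasin( 'Basin B', 0.05 )
    shoal.Basin_A_key, shoal.Basin_B_key = 1, 2
    shoal.width = 250.0 # (m)
    shoal.manning_coefficient = 0.1
    shoal.land_length = 100.0 # (m)
    shoal.Print( shoal_number = 1 )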
|
gpl-3.0
|
OshynSong/scikit-learn
|
benchmarks/bench_lasso.py
|
297
|
3305
|
"""
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
|
bsd-3-clause
|
chrisburr/scikit-learn
|
sklearn/feature_selection/tests/test_feature_select.py
|
103
|
22297
|
"""
Todo: cross-check the F-value with statsmodels
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: check that it does not raise casting
    # errors with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
    # Test that the fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
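# Illustrative arithmetic for the Benjamini-Hochberg bound checked above (a
# sketch added here, not part of the original test suite): with 2 false
# positives and 8 true positives among the selected features, the empirical
# false discovery rate is 2 / (2 + 8) = 0.2, and the procedure guarantees
# that the *expected* value of this ratio stays below alpha.
_example_fdr = 2 / (2. + 8.)  # == 0.2, purely for illustration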
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
|
bsd-3-clause
|
IAS-ZHAW/machine_learning_scripts
|
mlscripts/ml/som/plot_graphs.py
|
1
|
2006
|
# released under bsd license
# see LICENSE file or http://www.opensource.org/licenses/bsd-license.php for details
# Institute of Applied Simulation (ZHAW)
# Author Timo Jeranko
import scipy.interpolate as interpolate
import matplotlib.pyplot as pyplot
import matplotlib.cm as cm
from scipy import *
import random
from mlscripts.ml.som.visualize import *
from mlscripts.ml.som.flatten import *
def shade_map(distances):
im = pyplot.imshow(distances,interpolation="nearest",cmap=cm.gray,origin='lower')
pyplot.show()
def shade_map_clusters(distances,cluster_map):
im = pyplot.imshow(distances,interpolation='nearest',cmap=cm.gray,origin='lower')
pyplot.show()
#pyplot.figure()
    print(distances)
cluster_color_map = convert_cluster_map(cluster_map)
pyplot.imshow(cluster_color_map,interpolation='nearest',alpha=0.5,origin='lower')
pyplot.show()
def get_clusters(cluster_map):
out = []
for row in cluster_map:
for name in row:
if name not in out:
out.append(name)
return out
def convert_cluster_map(cluster_map):
clusters = get_clusters(cluster_map)
x = len(clusters)
delta = 1.0/x
colors = []
color = 0
for i in range(x):
color += delta
colors.append(color)
# print "colors", colors
out = []
for row in cluster_map:
outrow = []
for name in row:
i = clusters.index(name)
outrow.append(colors[i])
out.append(outrow)
return out
def shade_map_structured(words,distances):
im = pyplot.imshow(distances,interpolation='nearest',cmap=cm.gray,origin='lower')
for i,row in enumerate(words):
for j,word in enumerate(row):
if word != "":
pyplot.text(j,i,word,fontsize=8).set_color('red')
# pyplot.annotate(word,xy=(j,i)).set_color('red')
pyplot.show()
def get_spline():
    print(0)
# f = interpolate.LSQBivariateSpline(x,y,z,tx,ty)
|
bsd-3-clause
|
Evfro/fifty-shades
|
polara/tools/netflix.py
|
1
|
1132
|
import pandas as pd
import tarfile
def get_netflix_data(gz_file):
movie_data = []
movie_name = []
with tarfile.open(gz_file) as tar:
training_data = tar.getmember('download/training_set.tar')
with tarfile.open(fileobj=tar.extractfile(training_data)) as inner:
for item in inner.getmembers():
if item.isfile():
f = inner.extractfile(item.name)
df = pd.read_csv(f)
movieid = df.columns[0]
movie_name.append(movieid)
movie_data.append(df[movieid])
data = pd.concat(movie_data, keys=movie_name)
data = data.reset_index().iloc[:, :3].rename(columns={'level_0':'movieid',
'level_1':'userid',
'level_2':'rating'})
return data
def filter_by_length(data, session_length=20):
sz = data.groupby('userid', sort=False).size()
valid_users = sz.index[(sz > session_length)]
new_data = data[data.userid.isin(valid_users)]
return new_data
|
mit
|
tomlof/scikit-learn
|
examples/svm/plot_rbf_parameters.py
|
20
|
8048
|
'''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameter can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores result from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_splits`` at the
expense of compute time. Increasing the number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import GridSearchCV
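# Minimal sketch of the "logarithmic grid from 1e-3 to 1e3" recommended in the
# docstring above. The ``_sketch_*`` names are illustrative additions and are
# not used by the rest of this example.
_sketch_C_range = np.logspace(-3, 3, 7)        # 1e-3, 1e-2, ..., 1e3
_sketch_gamma_range = np.logspace(-3, 3, 7)
_sketch_param_grid = dict(C=_sketch_C_range, gamma=_sketch_gamma_range)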
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
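# Illustrative sketch of the leakage-free pattern alluded to above: fit the
# scaler on a training split only and reuse that fitted scaler for held-out
# data. The ``*_demo`` variables are illustrative additions (applied here to
# the already-scaled X_2d purely for demonstration) and are not used below.
from sklearn.model_selection import train_test_split
X_tr_demo, X_te_demo, y_tr_demo, y_te_demo = train_test_split(
    X_2d, y_2d, random_state=0)
demo_scaler = StandardScaler().fit(X_tr_demo)
X_tr_demo = demo_scaler.transform(X_tr_demo)
X_te_demo = demo_scaler.transform(X_te_demo)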
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
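# Two follow-ups suggested by the docstring, sketched here for illustration
# only (results unused): if the best parameters sit on the edge of the grid,
# the search range should be extended in that direction, and more CV
# iterations (``n_splits``) smooth out spurious score variations at the
# expense of compute time.
_on_C_edge = grid.best_params_['C'] in (C_range[0], C_range[-1])
_on_gamma_edge = grid.best_params_['gamma'] in (gamma_range[0], gamma_range[-1])
if _on_C_edge or _on_gamma_edge:
    print("Best parameters lie on the grid boundary; consider extending it.")
_cv_smooth = StratifiedShuffleSplit(n_splits=20, test_size=0.2, random_state=42)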
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r,
edgecolors='k')
plt.xticks(())
plt.yticks(())
plt.axis('tight')
scores = grid.cv_results_['mean_test_score'].reshape(len(C_range),
len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The scores are encoded as colors with the hot colormap, which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
|
bsd-3-clause
|
Eric89GXL/mne-python
|
mne/viz/backends/_pyvista.py
|
2
|
43764
|
"""
Core visualization operations based on PyVista.
Actual implementation of _Renderer and _Projection classes.
"""
# Authors: Alexandre Gramfort <[email protected]>
# Eric Larson <[email protected]>
# Guillaume Favelier <[email protected]>
# Joan Massich <[email protected]>
#
# License: Simplified BSD
from contextlib import contextmanager
from distutils.version import LooseVersion
import os
import sys
import warnings
import numpy as np
import vtk
from .base_renderer import _BaseRenderer
from ._utils import (_get_colormap_from_array, _alpha_blend_background,
ALLOWED_QUIVER_MODES)
from ...fixes import _get_args
from ...utils import copy_base_doc_to_subclass_doc, _check_option
from ...externals.decorator import decorator
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import pyvista
from pyvista import Plotter, PolyData, Line, close_all, UnstructuredGrid
try:
from pyvistaqt import BackgroundPlotter # noqa
except ImportError:
from pyvista import BackgroundPlotter
from pyvista.utilities import try_callback
from pyvista.plotting.plotting import _ALL_PLOTTERS
VTK9 = LooseVersion(getattr(vtk, 'VTK_VERSION', '9.0')) >= LooseVersion('9.0')
_FIGURES = dict()
class _Figure(object):
def __init__(self, plotter=None,
plotter_class=None,
display=None,
show=False,
title='PyVista Scene',
size=(600, 600),
shape=(1, 1),
background_color='black',
smooth_shading=True,
off_screen=False,
notebook=False):
self.plotter = plotter
self.plotter_class = plotter_class
self.display = display
self.background_color = background_color
self.smooth_shading = smooth_shading
self.notebook = notebook
self.store = dict()
self.store['show'] = show
self.store['title'] = title
self.store['window_size'] = size
self.store['shape'] = shape
self.store['off_screen'] = off_screen
self.store['border'] = False
self.store['auto_update'] = False
# multi_samples > 1 is broken on macOS + Intel Iris + volume rendering
self.store['multi_samples'] = 1 if sys.platform == 'darwin' else 4
def build(self):
if self.plotter_class is None:
self.plotter_class = BackgroundPlotter
if self.notebook:
self.plotter_class = Plotter
if self.plotter_class is Plotter:
self.store.pop('show', None)
self.store.pop('title', None)
self.store.pop('auto_update', None)
if self.plotter is None:
if self.plotter_class is BackgroundPlotter:
from PyQt5.QtWidgets import QApplication
app = QApplication.instance()
if app is None:
app = QApplication(["MNE"])
self.store['app'] = app
plotter = self.plotter_class(**self.store)
plotter.background_color = self.background_color
self.plotter = plotter
if self.plotter_class is BackgroundPlotter and \
hasattr(BackgroundPlotter, 'set_icon'):
_init_resources()
_process_events(plotter)
plotter.set_icon(":/mne-icon.png")
_process_events(self.plotter)
_process_events(self.plotter)
return self.plotter
def is_active(self):
if self.plotter is None:
return False
return hasattr(self.plotter, 'ren_win')
class _Projection(object):
"""Class storing projection information.
Attributes
----------
xy : array
Result of 2d projection of 3d data.
pts : None
Scene sensors handle.
"""
def __init__(self, xy=None, pts=None):
"""Store input projection information into attributes."""
self.xy = xy
self.pts = pts
def visible(self, state):
"""Modify visibility attribute of the sensors."""
self.pts.SetVisibility(state)
def _enable_aa(figure, plotter):
"""Enable it everywhere except Azure."""
# XXX for some reason doing this on Azure causes access violations:
# ##[error]Cmd.exe exited with code '-1073741819'
# So for now don't use it there. Maybe has to do with setting these
# before the window has actually been made "active"...?
# For Mayavi we have an "on activated" event or so, we should look into
# using this for Azure at some point, too.
if os.getenv('AZURE_CI_WINDOWS', 'false').lower() == 'true':
return
if figure.is_active():
if sys.platform != 'darwin':
plotter.enable_anti_aliasing()
plotter.ren_win.LineSmoothingOn()
@copy_base_doc_to_subclass_doc
class _Renderer(_BaseRenderer):
"""Class managing rendering scene.
Attributes
----------
plotter: Plotter
Main PyVista access point.
name: str
Name of the window.
"""
def __init__(self, fig=None, size=(600, 600), bgcolor='black',
name="PyVista Scene", show=False, shape=(1, 1),
notebook=None, smooth_shading=True):
from .renderer import MNE_3D_BACKEND_TESTING
from .._3d import _get_3d_option
figure = _Figure(show=show, title=name, size=size, shape=shape,
background_color=bgcolor, notebook=notebook,
smooth_shading=smooth_shading)
self.font_family = "arial"
self.tube_n_sides = 20
self.shape = shape
antialias = _get_3d_option('antialias')
self.antialias = antialias and not MNE_3D_BACKEND_TESTING
if isinstance(fig, int):
saved_fig = _FIGURES.get(fig)
# Restore only active plotter
if saved_fig is not None and saved_fig.is_active():
self.figure = saved_fig
else:
self.figure = figure
_FIGURES[fig] = self.figure
elif fig is None:
self.figure = figure
else:
self.figure = fig
# Enable off_screen if sphinx-gallery or testing
if pyvista.OFF_SCREEN:
self.figure.store['off_screen'] = True
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
if MNE_3D_BACKEND_TESTING:
self.tube_n_sides = 3
# smooth_shading=True fails on MacOS CIs
self.figure.smooth_shading = False
with _disabled_depth_peeling():
self.plotter = self.figure.build()
self.plotter.hide_axes()
if hasattr(self.plotter, "default_camera_tool_bar"):
self.plotter.default_camera_tool_bar.close()
if hasattr(self.plotter, "saved_cameras_tool_bar"):
self.plotter.saved_cameras_tool_bar.close()
if self.antialias:
_enable_aa(self.figure, self.plotter)
# FIX: https://github.com/pyvista/pyvistaqt/pull/68
if LooseVersion(pyvista.__version__) >= '0.27.0':
if not hasattr(self.plotter, "iren"):
self.plotter.iren = None
self.update_lighting()
@contextmanager
def ensure_minimum_sizes(self):
sz = self.figure.store['window_size']
# plotter: pyvista.plotting.qt_plotting.BackgroundPlotter
# plotter.interactor: vtk.qt.QVTKRenderWindowInteractor.QVTKRenderWindowInteractor -> QWidget # noqa
# plotter.app_window: pyvista.plotting.qt_plotting.MainWindow -> QMainWindow # noqa
# plotter.frame: QFrame with QVBoxLayout with plotter.interactor as centralWidget # noqa
# plotter.ren_win: vtkXOpenGLRenderWindow
self.plotter.interactor.setMinimumSize(*sz)
try:
yield # show
finally:
# 1. Process events
_process_events(self.plotter)
_process_events(self.plotter)
# 2. Get the window size that accommodates the size
sz = self.plotter.app_window.size()
# 3. Call app_window.setBaseSize and resize (in pyvistaqt)
self.plotter.window_size = (sz.width(), sz.height())
# 4. Undo the min size setting and process events
self.plotter.interactor.setMinimumSize(0, 0)
_process_events(self.plotter)
_process_events(self.plotter)
# 5. Resize the window (again!) to the correct size
# (not sure why, but this is required on macOS at least)
self.plotter.window_size = (sz.width(), sz.height())
_process_events(self.plotter)
_process_events(self.plotter)
def subplot(self, x, y):
x = np.max([0, np.min([x, self.shape[0] - 1])])
y = np.max([0, np.min([y, self.shape[1] - 1])])
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
self.plotter.subplot(x, y)
if self.antialias:
_enable_aa(self.figure, self.plotter)
def scene(self):
return self.figure
def _orient_lights(self):
lights = list(self.plotter.renderer.GetLights())
lights.pop(0) # unused headlight
lights[0].SetPosition(_to_pos(45.0, -45.0))
lights[1].SetPosition(_to_pos(-30.0, 60.0))
lights[2].SetPosition(_to_pos(-30.0, -60.0))
def update_lighting(self):
# Inspired from Mayavi's version of Raymond Maple 3-lights illumination
lights = list(self.plotter.renderer.GetLights())
headlight = lights.pop(0)
headlight.SetSwitch(False)
for i in range(len(lights)):
if i < 3:
lights[i].SetSwitch(True)
lights[i].SetIntensity(1.0)
lights[i].SetColor(1.0, 1.0, 1.0)
else:
lights[i].SetSwitch(False)
lights[i].SetPosition(_to_pos(0.0, 0.0))
lights[i].SetIntensity(1.0)
lights[i].SetColor(1.0, 1.0, 1.0)
lights[0].SetPosition(_to_pos(45.0, 45.0))
lights[1].SetPosition(_to_pos(-30.0, -60.0))
lights[1].SetIntensity(0.6)
lights[2].SetPosition(_to_pos(-30.0, 60.0))
lights[2].SetIntensity(0.5)
def set_interaction(self, interaction):
if not hasattr(self.plotter, "iren") or self.plotter.iren is None:
return
if interaction == "rubber_band_2d":
for renderer in self.plotter.renderers:
renderer.enable_parallel_projection()
if hasattr(self.plotter, 'enable_rubber_band_2d_style'):
self.plotter.enable_rubber_band_2d_style()
else:
style = vtk.vtkInteractorStyleRubberBand2D()
self.plotter.interactor.SetInteractorStyle(style)
else:
for renderer in self.plotter.renderers:
renderer.disable_parallel_projection()
getattr(self.plotter, f'enable_{interaction}_style')()
def polydata(self, mesh, color=None, opacity=1.0, normals=None,
backface_culling=False, scalars=None, colormap=None,
vmin=None, vmax=None, interpolate_before_map=True,
representation='surface', line_width=1.,
polygon_offset=None, **kwargs):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
rgba = False
if color is not None and len(color) == mesh.n_points:
if color.shape[1] == 3:
scalars = np.c_[color, np.ones(mesh.n_points)]
else:
scalars = color
scalars = (scalars * 255).astype('ubyte')
color = None
rgba = True
if isinstance(colormap, np.ndarray):
if colormap.dtype == np.uint8:
colormap = colormap.astype(np.float64) / 255.
from matplotlib.colors import ListedColormap
colormap = ListedColormap(colormap)
if normals is not None:
mesh.point_arrays["Normals"] = normals
mesh.GetPointData().SetActiveNormals("Normals")
else:
_compute_normals(mesh)
if 'rgba' in kwargs:
rgba = kwargs["rgba"]
kwargs.pop('rgba')
actor = _add_mesh(
plotter=self.plotter,
mesh=mesh, color=color, scalars=scalars,
rgba=rgba, opacity=opacity, cmap=colormap,
backface_culling=backface_culling,
rng=[vmin, vmax], show_scalar_bar=False,
smooth_shading=self.figure.smooth_shading,
interpolate_before_map=interpolate_before_map,
style=representation, line_width=line_width, **kwargs,
)
if polygon_offset is not None:
mapper = actor.GetMapper()
mapper.SetResolveCoincidentTopologyToPolygonOffset()
mapper.SetRelativeCoincidentTopologyPolygonOffsetParameters(
polygon_offset, polygon_offset)
return actor, mesh
def mesh(self, x, y, z, triangles, color, opacity=1.0, shading=False,
backface_culling=False, scalars=None, colormap=None,
vmin=None, vmax=None, interpolate_before_map=True,
representation='surface', line_width=1., normals=None,
polygon_offset=None, **kwargs):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
vertices = np.c_[x, y, z]
triangles = np.c_[np.full(len(triangles), 3), triangles]
mesh = PolyData(vertices, triangles)
return self.polydata(
mesh=mesh,
color=color,
opacity=opacity,
normals=normals,
backface_culling=backface_culling,
scalars=scalars,
colormap=colormap,
vmin=vmin,
vmax=vmax,
interpolate_before_map=interpolate_before_map,
representation=representation,
line_width=line_width,
polygon_offset=polygon_offset,
**kwargs,
)
def contour(self, surface, scalars, contours, width=1.0, opacity=1.0,
vmin=None, vmax=None, colormap=None,
normalized_colormap=False, kind='line', color=None):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
if colormap is not None:
colormap = _get_colormap_from_array(colormap,
normalized_colormap)
vertices = np.array(surface['rr'])
triangles = np.array(surface['tris'])
n_triangles = len(triangles)
triangles = np.c_[np.full(n_triangles, 3), triangles]
mesh = PolyData(vertices, triangles)
mesh.point_arrays['scalars'] = scalars
contour = mesh.contour(isosurfaces=contours)
line_width = width
if kind == 'tube':
contour = contour.tube(radius=width, n_sides=self.tube_n_sides)
line_width = 1.0
actor = _add_mesh(
plotter=self.plotter,
mesh=contour,
show_scalar_bar=False,
line_width=line_width,
color=color,
rng=[vmin, vmax],
cmap=colormap,
opacity=opacity,
smooth_shading=self.figure.smooth_shading
)
return actor, contour
def surface(self, surface, color=None, opacity=1.0,
vmin=None, vmax=None, colormap=None,
normalized_colormap=False, scalars=None,
backface_culling=False, polygon_offset=None):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
normals = surface.get('nn', None)
vertices = np.array(surface['rr'])
triangles = np.array(surface['tris'])
triangles = np.c_[np.full(len(triangles), 3), triangles]
mesh = PolyData(vertices, triangles)
colormap = _get_colormap_from_array(colormap, normalized_colormap)
if scalars is not None:
mesh.point_arrays['scalars'] = scalars
return self.polydata(
mesh=mesh,
color=color,
opacity=opacity,
normals=normals,
backface_culling=backface_culling,
scalars=scalars,
colormap=colormap,
vmin=vmin,
vmax=vmax,
polygon_offset=polygon_offset,
)
def sphere(self, center, color, scale, opacity=1.0,
resolution=8, backface_culling=False,
radius=None):
factor = 1.0 if radius is not None else scale
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
sphere = vtk.vtkSphereSource()
sphere.SetThetaResolution(resolution)
sphere.SetPhiResolution(resolution)
if radius is not None:
sphere.SetRadius(radius)
sphere.Update()
geom = sphere.GetOutput()
mesh = PolyData(np.array(center))
glyph = mesh.glyph(orient=False, scale=False,
factor=factor, geom=geom)
actor = _add_mesh(
self.plotter,
mesh=glyph, color=color, opacity=opacity,
backface_culling=backface_culling,
smooth_shading=self.figure.smooth_shading
)
return actor, glyph
def tube(self, origin, destination, radius=0.001, color='white',
scalars=None, vmin=None, vmax=None, colormap='RdBu',
normalized_colormap=False, reverse_lut=False):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
cmap = _get_colormap_from_array(colormap, normalized_colormap)
for (pointa, pointb) in zip(origin, destination):
line = Line(pointa, pointb)
if scalars is not None:
line.point_arrays['scalars'] = scalars[0, :]
scalars = 'scalars'
color = None
else:
scalars = None
tube = line.tube(radius, n_sides=self.tube_n_sides)
_add_mesh(
plotter=self.plotter,
mesh=tube,
scalars=scalars,
flip_scalars=reverse_lut,
rng=[vmin, vmax],
color=color,
show_scalar_bar=False,
cmap=cmap,
smooth_shading=self.figure.smooth_shading,
)
return tube
def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8,
glyph_height=None, glyph_center=None, glyph_resolution=None,
opacity=1.0, scale_mode='none', scalars=None,
backface_culling=False, line_width=2., name=None,
glyph_width=None, glyph_depth=None,
solid_transform=None):
_check_option('mode', mode, ALLOWED_QUIVER_MODES)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
factor = scale
vectors = np.c_[u, v, w]
points = np.vstack(np.c_[x, y, z])
n_points = len(points)
cell_type = np.full(n_points, vtk.VTK_VERTEX)
cells = np.c_[np.full(n_points, 1), range(n_points)]
args = (cells, cell_type, points)
if not VTK9:
args = (np.arange(n_points) * 3,) + args
grid = UnstructuredGrid(*args)
grid.point_arrays['vec'] = vectors
if scale_mode == 'scalar':
grid.point_arrays['mag'] = np.array(scalars)
scale = 'mag'
else:
scale = False
if mode == '2darrow':
return _arrow_glyph(grid, factor), grid
elif mode == 'arrow':
alg = _glyph(
grid,
orient='vec',
scalars=scale,
factor=factor
)
mesh = pyvista.wrap(alg.GetOutput())
else:
tr = None
if mode == 'cone':
glyph = vtk.vtkConeSource()
glyph.SetCenter(0.5, 0, 0)
glyph.SetRadius(0.15)
elif mode == 'cylinder':
glyph = vtk.vtkCylinderSource()
glyph.SetRadius(0.15)
elif mode == 'oct':
glyph = vtk.vtkPlatonicSolidSource()
glyph.SetSolidTypeToOctahedron()
else:
assert mode == 'sphere', mode # guaranteed above
glyph = vtk.vtkSphereSource()
if mode == 'cylinder':
if glyph_height is not None:
glyph.SetHeight(glyph_height)
if glyph_center is not None:
glyph.SetCenter(glyph_center)
if glyph_resolution is not None:
glyph.SetResolution(glyph_resolution)
tr = vtk.vtkTransform()
tr.RotateWXYZ(90, 0, 0, 1)
elif mode == 'oct':
if solid_transform is not None:
assert solid_transform.shape == (4, 4)
tr = vtk.vtkTransform()
tr.SetMatrix(
solid_transform.astype(np.float64).ravel())
if tr is not None:
# fix orientation
glyph.Update()
trp = vtk.vtkTransformPolyDataFilter()
trp.SetInputData(glyph.GetOutput())
trp.SetTransform(tr)
glyph = trp
glyph.Update()
geom = glyph.GetOutput()
mesh = grid.glyph(orient='vec', scale=scale, factor=factor,
geom=geom)
_add_mesh(
self.plotter,
mesh=mesh,
color=color,
opacity=opacity,
backface_culling=backface_culling
)
def text2d(self, x_window, y_window, text, size=14, color='white',
justification=None):
size = 14 if size is None else size
position = (x_window, y_window)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
actor = self.plotter.add_text(text, position=position,
font_size=size,
font=self.font_family,
color=color,
viewport=True)
if isinstance(justification, str):
if justification == 'left':
actor.GetTextProperty().SetJustificationToLeft()
elif justification == 'center':
actor.GetTextProperty().SetJustificationToCentered()
elif justification == 'right':
actor.GetTextProperty().SetJustificationToRight()
else:
raise ValueError('Expected values for `justification`'
'are `left`, `center` or `right` but '
'got {} instead.'.format(justification))
return actor
def text3d(self, x, y, z, text, scale, color='white'):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
kwargs = dict(
points=[x, y, z],
labels=[text],
point_size=scale,
text_color=color,
font_family=self.font_family,
name=text,
shape_opacity=0,
)
if 'always_visible' in _get_args(self.plotter.add_point_labels):
kwargs['always_visible'] = True
self.plotter.add_point_labels(**kwargs)
def scalarbar(self, source, color="white", title=None, n_labels=4,
bgcolor=None, **extra_kwargs):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
kwargs = dict(color=color, title=title, n_labels=n_labels,
use_opacity=False, n_colors=256, position_x=0.15,
position_y=0.05, width=0.7, shadow=False, bold=True,
label_font_size=22, font_family=self.font_family,
background_color=bgcolor)
kwargs.update(extra_kwargs)
self.plotter.add_scalar_bar(**kwargs)
def show(self):
self.figure.display = self.plotter.show()
if hasattr(self.plotter, "app_window"):
with self.ensure_minimum_sizes():
self.plotter.app_window.show()
return self.scene()
def close(self):
_close_3d_figure(figure=self.figure)
def set_camera(self, azimuth=None, elevation=None, distance=None,
focalpoint=None, roll=None, reset_camera=True):
_set_3d_view(self.figure, azimuth=azimuth, elevation=elevation,
distance=distance, focalpoint=focalpoint, roll=roll,
reset_camera=reset_camera)
def reset_camera(self):
self.plotter.reset_camera()
def screenshot(self, mode='rgb', filename=None):
return _take_3d_screenshot(figure=self.figure, mode=mode,
filename=filename)
def project(self, xyz, ch_names):
xy = _3d_to_2d(self.plotter, xyz)
xy = dict(zip(ch_names, xy))
# pts = self.fig.children[-1]
pts = self.plotter.renderer.GetActors().GetLastItem()
return _Projection(xy=xy, pts=pts)
def enable_depth_peeling(self):
if not self.figure.store['off_screen']:
for renderer in self.plotter.renderers:
renderer.enable_depth_peeling()
def remove_mesh(self, mesh_data):
actor, _ = mesh_data
self.plotter.remove_actor(actor)
def _create_actor(mapper=None):
"""Create a vtkActor."""
actor = vtk.vtkActor()
if mapper is not None:
actor.SetMapper(mapper)
return actor
def _compute_normals(mesh):
"""Patch PyVista compute_normals."""
if 'Normals' not in mesh.point_arrays:
mesh.compute_normals(
cell_normals=False,
consistent_normals=False,
non_manifold_traversal=False,
inplace=True,
)
def _add_mesh(plotter, *args, **kwargs):
"""Patch PyVista add_mesh."""
from . import renderer
_process_events(plotter)
mesh = kwargs.get('mesh')
if 'smooth_shading' in kwargs:
smooth_shading = kwargs.pop('smooth_shading')
else:
smooth_shading = True
# disable rendering pass for add_mesh, render()
# is called in show()
if 'render' not in kwargs and 'render' in _get_args(plotter.add_mesh):
kwargs['render'] = False
actor = plotter.add_mesh(*args, **kwargs)
if smooth_shading and 'Normals' in mesh.point_arrays:
prop = actor.GetProperty()
prop.SetInterpolationToPhong()
if renderer.MNE_3D_BACKEND_TESTING:
actor.SetVisibility(False)
return actor
def _deg2rad(deg):
return deg * np.pi / 180.
def _rad2deg(rad):
return rad * 180. / np.pi
def _to_pos(elevation, azimuth):
theta = azimuth * np.pi / 180.0
phi = (90.0 - elevation) * np.pi / 180.0
x = np.sin(theta) * np.sin(phi)
y = np.cos(phi)
z = np.cos(theta) * np.sin(phi)
return x, y, z
def _mat_to_array(vtk_mat):
e = [vtk_mat.GetElement(i, j) for i in range(4) for j in range(4)]
arr = np.array(e, dtype=float)
arr.shape = (4, 4)
return arr
def _3d_to_2d(plotter, xyz):
size = plotter.window_size
xyz = np.column_stack([xyz, np.ones(xyz.shape[0])])
# Transform points into 'unnormalized' view coordinates
comb_trans_mat = _get_world_to_view_matrix(plotter)
view_coords = np.dot(comb_trans_mat, xyz.T).T
# Divide through by the fourth element for normalized view coords
norm_view_coords = view_coords / (view_coords[:, 3].reshape(-1, 1))
# Transform from normalized view coordinates to display coordinates.
view_to_disp_mat = _get_view_to_display_matrix(size)
xy = np.dot(view_to_disp_mat, norm_view_coords.T).T
# Pull the first two columns since they're meaningful for 2d plotting
xy = xy[:, :2]
return xy
def _get_world_to_view_matrix(plotter):
cam = plotter.renderer.camera
scene_size = plotter.window_size
clip_range = cam.GetClippingRange()
aspect_ratio = float(scene_size[0]) / scene_size[1]
vtk_comb_trans_mat = cam.GetCompositeProjectionTransformMatrix(
aspect_ratio, clip_range[0], clip_range[1])
vtk_comb_trans_mat = _mat_to_array(vtk_comb_trans_mat)
return vtk_comb_trans_mat
def _get_view_to_display_matrix(size):
x, y = size
view_to_disp_mat = np.array([[x / 2.0, 0., 0., x / 2.0],
[0., -y / 2.0, 0., y / 2.0],
[0., 0., 1., 0.],
[0., 0., 0., 1.]])
return view_to_disp_mat
def _close_all():
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
close_all()
_FIGURES.clear()
def _get_camera_direction(focalpoint, position):
x, y, z = position - focalpoint
r = np.sqrt(x * x + y * y + z * z)
theta = np.arccos(z / r)
phi = np.arctan2(y, x)
return r, theta, phi, focalpoint
def _set_3d_view(figure, azimuth, elevation, focalpoint, distance, roll=None,
reset_camera=True):
position = np.array(figure.plotter.camera_position[0])
if reset_camera:
figure.plotter.reset_camera()
if focalpoint is None:
focalpoint = np.array(figure.plotter.camera_position[1])
r, theta, phi, fp = _get_camera_direction(focalpoint, position)
if azimuth is not None:
phi = _deg2rad(azimuth)
if elevation is not None:
theta = _deg2rad(elevation)
# set the distance
renderer = figure.plotter.renderer
bounds = np.array(renderer.ComputeVisiblePropBounds())
if distance is None:
distance = max(bounds[1::2] - bounds[::2]) * 2.0
if focalpoint is not None:
focalpoint = np.asarray(focalpoint)
else:
focalpoint = (bounds[1::2] + bounds[::2]) * 0.5
# Now calculate the view_up vector of the camera. If the view up is
# close to the 'z' axis, the view plane normal is parallel to the
# camera which is unacceptable, so we use a different view up.
if elevation is None or 5. <= abs(elevation) <= 175.:
view_up = [0, 0, 1]
else:
view_up = [np.sin(phi), np.cos(phi), 0]
position = [
distance * np.cos(phi) * np.sin(theta),
distance * np.sin(phi) * np.sin(theta),
distance * np.cos(theta)]
figure.plotter.camera_position = [
position, focalpoint, view_up]
if roll is not None:
figure.plotter.camera.SetRoll(roll)
figure.plotter.renderer._azimuth = azimuth
figure.plotter.renderer._elevation = elevation
figure.plotter.renderer._distance = distance
figure.plotter.renderer._roll = roll
figure.plotter.update()
_process_events(figure.plotter)
def _set_3d_title(figure, title, size=16):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
figure.plotter.add_text(title, font_size=size, color='white',
name='title')
figure.plotter.update()
_process_events(figure.plotter)
def _check_3d_figure(figure):
if not isinstance(figure, _Figure):
raise TypeError('figure must be an instance of _Figure.')
def _close_3d_figure(figure):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
# close the window
figure.plotter.close()
_process_events(figure.plotter)
# free memory and deregister from the scraper
figure.plotter.deep_clean()
del _ALL_PLOTTERS[figure.plotter._id_name]
_process_events(figure.plotter)
def _take_3d_screenshot(figure, mode='rgb', filename=None):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
_process_events(figure.plotter)
return figure.plotter.screenshot(
transparent_background=(mode == 'rgba'),
filename=filename)
def _process_events(plotter):
if hasattr(plotter, 'app'):
with warnings.catch_warnings(record=True):
warnings.filterwarnings('ignore', 'constrained_layout')
plotter.app.processEvents()
def _set_colormap_range(actor, ctable, scalar_bar, rng=None,
background_color=None):
from vtk.util.numpy_support import numpy_to_vtk
if rng is not None:
mapper = actor.GetMapper()
mapper.SetScalarRange(*rng)
lut = mapper.GetLookupTable()
lut.SetTable(numpy_to_vtk(ctable))
if scalar_bar is not None:
lut = scalar_bar.GetLookupTable()
if background_color is not None:
background_color = np.array(background_color) * 255
ctable = _alpha_blend_background(ctable, background_color)
lut.SetTable(numpy_to_vtk(ctable, array_type=vtk.VTK_UNSIGNED_CHAR))
lut.SetRange(*rng)
def _set_volume_range(volume, ctable, alpha, scalar_bar, rng):
import vtk
from vtk.util.numpy_support import numpy_to_vtk
color_tf = vtk.vtkColorTransferFunction()
opacity_tf = vtk.vtkPiecewiseFunction()
for loc, color in zip(np.linspace(*rng, num=len(ctable)), ctable):
color_tf.AddRGBPoint(loc, *(color[:-1] / 255.))
opacity_tf.AddPoint(loc, color[-1] * alpha / 255.)
color_tf.ClampingOn()
opacity_tf.ClampingOn()
volume.GetProperty().SetColor(color_tf)
volume.GetProperty().SetScalarOpacity(opacity_tf)
if scalar_bar is not None:
lut = vtk.vtkLookupTable()
lut.SetRange(*rng)
lut.SetTable(numpy_to_vtk(ctable))
scalar_bar.SetLookupTable(lut)
def _set_mesh_scalars(mesh, scalars, name):
# Catch: FutureWarning: Conversion of the second argument of
# issubdtype from `complex` to `np.complexfloating` is deprecated.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
mesh.point_arrays[name] = scalars
def _update_slider_callback(slider, callback, event_type):
_check_option('event_type', event_type, ['start', 'end', 'always'])
def _the_callback(widget, event):
value = widget.GetRepresentation().GetValue()
if hasattr(callback, '__call__'):
try_callback(callback, value)
return
if event_type == 'start':
event = vtk.vtkCommand.StartInteractionEvent
elif event_type == 'end':
event = vtk.vtkCommand.EndInteractionEvent
else:
assert event_type == 'always', event_type
event = vtk.vtkCommand.InteractionEvent
slider.RemoveObserver(event)
slider.AddObserver(event, _the_callback)
def _add_camera_callback(camera, callback):
camera.AddObserver(vtk.vtkCommand.ModifiedEvent, callback)
def _update_picking_callback(plotter,
on_mouse_move,
on_button_press,
on_button_release,
on_pick):
interactor = plotter.iren
interactor.AddObserver(
vtk.vtkCommand.RenderEvent,
on_mouse_move
)
interactor.AddObserver(
vtk.vtkCommand.LeftButtonPressEvent,
on_button_press
)
interactor.AddObserver(
vtk.vtkCommand.EndInteractionEvent,
on_button_release
)
picker = vtk.vtkCellPicker()
picker.AddObserver(
vtk.vtkCommand.EndPickEvent,
on_pick
)
picker.SetVolumeOpacityIsovalue(0.)
plotter.picker = picker
def _remove_picking_callback(interactor, picker):
interactor.RemoveObservers(vtk.vtkCommand.RenderEvent)
interactor.RemoveObservers(vtk.vtkCommand.LeftButtonPressEvent)
interactor.RemoveObservers(vtk.vtkCommand.EndInteractionEvent)
picker.RemoveObservers(vtk.vtkCommand.EndPickEvent)
def _arrow_glyph(grid, factor):
glyph = vtk.vtkGlyphSource2D()
glyph.SetGlyphTypeToArrow()
glyph.FilledOff()
glyph.Update()
# fix position
tr = vtk.vtkTransform()
tr.Translate(0.5, 0., 0.)
trp = vtk.vtkTransformPolyDataFilter()
trp.SetInputConnection(glyph.GetOutputPort())
trp.SetTransform(tr)
trp.Update()
alg = _glyph(
grid,
scale_mode='vector',
scalars=False,
orient='vec',
factor=factor,
geom=trp.GetOutputPort(),
)
mapper = vtk.vtkDataSetMapper()
mapper.SetInputConnection(alg.GetOutputPort())
return mapper
def _glyph(dataset, scale_mode='scalar', orient=True, scalars=True, factor=1.0,
geom=None, tolerance=0.0, absolute=False, clamping=False, rng=None):
if geom is None:
arrow = vtk.vtkArrowSource()
arrow.Update()
geom = arrow.GetOutputPort()
alg = vtk.vtkGlyph3D()
alg.SetSourceConnection(geom)
if isinstance(scalars, str):
dataset.active_scalars_name = scalars
if isinstance(orient, str):
dataset.active_vectors_name = orient
orient = True
if scale_mode == 'scalar':
alg.SetScaleModeToScaleByScalar()
elif scale_mode == 'vector':
alg.SetScaleModeToScaleByVector()
else:
alg.SetScaleModeToDataScalingOff()
if rng is not None:
alg.SetRange(rng)
alg.SetOrient(orient)
alg.SetInputData(dataset)
alg.SetScaleFactor(factor)
alg.SetClamping(clamping)
alg.Update()
return alg
def _sphere(plotter, center, color, radius):
sphere = vtk.vtkSphereSource()
sphere.SetThetaResolution(8)
sphere.SetPhiResolution(8)
sphere.SetRadius(radius)
sphere.SetCenter(center)
sphere.Update()
mesh = pyvista.wrap(sphere.GetOutput())
actor = _add_mesh(
plotter,
mesh=mesh,
color=color
)
return actor, mesh
def _volume(dimensions, origin, spacing, scalars,
surface_alpha, resolution, blending, center):
# Now we can actually construct the visualization
grid = pyvista.UniformGrid()
grid.dimensions = dimensions + 1 # inject data on the cells
grid.origin = origin
grid.spacing = spacing
grid.cell_arrays['values'] = scalars
# Add contour of enclosed volume (use GetOutput instead of
# GetOutputPort below to avoid updating)
grid_alg = vtk.vtkCellDataToPointData()
grid_alg.SetInputDataObject(grid)
grid_alg.SetPassCellData(False)
grid_alg.Update()
if surface_alpha > 0:
grid_surface = vtk.vtkMarchingContourFilter()
grid_surface.ComputeNormalsOn()
grid_surface.ComputeScalarsOff()
grid_surface.SetInputData(grid_alg.GetOutput())
grid_surface.SetValue(0, 0.1)
grid_surface.Update()
grid_mesh = vtk.vtkPolyDataMapper()
grid_mesh.SetInputData(grid_surface.GetOutput())
else:
grid_mesh = None
mapper = vtk.vtkSmartVolumeMapper()
if resolution is None: # native
mapper.SetScalarModeToUseCellData()
mapper.SetInputDataObject(grid)
else:
upsampler = vtk.vtkImageReslice()
upsampler.SetInterpolationModeToLinear() # default anyway
upsampler.SetOutputSpacing(*([resolution] * 3))
upsampler.SetInputConnection(grid_alg.GetOutputPort())
mapper.SetInputConnection(upsampler.GetOutputPort())
# Additive, AverageIntensity, and Composite might also be reasonable
remap = dict(composite='Composite', mip='MaximumIntensity')
getattr(mapper, f'SetBlendModeTo{remap[blending]}')()
volume_pos = vtk.vtkVolume()
volume_pos.SetMapper(mapper)
dist = grid.length / (np.mean(grid.dimensions) - 1)
volume_pos.GetProperty().SetScalarOpacityUnitDistance(dist)
if center is not None and blending == 'mip':
# We need to create a minimum intensity projection for the neg half
mapper_neg = vtk.vtkSmartVolumeMapper()
if resolution is None: # native
mapper_neg.SetScalarModeToUseCellData()
mapper_neg.SetInputDataObject(grid)
else:
mapper_neg.SetInputConnection(upsampler.GetOutputPort())
mapper_neg.SetBlendModeToMinimumIntensity()
volume_neg = vtk.vtkVolume()
volume_neg.SetMapper(mapper_neg)
volume_neg.GetProperty().SetScalarOpacityUnitDistance(dist)
else:
volume_neg = None
return grid, grid_mesh, volume_pos, volume_neg
def _require_minimum_version(version_required):
from distutils.version import LooseVersion
version = LooseVersion(pyvista.__version__)
if version < version_required:
raise ImportError('pyvista>={} is required for this module but the '
'version found is {}'.format(version_required,
version))
@contextmanager
def _testing_context(interactive):
from . import renderer
orig_offscreen = pyvista.OFF_SCREEN
orig_testing = renderer.MNE_3D_BACKEND_TESTING
orig_interactive = renderer.MNE_3D_BACKEND_INTERACTIVE
renderer.MNE_3D_BACKEND_TESTING = True
if interactive:
pyvista.OFF_SCREEN = False
renderer.MNE_3D_BACKEND_INTERACTIVE = True
else:
pyvista.OFF_SCREEN = True
renderer.MNE_3D_BACKEND_INTERACTIVE = False
try:
yield
finally:
pyvista.OFF_SCREEN = orig_offscreen
renderer.MNE_3D_BACKEND_TESTING = orig_testing
renderer.MNE_3D_BACKEND_INTERACTIVE = orig_interactive
@contextmanager
def _disabled_depth_peeling():
from pyvista import rcParams
depth_peeling_enabled = rcParams["depth_peeling"]["enabled"]
rcParams["depth_peeling"]["enabled"] = False
try:
yield
finally:
rcParams["depth_peeling"]["enabled"] = depth_peeling_enabled
@contextmanager
def _disabled_interaction(renderer):
plotter = renderer.plotter
if not plotter.renderer.GetInteractive():
yield
else:
plotter.disable()
try:
yield
finally:
plotter.enable()
@decorator
def run_once(fun, *args, **kwargs):
"""Run the function only once."""
if not hasattr(fun, "_has_run"):
fun._has_run = True
return fun(*args, **kwargs)
@run_once
def _init_resources():
from ...icons import resources
resources.qInitResources()
|
bsd-3-clause
|
rajathkumarmp/numpy
|
numpy/lib/npyio.py
|
18
|
71288
|
from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields,
flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def __dir__(self):
"""
Enables dir(bagobj) to list the files in an NpzFile.
This also enables tab-completion in an interpreter or IPython.
"""
return object.__getattribute__(self, '_obj').keys()
def zipfile_factory(*args, **kwargs):
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
    format. It assumes that files in the archive have a ``.npy`` extension;
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
        An object on which attribute lookup can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
Allow loading pickled data. Default: True
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
Python 2 when using Python 3.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False, allow_pickle=True,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
self.allow_pickle = allow_pickle
self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes,
allow_pickle=self.allow_pickle,
pickle_kwargs=self.pickle_kwargs)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ``.npy`` extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
Parameters
----------
file : file-like object or string
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
allow_pickle : bool, optional
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
arrays will fail.
Default: True
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
is True, pickle will try to map the old Python 2 names to the new names
used in Python 3.
encoding : str, optional
What encoding to use when reading Python 2 strings. Only useful when
loading Python 2 generated pickled files on Python 3, which includes
npy/npz files containing object arrays. Values other than 'latin1',
'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
data. Default: 'ASCII'
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
ValueError
The file contains an object array, but allow_pickle=False given.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
else:
fid = file
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of Numpy arrays is loaded
# in. Pickle does not pass on the encoding information to
# Numpy. The unpickling code in numpy.core.multiarray is
# written to assume that unicode data appearing where binary
# should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
#
# Other encoding values can corrupt binary data, and we
# purposefully disallow them. For the same reason, the errors=
# argument is not exposed, as values other than 'strict'
# result can similarly silently corrupt numerical data.
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
if sys.version_info[0] >= 3:
pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = {}
try:
        # Code to distinguish between NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Try a pickle
if not allow_pickle:
                raise ValueError("Cannot load file containing pickled data "
                                 "when allow_pickle=False")
try:
return pickle.load(fid, **pickle_kwargs)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
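# Editor's note: illustrative sketch added for this document, not part of the
# original source. It mirrors the detection logic used in ``load`` above: peek
# at the first bytes of a file to decide between a zip archive (.npz), the
# .npy magic string, or a pickle.
def _example_classify_file(fid):
    """Return 'npz', 'npy' or 'pickle' for an open binary file-like object."""
    zip_prefix = b'PK\x03\x04'
    npy_magic = b'\x93NUMPY'          # same value as format.MAGIC_PREFIX
    n = max(len(zip_prefix), len(npy_magic))
    start = fid.read(n)
    fid.seek(-min(n, len(start)), 1)  # rewind so the caller can still read the data
    if start.startswith(zip_prefix):
        return 'npz'
    if start.startswith(npy_magic):
        return 'npy'
    return 'pickle'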
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for disallowing
pickles include security (loading pickled data can execute arbitrary
code) and portability (pickled objects may not be loadable on different
Python installations, for example if the stored objects require libraries
that are not available, and not all pickled data is compatible between
Python 2 and Python 3).
Default: True
fix_imports : bool, optional
Only useful in forcing objects in object arrays on Python 3 to be
pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
will try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see the module docstring
of `numpy.lib.format` or the Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
if sys.version_info[0] >= 3:
pickle_kwargs = dict(fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = None
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `numpy.lib.format` or the
Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : str
File name of ``.npz`` file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
"""
_savez(file, args, kwds, True)
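# Editor's note: illustrative sketch added for this document, not part of the
# original source. It contrasts ``savez`` and ``savez_compressed`` defined
# above by writing the same highly compressible array to two in-memory buffers.
def _example_savez_vs_savez_compressed():
    import io
    import numpy as np
    data = np.zeros(10000)
    plain, packed = io.BytesIO(), io.BytesIO()
    np.savez(plain, data=data)                # ZIP_STORED: roughly the raw .npy size
    np.savez_compressed(packed, data=data)    # ZIP_DEFLATED: much smaller here
    return plain.tell(), packed.tell()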
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val),
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
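# Editor's note: illustrative sketch added for this document, not part of the
# original source. It demonstrates the staging approach used by ``_savez``
# above: each array is written to a temporary ``.npy`` file on disk and then
# copied into an open ``zipfile.ZipFile`` under the archive name ``<name>.npy``.
def _example_stage_array_into_zip(zipf, name, arr):
    import os
    import tempfile
    import numpy as np
    fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
    os.close(fd)
    try:
        np.save(tmpfile, arr)                     # tmpfile already ends in '.npy'
        zipf.write(tmpfile, arcname=name + '.npy')
    finally:
        os.remove(tmpfile)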
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
def floatconv(x):
        x = x.lower()
if b'0x' in x:
return float.fromhex(asstr(x))
return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.longdouble):
return np.longdouble
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, np.complex):
return lambda x: complex(asstr(x))
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
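# Editor's note: illustrative sketch added for this document, not part of the
# original source. It exercises the private ``_getconv`` helper above to show
# which converter each dtype maps to.
def _example_getconv_usage():
    import numpy as np
    to_int = _getconv(np.dtype(np.int32))     # -> lambda x: int(float(x))
    to_float = _getconv(np.dtype(float))      # -> floatconv (decimal or hex strings)
    to_bool = _getconv(np.dtype(np.bool_))    # -> lambda x: bool(int(x))
    return to_int('3.0'), to_float(b'0x1.8p1'), to_bool('1')   # (3, 3.0, True)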
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str or sequence, optional
The characters or list of characters used to indicate the start of a
comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
.. versionadded:: 1.10.0
The strings produced by the Python float.hex method can be used as
input for floats.
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
if comments is not None:
if isinstance(comments, (basestring, bytes)):
comments = [asbytes(comments)]
else:
comments = [asbytes(comment) for comment in comments]
# Compile regex for comments beforehand
comments = (re.escape(comment) for comment in comments)
regex_comments = re.compile(asbytes('|').join(comments))
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
import gzip
fh = iter(gzip.GzipFile(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
elif sys.version_info[0] == 2:
fh = iter(open(fname, 'U'))
else:
fh = iter(open(fname))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
def split_line(line):
"""Chop off comments, strip, and split at delimiter.
Note that although the file is opened as text, this function
returns bytes.
"""
line = asbytes(line)
if comments is not None:
line = regex_comments.split(asbytes(line), maxsplit=1)[0]
line = line.strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
% line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
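# Editor's note: illustrative sketch added for this document, not part of the
# original source. It combines several of the loadtxt features handled above
# (comments, skiprows and a per-column converter) on an in-memory text table.
def _example_loadtxt_features():
    import io
    import numpy as np

    def yes_no(field):
        # this version of loadtxt hands converters byte strings; newer releases pass str
        field = field.decode('latin1') if isinstance(field, bytes) else field
        return 1.0 if field.strip().lower() == 'yes' else 0.0

    text = u"# comment line\nyear value flag\n2000 1.5 yes\n2001 2.5 no\n"
    return np.loadtxt(io.StringIO(text), skiprows=2, converters={2: yes_no})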
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
           `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
        ``+`` : Forces the result to be preceded by + or -.
        ``0`` : Left pad the number with zeros instead of spaces (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'write'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
try:
fh.write(asbytes(format % tuple(row) + newline))
except TypeError:
raise TypeError("Mismatch between array dtype ('%s') and "
"format specifier ('%s')"
% (str(X.dtype), format))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
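# Editor's note: illustrative sketch added for this document, not part of the
# original source. It exercises the ``fmt`` handling documented above: a
# per-column format list, a custom delimiter and commented header/footer lines.
def _example_savetxt_formats():
    import io
    import numpy as np
    buf = io.BytesIO()                        # this savetxt writes bytes
    table = np.array([[1, 2.5], [3, 4.25]])
    np.savetxt(buf, table, fmt=['%d', '%.3f'], delimiter=', ',
               header='col_a, col_b', footer='end of table')
    return buf.getvalue()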
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
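# Editor's note: illustrative sketch added for this document, not part of the
# original source. It exercises the single-group branch handled above: with one
# group in the pattern, the matches are read as a plain array and then viewed
# as a one-field structured array.
def _example_fromregex_single_group():
    import io
    import numpy as np
    buf = io.BytesIO(b"id=10\nid=20\nid=30\n")
    out = np.fromregex(buf, br"id=(\d+)", [('ident', np.int64)])
    return out['ident']                       # array([10, 20, 30])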
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None, deletechars=None,
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
`.gz` or `.bz2`, the file is first decompressed. Note that
generators must return byte strings in Python 3k.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. Excluded names are appended an underscore:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
      there must not be any header in the file (else a ValueError
      exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] Numpy User Guide, section `I/O with Numpy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
    --------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, basestring):
if sys.version_info[0] == 2:
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
else:
fhd = iter(np.lib._datasource.open(fname, 'rb'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, or generator. "
"(got %s instead)" % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = (
asbytes('').join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
    # Make sure `names` is a list (needed for Python 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
#miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
mdtype = list(zip(names, [np.bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
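# Editor's note: illustrative sketch added for this document, not part of the
# original source. It shows the missing-value machinery described above: empty
# fields are treated as missing and are either filled or masked.
def _example_genfromtxt_missing():
    import io
    import numpy as np
    data = b"1,,3\n4,5,\n"                    # byte strings, as required on Python 3
    filled = np.genfromtxt(io.BytesIO(data), delimiter=',', filling_values=-999)
    masked = np.genfromtxt(io.BytesIO(data), delimiter=',', usemask=True)
    return filled, masked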
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
# Set default kwargs for genfromtxt as relevant to csv import.
kwargs.setdefault("case_sensitive", "lower")
kwargs.setdefault("names", True)
kwargs.setdefault("delimiter", ",")
kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
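# Editor's note: illustrative sketch added for this document, not part of the
# original source. It shows the defaults ``recfromcsv`` layers on genfromtxt:
# comma delimiter, field names taken from the first row (lower-cased), and a
# record-array view with attribute access.
def _example_recfromcsv_usage():
    import io
    import numpy as np
    data = b"Name,Count\nspam,3\neggs,5\n"
    rec = np.recfromcsv(io.BytesIO(data))
    return rec.name, rec.count                # (array of byte strings, array([3, 5]))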
|
bsd-3-clause
|
Savahi/tnn
|
samples/learn.py
|
1
|
7723
|
# -*- coding: utf-8 -*-
import sys
import re
import shelve
import numpy as np
import tensorflow as tf
import tnn.utils as utils
from tnn.network import Network
from tnn.calcdata import CalcData
from tnn.io import prepareData
from tnn.utils import countLabels
def main():
    # Get the name of the file with the data that the network will be trained and tested on (fileWithRates)
fileWithRates = None
if len( sys.argv ) >= 2:
fileWithRates = sys.argv[1].strip()
if fileWithRates == None:
print "Use: %s <train-data-file> <parameters (optional)>.\nExiting..." % ( __file__ )
sys.exit(0)
    # Network and optimization parameters (and their default values)
    numLayers = 1 # Number of hidden layers
    numNodes1 = 36 # Number of nodes in the 1st hidden layer (may be overridden below)
    numNodes2 = 36 # Number of nodes in the 2nd hidden layer (if numLayers > 1) (may be overridden below)
    numNodes3 = 36 # Number of nodes in the 3rd hidden layer (if numLayers > 2) (may be overridden below)
    numNodes4 = 36 # Number of nodes in the 4th hidden layer (if numLayers > 3) (may be overridden below)
    learningRate = 0.050 # Self-explanatory
    prognoseProb = None # Forecast probability: a trade decision is taken only if the value in the "trading" bin is > prognoseProb
    numEpochs = 1000 # Self-explanatory
    optimizer = None # Optimizer type
    balancer = 0.0 # Extra weight for the last bin when computing the cost function. Used if balancer > 0.0
    flipOverTrading = False # If flip-over=yes, model evaluation simulates trading with position reversal.
    summaryDir = None # If summary=yes, a summary folder is created; tensorboard --logdir=<current-date-and-time_summary>
    saveRate = None # How often to save the network configuration (weights); the folder name is the current date and time
    # Read the command-line arguments, which can override the default values above
for argvNum in range( 2, len(sys.argv) ):
matchObj = re.match( r'layers *\= *([0-9\.]+)', sys.argv[argvNum], re.I )
if matchObj:
numLayers = int( matchObj.group(1) )
matchObj = re.match( r'nodes1 *\= *([0-9\.]+)', sys.argv[argvNum], re.I )
if matchObj:
numNodes1 = int( matchObj.group(1) )
matchObj = re.match( r'nodes2 *\= *([0-9\.]+)', sys.argv[argvNum], re.I )
if matchObj:
numNodes2 = int( matchObj.group(1) )
matchObj = re.match( r'nodes3 *\= *([0-9\.]+)', sys.argv[argvNum], re.I )
if matchObj:
numNodes3 = int( matchObj.group(1) )
matchObj = re.match( r'nodes4 *\= *([0-9\.]+)', sys.argv[argvNum], re.I )
if matchObj:
numNodes4 = int( matchObj.group(1) )
matchObj = re.match( r'learning-rate *\= *([0-9\.]+)', sys.argv[argvNum], re.I )
if matchObj:
learningRate = float( matchObj.group(1) )
matchObj = re.match( r'prognose-prob *\= *([0-9\.]+)', sys.argv[argvNum], re.I )
if matchObj:
prognoseProb = float( matchObj.group(1) )
matchObj = re.match( r'epochs *\= *([0-9]+)', sys.argv[argvNum], re.I )
if matchObj:
numEpochs = int( matchObj.group(1) )
matchObj = re.match( r'balancer *\= *([0-9\.\-]+)', sys.argv[argvNum], re.I )
if matchObj:
balancer = np.float64( matchObj.group(1) )
matchObj = re.match( r'optimizer *\= *([a-zA-Z0-9\.\_\-]+)', sys.argv[argvNum], re.I )
if matchObj:
optimizer = matchObj.group(1)
matchObj = re.match( r'flip-over *\= *([yY]|[yY][eE][sS])', sys.argv[argvNum], re.I )
if matchObj:
flipOverTrading = True
matchObj = re.match( r'summary *\= *([yY]|[yY][eE][sS])', sys.argv[argvNum], re.I )
if matchObj:
summaryDir = ""
matchObj = re.match( r'save-rate *\= *([0-9]+)', sys.argv[argvNum], re.I )
if matchObj:
saveRate = int( matchObj.group(1) )
#cdt = CalcData( 5, intraDay=True, tradingDays=[1,2,3], tradingTime=[ [13,None],[14,None],[15,None],[16,None],[17,None] ] )
calcData = CalcData( 5 )
calcData.addLookBackOp( "rsi", 0, 6 )
calcData.addLookBackOp( "stochastic", 0, 6 )
calcData.addLookBackOp( "roc", 0, 6 )
calcData.addLookBackOp( "sma", 0, 6 )
calcData.addLookBackOp( "return", 0, 6 )
calcData.addLookBackOp( "vol", 0, 6 )
calcData.addLookAheadOp( "return", 1, bounds=[] )
    # Prepare the data for the network
trainData, testData = prepareData( fileWithRates=fileWithRates, detachTest=20, calcData=calcData )
if trainData is None:
print "Failed to prepare data.\nExiting..."
sys.exit(0)
print "Labels: " + str( countLabels( trainData['labels'] ) )
# for i in range( len( trainData['profit'] ) ):
# utils.log( str(trainData['labels'][i]) + ":" + str(trainData['profit'][i]) )
# utils.log( str( testData['profit'] ) )
numSamples = trainData['numSamples']
numFeatures = trainData['numFeatures']
numLabels = trainData['numLabels']
    # This string (argText) will be shown later in the plot title
argText = "file:%s, lrn:%g, bln:%g, opt:%s, epo:%d flp:%d" % \
(fileWithRates, learningRate, balancer, optimizer, numEpochs, flipOverTrading)
if numLayers == 1:
argText += " nds:%d" % (numNodes1)
if numLayers >= 2:
argText += " nds1:%d nds2:%d" % ( numNodes1, numNodes2 )
if numLayers >= 3:
argText += " nds3:%d" % (numNodes3)
if numLayers >= 4:
argText += " nds4:%d" % (numNodes4)
if prognoseProb is not None:
argText += " prg:%g" % (prognoseProb)
numNodes = [ numNodes1 ]
if numLayers > 1:
numNodes.append( numNodes2 )
if numLayers > 2:
numNodes.append( numNodes3 )
if numLayers > 3:
numNodes.append( numNodes4 )
nn = Network( numLayers, numNodes, numFeatures, numLabels )
    # For future use - to allow initializing the AdamOptimizer in more detail later on
if optimizer is not None:
if optimizer == "Adam":
optimizer = tf.train.AdamOptimizer( learning_rate = learningRate )
nn.learn( trainData['inputs'], trainData['labels'], trainData['profit'], testData['inputs'], testData['labels'], testData['profit'],
numEpochs=numEpochs, balancer=balancer, autoBalancers=False, learningRate=learningRate, prognoseProb=prognoseProb,
optimizer=optimizer, tradingLabel=None, flipOverTrading=flipOverTrading,
learnIndicators=True, saveRate=saveRate, summaryDir=summaryDir )
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(221)
plt.scatter( nn.costTrain, nn.costTest, marker = '+', color = 'blue', alpha=0.1 )
plt.title( "cost-function: train vs test")
plt.grid()
plt.subplot(222)
plt.scatter( nn.accuracyTrain, nn.accuracyTest, marker = '+', color = 'blue', alpha=0.1 )
plt.title("accuracy: train vs test")
plt.grid()
plt.subplot(223)
plt.scatter( nn.tradeAccuracyTrain, nn.tradeAccuracyTest, marker = '+', color = 'blue', alpha=0.1 )
plt.title("trade accuracy: train vs test")
plt.grid()
plt.subplot(224)
plt.scatter( nn.balanceTrain, nn.balanceTest, marker = '+', color = 'blue', alpha=0.1 )
plt.title("balance: train vs test")
plt.grid()
plt.suptitle( argText )
plt.gcf().set_size_inches( 16, 8 )
plt.savefig( nn.learnDir + ".png", bbox_inches='tight' )
# plt.show()
# end of main
main()
|
mit
|
GGoussar/scikit-image
|
doc/examples/segmentation/plot_thresholding.py
|
1
|
2271
|
"""
============
Thresholding
============
Thresholding is used to create a binary image from a grayscale image [1]_.
.. [1] https://en.wikipedia.org/wiki/Thresholding_%28image_processing%29
.. seealso::
A more comprehensive presentation on
:ref:`sphx_glr_auto_examples_xx_applications_plot_thresholding.py`
"""
######################################################################
# We illustrate how to apply one of the available thresholding algorithms.
# Otsu's method [2]_ calculates an "optimal" threshold (marked by a red line in the
# histogram below) by maximizing the variance between two classes of pixels,
# which are separated by the threshold. Equivalently, this threshold minimizes
# the intra-class variance.
#
# .. [2] http://en.wikipedia.org/wiki/Otsu's_method
#
import matplotlib.pyplot as plt
from skimage import data
from skimage.filters import threshold_otsu
image = data.camera()
thresh = threshold_otsu(image)
binary = image > thresh
fig, axes = plt.subplots(ncols=3, figsize=(8, 2.5))
ax = axes.ravel()
ax[0] = plt.subplot(1, 3, 1, adjustable='box-forced')
ax[1] = plt.subplot(1, 3, 2)
ax[2] = plt.subplot(1, 3, 3, sharex=ax[0], sharey=ax[0], adjustable='box-forced')
ax[0].imshow(image, cmap=plt.cm.gray)
ax[0].set_title('Original')
ax[0].axis('off')
ax[1].hist(image.ravel(), bins=256)
ax[1].set_title('Histogram')
ax[1].axvline(thresh, color='r')
ax[2].imshow(binary, cmap=plt.cm.gray)
ax[2].set_title('Thresholded')
ax[2].axis('off')
plt.show()
######################################################################
# If you are not familiar with the details of the different algorithms and the
# underlying assumptions, it is often difficult to know which algorithm will give
# the best results. Therefore, scikit-image includes a function to evaluate
# thresholding algorithms provided by the library. At a glance, you can select
# the best algorithm for your data without a deep understanding of their
# mechanisms.
#
from skimage.filters import try_all_threshold
img = data.page()
# Here, we specify a radius for local thresholding algorithms.
# If it is not specified, only global algorithms are called.
fig, ax = try_all_threshold(img, radius=20,
figsize=(10, 8), verbose=False)
plt.show()
|
bsd-3-clause
|
evgchz/scikit-learn
|
sklearn/neighbors/unsupervised.py
|
16
|
3198
|
"""Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
    p : integer, optional (default = 2)
        Parameter for the Minkowski metric from
        sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric_params : dict, optional (default = None)
        Additional keyword arguments for the metric function.
Examples
--------
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> neigh.radius_neighbors([0, 0, 1.3], 0.4, return_distance=False)
array([[2]])
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
|
bsd-3-clause
|
DGrady/pandas
|
pandas/core/sorting.py
|
3
|
15943
|
""" miscellaneous sorting / groupby utilities """
import numpy as np
from pandas.compat import long, string_types, PY3
from pandas.core.categorical import Categorical
from pandas.core.dtypes.common import (
_ensure_platform_int,
_ensure_int64,
is_list_like,
is_categorical_dtype)
from pandas.core.dtypes.cast import infer_dtype_from_array
from pandas.core.dtypes.missing import isna
import pandas.core.algorithms as algorithms
from pandas._libs import lib, algos, hashtable
from pandas._libs.hashtable import unique_label_indices
_INT64_MAX = np.iinfo(np.int64).max
def get_group_index(labels, shape, sort, xnull):
"""
For the particular label_list, gets the offsets into the hypothetical list
representing the totally ordered cartesian product of all possible label
combinations, *as long as* this space fits within int64 bounds;
otherwise, though group indices identify unique combinations of
labels, they cannot be deconstructed.
- If `sort`, rank of returned ids preserve lexical ranks of labels.
i.e. returned id's can be used to do lexical sort on labels;
- If `xnull` nulls (-1 labels) are passed through.
Parameters
----------
    labels : sequence of arrays
        Integers identifying levels at each location
    shape : sequence of ints same length as labels
        Number of unique levels at each location
    sort : boolean
        If the ranks of returned ids should match lexical ranks of labels
    xnull : boolean
        If true nulls are excluded. i.e. -1 values in the labels are
        passed through
Returns
-------
An array of type int64 where two elements are equal if their corresponding
    labels are equal at all locations.
"""
def _int64_cut_off(shape):
acc = long(1)
for i, mul in enumerate(shape):
acc *= long(mul)
if not acc < _INT64_MAX:
return i
return len(shape)
def loop(labels, shape):
# how many levels can be done without overflow:
nlev = _int64_cut_off(shape)
# compute flat ids for the first `nlev` levels
stride = np.prod(shape[1:nlev], dtype='i8')
out = stride * labels[0].astype('i8', subok=False, copy=False)
for i in range(1, nlev):
if shape[i] == 0:
stride = 0
else:
stride //= shape[i]
out += labels[i] * stride
if xnull: # exclude nulls
mask = labels[0] == -1
for lab in labels[1:nlev]:
mask |= lab == -1
out[mask] = -1
if nlev == len(shape): # all levels done!
return out
# compress what has been done so far in order to avoid overflow
# to retain lexical ranks, obs_ids should be sorted
comp_ids, obs_ids = compress_group_index(out, sort=sort)
labels = [comp_ids] + labels[nlev:]
shape = [len(obs_ids)] + shape[nlev:]
return loop(labels, shape)
    def maybe_lift(lab, size):  # promote nan values
return (lab + 1, size + 1) if (lab == -1).any() else (lab, size)
labels = map(_ensure_int64, labels)
if not xnull:
labels, shape = map(list, zip(*map(maybe_lift, labels, shape)))
return loop(list(labels), list(shape))
def get_compressed_ids(labels, sizes):
"""
Group_index is offsets into cartesian product of all possible labels. This
space can be huge, so this function compresses it, by computing offsets
(comp_ids) into the list of unique labels (obs_group_ids).
Parameters
----------
labels : list of label arrays
sizes : list of size of the levels
Returns
-------
tuple of (comp_ids, obs_group_ids)
"""
ids = get_group_index(labels, sizes, sort=True, xnull=False)
return compress_group_index(ids, sort=True)
def is_int64_overflow_possible(shape):
the_prod = long(1)
for x in shape:
the_prod *= long(x)
return the_prod >= _INT64_MAX
def decons_group_index(comp_labels, shape):
# reconstruct labels
if is_int64_overflow_possible(shape):
# at some point group indices are factorized,
# and may not be deconstructed here! wrong path!
raise ValueError('cannot deconstruct factorized group indices!')
label_list = []
factor = 1
y = 0
x = comp_labels
for i in reversed(range(len(shape))):
labels = (x - y) % (factor * shape[i]) // factor
np.putmask(labels, comp_labels < 0, -1)
label_list.append(labels)
y = labels * factor
factor *= shape[i]
return label_list[::-1]
def decons_obs_group_ids(comp_ids, obs_ids, shape, labels, xnull):
"""
reconstruct labels from observed group ids
Parameters
----------
    xnull : boolean
        If nulls are excluded, i.e. -1 labels are passed through
"""
if not xnull:
lift = np.fromiter(((a == -1).any() for a in labels), dtype='i8')
shape = np.asarray(shape, dtype='i8') + lift
if not is_int64_overflow_possible(shape):
# obs ids are deconstructable! take the fast route!
out = decons_group_index(obs_ids, shape)
return out if xnull or not lift.any() \
else [x - y for x, y in zip(out, lift)]
i = unique_label_indices(comp_ids)
i8copy = lambda a: a.astype('i8', subok=False, copy=True)
return [i8copy(lab[i]) for lab in labels]
def indexer_from_factorized(labels, shape, compress=True):
ids = get_group_index(labels, shape, sort=True, xnull=False)
if not compress:
ngroups = (ids.size and ids.max()) + 1
else:
ids, obs = compress_group_index(ids, sort=True)
ngroups = len(obs)
return get_group_index_sorter(ids, ngroups)
def lexsort_indexer(keys, orders=None, na_position='last'):
labels = []
shape = []
if isinstance(orders, bool):
orders = [orders] * len(keys)
elif orders is None:
orders = [True] * len(keys)
for key, order in zip(keys, orders):
# we are already a Categorical
if is_categorical_dtype(key):
c = key
# create the Categorical
else:
c = Categorical(key, ordered=True)
if na_position not in ['last', 'first']:
raise ValueError('invalid na_position: {!r}'.format(na_position))
n = len(c.categories)
codes = c.codes.copy()
mask = (c.codes == -1)
if order: # ascending
if na_position == 'last':
codes = np.where(mask, n, codes)
elif na_position == 'first':
codes += 1
else: # not order means descending
if na_position == 'last':
codes = np.where(mask, n, n - codes - 1)
elif na_position == 'first':
codes = np.where(mask, 0, n - codes)
if mask.any():
n += 1
shape.append(n)
labels.append(codes)
return indexer_from_factorized(labels, shape)
def nargsort(items, kind='quicksort', ascending=True, na_position='last'):
"""
This is intended to be a drop-in replacement for np.argsort which
handles NaNs. It adds ascending and na_position parameters.
GH #6399, #5231
"""
# specially handle Categorical
if is_categorical_dtype(items):
return items.argsort(ascending=ascending, kind=kind)
items = np.asanyarray(items)
idx = np.arange(len(items))
mask = isna(items)
non_nans = items[~mask]
non_nan_idx = idx[~mask]
nan_idx = np.nonzero(mask)[0]
if not ascending:
non_nans = non_nans[::-1]
non_nan_idx = non_nan_idx[::-1]
indexer = non_nan_idx[non_nans.argsort(kind=kind)]
if not ascending:
indexer = indexer[::-1]
# Finally, place the NaNs at the end or the beginning according to
# na_position
if na_position == 'last':
indexer = np.concatenate([indexer, nan_idx])
elif na_position == 'first':
indexer = np.concatenate([nan_idx, indexer])
else:
raise ValueError('invalid na_position: {!r}'.format(na_position))
return indexer
class _KeyMapper(object):
"""
Ease my suffering. Map compressed group id -> key tuple
"""
def __init__(self, comp_ids, ngroups, levels, labels):
self.levels = levels
self.labels = labels
self.comp_ids = comp_ids.astype(np.int64)
self.k = len(labels)
self.tables = [hashtable.Int64HashTable(ngroups)
for _ in range(self.k)]
self._populate_tables()
def _populate_tables(self):
for labs, table in zip(self.labels, self.tables):
table.map(self.comp_ids, labs.astype(np.int64))
def get_key(self, comp_id):
return tuple(level[table.get_item(comp_id)]
for table, level in zip(self.tables, self.levels))
def get_flattened_iterator(comp_ids, ngroups, levels, labels):
# provide "flattened" iterator for multi-group setting
mapper = _KeyMapper(comp_ids, ngroups, levels, labels)
return [mapper.get_key(i) for i in range(ngroups)]
def get_indexer_dict(label_list, keys):
""" return a diction of {labels} -> {indexers} """
shape = list(map(len, keys))
group_index = get_group_index(label_list, shape, sort=True, xnull=True)
ngroups = ((group_index.size and group_index.max()) + 1) \
if is_int64_overflow_possible(shape) \
else np.prod(shape, dtype='i8')
sorter = get_group_index_sorter(group_index, ngroups)
sorted_labels = [lab.take(sorter) for lab in label_list]
group_index = group_index.take(sorter)
return lib.indices_fast(sorter, group_index, keys, sorted_labels)
# ----------------------------------------------------------------------
# sorting levels...cleverly?
def get_group_index_sorter(group_index, ngroups):
"""
algos.groupsort_indexer implements `counting sort` and it is at least
O(ngroups), where
ngroups = prod(shape)
shape = map(len, keys)
that is, linear in the number of combinations (cartesian product) of unique
values of groupby keys. This can be huge when doing multi-key groupby.
np.argsort(kind='mergesort') is O(count x log(count)) where count is the
length of the data-frame;
Both algorithms are `stable` sort and that is necessary for correctness of
groupby operations. e.g. consider:
df.groupby(key)[col].transform('first')
"""
count = len(group_index)
alpha = 0.0 # taking complexities literally; there may be
beta = 1.0 # some room for fine-tuning these parameters
do_groupsort = (count > 0 and ((alpha + beta * ngroups) <
(count * np.log(count))))
if do_groupsort:
sorter, _ = algos.groupsort_indexer(_ensure_int64(group_index),
ngroups)
return _ensure_platform_int(sorter)
else:
return group_index.argsort(kind='mergesort')
def compress_group_index(group_index, sort=True):
"""
Group_index is offsets into cartesian product of all possible labels. This
space can be huge, so this function compresses it, by computing offsets
(comp_ids) into the list of unique labels (obs_group_ids).
"""
size_hint = min(len(group_index), hashtable._SIZE_HINT_LIMIT)
table = hashtable.Int64HashTable(size_hint)
group_index = _ensure_int64(group_index)
# note, group labels come out ascending (ie, 1,2,3 etc)
comp_ids, obs_group_ids = table.get_labels_groupby(group_index)
if sort and len(obs_group_ids) > 0:
obs_group_ids, comp_ids = _reorder_by_uniques(obs_group_ids, comp_ids)
return comp_ids, obs_group_ids
def _reorder_by_uniques(uniques, labels):
# sorter is index where elements ought to go
sorter = uniques.argsort()
# reverse_indexer is where elements came from
reverse_indexer = np.empty(len(sorter), dtype=np.int64)
reverse_indexer.put(sorter, np.arange(len(sorter)))
mask = labels < 0
# move labels to right locations (ie, unsort ascending labels)
labels = algorithms.take_nd(reverse_indexer, labels, allow_fill=False)
np.putmask(labels, mask, -1)
# sort observed ids
uniques = algorithms.take_nd(uniques, sorter, allow_fill=False)
return uniques, labels
def safe_sort(values, labels=None, na_sentinel=-1, assume_unique=False):
"""
Sort ``values`` and reorder corresponding ``labels``.
``values`` should be unique if ``labels`` is not None.
Safe for use with mixed types (int, str), orders ints before strs.
.. versionadded:: 0.19.0
Parameters
----------
values : list-like
Sequence; must be unique if ``labels`` is not None.
labels : list_like
Indices to ``values``. All out of bound indices are treated as
"not found" and will be masked with ``na_sentinel``.
na_sentinel : int, default -1
Value in ``labels`` to mark "not found".
Ignored when ``labels`` is None.
assume_unique : bool, default False
When True, ``values`` are assumed to be unique, which can speed up
the calculation. Ignored when ``labels`` is None.
Returns
-------
ordered : ndarray
Sorted ``values``
new_labels : ndarray
Reordered ``labels``; returned when ``labels`` is not None.
Raises
------
TypeError
* If ``values`` is not list-like or if ``labels`` is neither None
nor list-like
* If ``values`` cannot be sorted
ValueError
* If ``labels`` is not None and ``values`` contain duplicates.
"""
if not is_list_like(values):
raise TypeError("Only list-like objects are allowed to be passed to"
"safe_sort as values")
if not isinstance(values, np.ndarray):
# don't convert to string types
dtype, _ = infer_dtype_from_array(values)
values = np.asarray(values, dtype=dtype)
def sort_mixed(values):
# order ints before strings, safe in py3
str_pos = np.array([isinstance(x, string_types) for x in values],
dtype=bool)
nums = np.sort(values[~str_pos])
strs = np.sort(values[str_pos])
return np.concatenate([nums, np.asarray(strs, dtype=object)])
sorter = None
if PY3 and lib.infer_dtype(values) == 'mixed-integer':
# unorderable in py3 if mixed str/int
ordered = sort_mixed(values)
else:
try:
sorter = values.argsort()
ordered = values.take(sorter)
except TypeError:
# try this anyway
ordered = sort_mixed(values)
# labels:
if labels is None:
return ordered
if not is_list_like(labels):
raise TypeError("Only list-like objects or None are allowed to be"
"passed to safe_sort as labels")
labels = _ensure_platform_int(np.asarray(labels))
from pandas import Index
if not assume_unique and not Index(values).is_unique:
raise ValueError("values should be unique if labels is not None")
if sorter is None:
# mixed types
(hash_klass, _), values = algorithms._get_data_algo(
values, algorithms._hashtables)
t = hash_klass(len(values))
t.map_locations(values)
sorter = _ensure_platform_int(t.lookup(ordered))
reverse_indexer = np.empty(len(sorter), dtype=np.int_)
reverse_indexer.put(sorter, np.arange(len(sorter)))
mask = (labels < -len(values)) | (labels >= len(values)) | \
(labels == na_sentinel)
# (Out of bound indices will be masked with `na_sentinel` next, so we may
# deal with them here without performance loss using `mode='wrap'`.)
new_labels = reverse_indexer.take(labels, mode='wrap')
np.putmask(new_labels, mask, na_sentinel)
return ordered, _ensure_platform_int(new_labels)
|
bsd-3-clause
|
marpat/pes_nbo
|
pes_nbo3.py
|
1
|
17084
|
# -*- coding: utf-8; -*-
"""
Originally created on May 4, 2014. Enhanced on July 23, 2014.
Part of the script was adapted from
http://verahill.blogspot.com/2013/09/514-extracting-data-form-pes-scan-with.html
Uses Python 2.7 and libraries as implemented in Anaconda from Continuum Analytics
Run from the terminal window (cmd) or shell as:
>> python pes_nbo3.py output_file.out
Requires Gaussian PES output file (output_file.out) from the Gaussian PES job.
Examples of such files are part of the download in the GitHub repo.
"""
# author: 'Marcel Patek'
# filename: 'pes_nbo3.py'
# date: 7/23/2014
# version: '1.1'
# email: '[email protected]'
# license: 'GNU3'
# usage: python pes_nbo3.py output_file.out
'''
* Copyright (C) 2014 Marcel Patek
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* For a copy of the GNU General Public License,
* see <http://www.gnu.org/licenses/>.
'''
import sys
import os
import re
def print_frame_top(m, c):
print(c * m)
def print_frame_bot(m, c):
print(c * m)
def main(argv):
if len(argv) < 2:
print_frame_top(60, '+')
sys.stderr.write("\n Usage: >>[python] %s gau_output.out\n\n" % (argv[0],))
sys.stderr.write(
"\n or : >>[python] %sy gau_output.out\n\n" % sys.argv[0].split("\\")[len(sys.argv[0].split("\\")) - 1][
0:-1])
print_frame_bot(60, '+')
return 1
if not os.path.exists(argv[1]):
print_frame_top(60, '+')
sys.stderr.write("\n ERROR: *.out file %r was not found!\n\n" % (argv[1],))
print_frame_bot(60, '+')
return 1
if len(getscan(sys.argv[1])) < 1:
print_frame_top(60, '+')
sys.stderr.write("\n ERROR: This does not seem to be the right file. Scan coordinate is missing.\n\n")
print_frame_bot(60, '+')
return 1
def rundif(it, zero):
"""
Create values for relative energy in kcal/mol
:param it: list of energies
:param zero: energy to which other values will be referenced to
"""
for x in it:
ener = x - zero
yield ener * 627.51
def getscan(infile):
"""
Find the 'Scan' keyword in gau.out file
:param infile: Gaussian PES output file
:return: line as string containing word Scan
"""
try:
f = open(infile, 'r')
getcoord = ''
fi = f.readlines()
for line in fi:
if '!' in line and "Scan" in line:
getcoord = line.split() # splits by words
f.close()
return getcoord
except IOError:
print "This does not seem to be the right file. Scan coordinate is missing."
def getrawdata(infile):
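    # State flags while scanning the Gaussian output: `opt` marks that a
    # 'Coordinates (Angstroms)' header was seen, `geo` that its XYZ table has
    # started (toggled by the dashed separator lines), and `optpar` that an
    # 'Optimized Parameters' section was seen, so the following '! <scanned>'
    # line carries the value of the scanned coordinate.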
f = open(infile, 'r')
opt = 0
geo = 0
optpar = 0
coords = []
struct = []
structure = []
energies = []
energy = []
for line in f:
if opt == 1 and geo == 1 and not ("---" in line): # grab the lines of XYZ coordinates
structure += [line.rstrip()]
if 'Optimized Parameters' in line: # Set flags to grab the right strings
optpar = 1
if 'Coordinates (Angstroms)' in line:
if opt == 0:
opt = 1
structure = []
if opt == 1 and "--------------------------" in line:
if geo == 0:
geo = 1
elif geo == 1:
geo = 0
opt = 0
if 'SCF Done' in line:
energy = filter(None, line.rstrip('\n').split(' '))
if 'Optimization completed' in line and (opt == 0 and geo == 0):
energies += [float(energy[4])]
opt = 0
geo = 0
struct += [structure]
structure = []
if optpar == 1 and '! ' + scanned in line:
coord = filter(None, line.rstrip('\n').split(' '))
coords += [coord[3]]
optpar = 0
return struct, energies, coords
def periodictable(elementnumber):
ptable = {1: 'H', 2: 'He',
3: 'Li', 4: 'Be', 5: 'B', 6: 'C', 7: 'N', 8: 'O', 9: 'F', 10: 'Ne',
11: 'Na', 12: 'Mg', 13: 'Al', 14: 'Si', 15: 'P', 16: 'S', 17: 'Cl', 18: 'Ar',
19: 'K', 20: 'Ca',
21: 'Sc', 22: 'Ti', 23: 'V', 24: 'Cr', 25: 'Mn', 26: 'Fe', 27: 'Co', 28: 'Ni', 29: 'Cu', 30: 'Zn',
31: 'Ga', 32: 'Ge', 33: 'As', 34: 'Se', 35: 'Br', 36: 'Kr',
37: 'Rb', 38: 'Sr',
39: 'Y', 40: 'Zr', 41: 'Nb', 42: 'Mo', 43: 'Tc', 44: 'Ru', 45: 'Rh', 46: 'Pd', 47: 'Ag', 48: 'Cd',
49: 'In', 50: 'Sn', 51: 'Sb', 52: 'Te', 53: 'I', 54: 'Xe',
55: 'Cs', 56: 'Ba',
57: 'La', 58: 'Ce', 59: 'Pr', 60: 'Nd', 61: 'Pm', 62: 'Sm', 63: 'Eu', 64: 'Gd', 65: 'Tb', 66: 'Dy',
67: 'Ho', 68: 'Er', 69: 'Tm', 70: 'Yb', 71: 'Lu',
72: 'Hf', 73: 'Ta', 74: 'W', 75: 'Re', 76: 'Os', 77: 'Ir', 78: 'Pt', 79: 'Au', 80: 'Hg',
81: 'Tl', 82: 'Pb', 83: 'Bi', 84: 'Po', 85: 'At', 86: 'Rn',
87: 'Fr', 88: 'Ra',
89: 'Ac', 90: 'Th', 91: 'Pa', 92: 'U', 93: 'Np', 94: 'Pu', 95: 'Am', 96: 'Cm', 97: 'Bk', 98: 'Cf',
99: 'Es', 100: 'Fm', 101: 'Md',
102: 'No', 103: 'Lr', 104: 'Rf', 105: 'Db', 106: 'Sg', 107: 'Bh', 108: 'Hs', 109: 'Mt', 110: 'Ds',
111: 'Rg', 112: 'Cn',
113: 'Uut', 114: 'Fl', 115: 'Uup', 116: 'Lv', 117: 'Uus', 118: 'Uuo'}
element = ptable[elementnumber]
return element
def genxyzstring(coor, elementnumber):
x_str = '%10.5f' % coor[0]
y_str = '%10.5f' % coor[1]
z_str = '%10.5f' % coor[2]
element = periodictable(int(elementnumber))
xyz_string = element + (3 - len(element)) * ' ' + 10 * ' ' + \
(8 - len(x_str)) * ' ' + x_str + 10 * ' ' + (8 - len(y_str)) * ' ' + y_str + 10 * ' ' + \
(8 - len(z_str)) * ' ' + z_str + '\n'
return xyz_string
def getstructures(rawdata, coords, nbo):
for structure, deg in zip(rawdata, coords):
g = open(geo_fn + '_' + deg + '_' + '.gjf', 'w')
cartesian = []
chk = "%chk=" + geo_fn + '_' + deg + '_' + ".chk" + '\n'
g.write(chk)
g.write(card)
note = geo_fn + '_' + deg + "_" + ", sp at " + met + '/' + bas + '\n\n'
g.write(note)
for item in structure:
coords = filter(None, item.split(' '))
coordinates = [float(coords[3]), float(coords[4]), float(coords[5])]
element = coords[1]
cartesian += [genxyzstring(coordinates, element)]
g.write('0 1' + '\n')
for line in cartesian:
g.write(line)
nbo = re.sub("_\d\.?\d?\.?\d?\.?", "_" + deg, nbo)
g.write(nbo)
g.close()
return 0
def getbatch(directory, coords):
"""
Prepare for creating Gaussian batch (bcf) file
:param directory: path to the destination directory
    :param coords: suffix (degrees, distances) to distinguish files by the coordinate step
:return: string of input and destination files
"""
batch = []
for deg in coords:
gjf = directory + '\\' + geo_fn + '_' + deg + '_' + '.gjf'
out = geo_fn + '_' + deg + '_' + '.out'
batch.append(gjf + ', ' + out)
return batch
# Round numbers for pretty output; angles (A) and dihedrals (D) to int, distances (R) and other coordinates (F) to 2 decimals
def list_get(l, coordinate, v = "R"):
coords_rnd = []
if coordinate == "A" or coordinate == "D":
for value in l:
coords_rnd += [str(int(round(float(value))))]
return coords_rnd
else:
for value in l:
coords_rnd += [str(round(float(value), 2))]
return coords_rnd
if __name__ == "__main__":
# Errors in the input - get the usage and errors
if main(sys.argv) == 1:
raise SystemExit
# Read data in from terminal
infile = sys.argv[1]
# Extract file name for later file naming
geo_fn = infile.split('.')[0]
# ######## Menu entries ##
# THEORY
print '\n'
print (15 * '-')
print (" SET THEORY ")
print (15 * '-')
print (" 1. HF")
print (" 2. B3LYP")
print (" 3. M06-2X")
print (" 4. wB97XD")
print (" 5. MP2")
print (" 6. other ..")
print (15 * '-')
is_valid = 0
while not is_valid:
try:
metraw = int(raw_input('Enter your choice [1-6] : '))
is_valid = 1 # set it to 1 to validate input and to terminate the while..not loop
except ValueError, e:
print ("'%s' is not a valid entry." % e.args[0].split(": ")[1])
if metraw == 1:
met = 'HF'
elif metraw == 2:
met = 'B3LYP'
elif metraw == 3:
met = 'M06-2X'
elif metraw == 4:
met = 'wB97XD'
elif metraw == 5:
met = 'MP2'
elif metraw == 6:
met = raw_input('\nType the theory level: ')
if len(met) < 2:
print "\n ---> Wrong entry. B3LYP will be used."
met = 'B3LYP'
else:
met = 'B3LYP'
# BASIS SET
print '\n'
print (15 * '-')
print (" BASIS SET ")
print (15 * '-')
print (" 1. 6-31+G(d)")
print (" 2. 6-311++G(d,p)")
print (" 3. Type other ..")
print (15 * '-')
is_valid = 0
while not is_valid:
try:
basraw = int(raw_input('Enter your choice [1-3] : '))
is_valid = 1 # set it to 1 to validate input and to terminate the while..not loop
except ValueError, e:
print ("'%s' is not a valid entry." % e.args[0].split(": ")[1])
if basraw == 1:
bas = '6-31+G(d)'
elif basraw == 2:
bas = '6-311++G(d,p)'
elif basraw == 3:
bas = raw_input('\nType the basis set: ')
if len(bas) < 2:
print "\n ---> Wrong entry. 6-311++G(d,p) will be used."
bas = '6-311++G(d,p)'
else:
bas = '6-311++G(d,p)'
# How to run NBO
print '\n'
print (15 * '-')
print (" NBO OPTIONS ")
print (15 * '-')
print (" 1. create .47 file only (archive)")
print (" 2. run linked G09-NBO")
print (" 3. run compiled G09-NBO binaries")
print (15 * '-')
is_valid = 0
while not is_valid:
try:
nboraw = int(raw_input('Enter your choice [1-3] : '))
is_valid = 1 # set it to 1 to validate input and to terminate the while..not loop
except ValueError, e:
print ("'%s' is not a valid entry." % e.args[0].split(": ")[1])
if nboraw == 2:
option = 'run linked G09-NBO'
if nboraw == 3:
option = 'run compiled G09-NBO binaries'
# NBO keywords
if nboraw == 1:
deg = ''
nbo = '\n' + "$NBO archive FILE=" + geo_fn + '_0' + "_ $END" + '\n\n'
option = 'create .47 file only (archive)'
# NBO keywords for option 2,3
else:
print '\n'
print '\n' + " You will need to choose NBO keywords."
print " Use GennboHelper to copy/paste keywords to the input 3."
print (15 * '-')
print (" NBO KEYWORDS ")
print (15 * '-')
print (" 1. Keyword set 1 (NBOSUM DIST BNDIDX DIPOLE=0.02 E2PERT=5 PRINT=2)")
print (" 2. Keyword set 2 (STERIC=0.5 DIST E2PERT=5 PRINT=2)")
print (" 3. other ..")
print (15 * '-')
is_valid = 0
while not is_valid:
try:
nbokey = int(raw_input('Enter NBO keywords : '))
is_valid = 1 # set it to 1 to validate input and to terminate the while..not loop
except ValueError, e:
print ("'%s' is not a valid entry." % e.args[0].split(": ")[1])
if nbokey == 1:
keywords = 'NBOSUM DIST BNDIDX DIPOLE=0.02 E2PERT=5 PRINT=2'
nbo = '\n' + "$NBO " + keywords + " $END" + '\n\n'
elif nbokey == 2:
keywords = 'STERIC=0.5 DIST E2PERT=5 PRINT=2'
nbo = '\n' + "$NBO " + keywords + " $END" + '\n\n'
elif nbokey == 3:
keywords = raw_input('\nType/paste the keywords (space separated): ')
nbo = '\n' + "$NBO " + keywords + " $END" + '\n\n'
            if len(keywords) < 3:
                print "\n ---> Wrong entry. 'DEFAULT' will be used."
                keywords = 'NBOSUM NRT STERIC=0.5 DIST BNDIDX DIPOLE=0.02 E2PERT=5 PRINT=2'
                # rebuild the $NBO card so the default keywords are actually used
                nbo = '\n' + "$NBO " + keywords + " $END" + '\n\n'
else:
keywords = 'NBOSUM NRT STERIC=0.5 DIST BNDIDX DIPOLE=0.02 E2PERT=5 PRINT=2'
nbo = '\n' + "$NBO archive FILE=" + geo_fn + '_0' + "_ $END" + '\n\n'
option = 'create .47 file only (archive)'
# ####### Menu ends ########
print "\n\n Theory/Basis: " + met + '/' + bas + '\n'
print " NBO options: " + option + '\n'
scanned = getscan(infile)[1]
sccoord = getscan(infile)[2]
print " Scanned coordinate is: " + sccoord + " with label: " + scanned + '\n'
# Route card
if nboraw == 1:
card = "# " + met + '/' + bas + " pop=nboread sp nosymm" + '\n\n'
elif nboraw == 2:
card = "# " + met + '/' + bas + " external=C:\G09W\gaunbo6.bat POP=NBO6Read sp nosymm" + '\n\n'
elif nboraw == 3:
card = "# " + met + '/' + bas + " pop=nbo6read sp nosymm" + '\n\n'
else:
card = "# " + met + '/' + bas + " pop=nboread sp nosymm" + '\n\n'
rawdata, energies, coords = getrawdata(infile)
# Format coords steps
regexR = re.compile("^R.+")
regexA = re.compile("^A.+")
regexD = re.compile("^D.+")
if regexA.match(scanned):
coordinate = "A"
elif regexR.match(scanned):
coordinate = "R"
elif regexD.match(scanned):
coordinate = "D"
else:
coordinate = "F"
# Call rounding function
coords_round = list_get(coords, coordinate)
# Print string list
# for value in coords_round:
# print value
structures = getstructures(rawdata, coords_round, nbo)
# Write results to a file
g = open(geo_fn + '_energies.dat', 'w') # get energies for graph
for n in range(0, len(coords_round)):
g.write(coords_round[n] + '\t' + str(energies[n]) + '\n')
g.close()
decor = len(os.path.dirname(os.path.realpath(__file__))) + 31
print_frame_top(decor, '*')
directory = os.path.dirname(os.path.realpath(__file__))
print str(
len(energies)) + " files and Gaussian " + geo_fn + "_batch.bcf batch file are in directory: " + '\n' + directory
print_frame_bot(decor, '*')
# write gaussian batch file .bcf
batchf = getbatch(directory, coords_round)
b = open(geo_fn + '_batch.bcf', 'w') # get files into batch file
b.write("!" + '\n'
"!User created batch file" + '\n'
"!start=1" + '\n'
"!" + '\n')
for n in range(0, len(batchf)):
b.write(str(batchf[n]) + '\n')
b.close()
# Prepare for plots and prints
maxmindif = (max(energies) - min(energies)) * 627.51
coords = map(float, coords) # list of strings to floats
rangeX = abs(max(coords) - min(coords)) # find the range
firstEne = min(energies) # reference energy
# reformat energy list
    ene = list(rundif(energies, firstEne))  # (E - reference) * 627.51 -> relative energy in kcal/mol
ene = ["%.2f" % member for member in ene] # format numbers
ene = map(float, ene)
# ploting
if coordinate == "A":
plotcoord = "Angle, deg"
elif coordinate == "R":
plotcoord = "Distance, Angstrom"
elif coordinate == "D":
plotcoord = "Dihedral, deg"
else:
plotcoord = ""
try:
import pylab as pl
from pylab import *
pylab_available = True
except ImportError:
pylab_available = False
print "Pylab and matplotlib modules were not imported. Use the .dat file to print"
if pylab_available:
X = coords
y = ene
pl.ylim(min(y) - 0.1 * maxmindif, max(y) + 0.1 * maxmindif)
pl.xlim(min(X) - 0.1 * rangeX, max(X) + 0.1 * rangeX)
pl.xlabel('Coordinate (' + plotcoord + ')')
pl.ylabel('rel Energy (kcal/mol)')
pl.plot(X, y, 'bo', label='(max-min)dE=%5.2f' % maxmindif + ' kcal/mol')
pl.plot(X, y, ':k')
plt.axhline(y=0, xmin=0, xmax=1, linewidth=1, color='b')
pl.legend(loc='upper right')
locs, labels = yticks()
yticks(locs, map(lambda x: "%.1f" % x, locs * 1e0))
if coordinate == "A" or coordinate == "D":
pl.xticks(np.arange(min(X), max(X)+1, max(X)/(len(coords)-1)))
text(0.0, 1.01, '', fontsize=10, transform=gca().transAxes)
pl.show()
print
else:
exit(1)
|
gpl-3.0
|
depet/scikit-learn
|
examples/plot_train_error_vs_test_error.py
|
8
|
2548
|
"""
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases the performance on train decreases while the performance on test
is optimal within a range of values of the regularization parameter.
The example uses an Elastic-Net regression model, and the performance is
measured using the explained variance, a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the first 50 features are impacting the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import pylab as pl
pl.subplot(2, 1, 1)
pl.semilogx(alphas, train_errors, label='Train')
pl.semilogx(alphas, test_errors, label='Test')
pl.vlines(alpha_optim, pl.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
pl.legend(loc='lower left')
pl.ylim([0, 1.2])
pl.xlabel('Regularization parameter')
pl.ylabel('Performance')
# Show estimated coef_ vs true coef
pl.subplot(2, 1, 2)
pl.plot(coef, label='True coef')
pl.plot(coef_, label='Estimated coef')
pl.legend()
pl.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
pl.show()
|
bsd-3-clause
|
TobyRoseman/SFrame
|
oss_src/unity/python/sframe/data_structures/gframe.py
|
5
|
10768
|
'''
Copyright (C) 2016 Turi
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
from .sframe import SFrame
from ..cython.context import debug_trace as cython_context
from ..util import _is_non_string_iterable
from .sarray import SArray, _create_sequential_sarray
import copy
VERTEX_GFRAME = 0
EDGE_GFRAME = 1
class GFrame(SFrame):
"""
GFrame is similar to SFrame but is associated with an SGraph.
- GFrame can be obtained from either the `vertices` or `edges`
      attribute of any SGraph:
>>> import graphlab
>>> g = graphlab.load_sgraph(...)
>>> vertices_gf = g.vertices
>>> edges_gf = g.edges
- GFrame has the same API as SFrame:
>>> sa = vertices_gf['pagerank']
>>> # column lambda transform
>>> vertices_gf['pagerank'] = vertices_gf['pagerank'].apply(lambda x: 0.15 + 0.85 * x)
>>> # frame lambda transform
>>> vertices_gf['score'] = vertices_gf.apply(lambda x: 0.2 * x['triangle_count'] + 0.8 * x['pagerank'])
>>> del vertices_gf['pagerank']
- GFrame can be converted to SFrame:
>>> # extract an SFrame
>>> sf = vertices_gf.__to_sframe__()
"""
def __init__(self, graph, gframe_type):
self.__type__ = gframe_type
self.__graph__ = graph
self.__sframe_cache__ = None
self.__is_dirty__ = False
def __to_sframe__(self):
return copy.copy(self._get_cache())
#/**************************************************************************/
#/* */
#/* Modifiers */
#/* */
#/**************************************************************************/
def add_column(self, data, name=""):
"""
Adds the specified column to this SFrame. The number of elements in
the data given must match every other column of the SFrame.
Parameters
----------
data : SArray
The 'column' of data.
name : string
The name of the column. If no name is given, a default name is chosen.
"""
# Check type for pandas dataframe or SArray?
if not isinstance(data, SArray):
raise TypeError("Must give column as SArray")
if not isinstance(name, str):
raise TypeError("Invalid column name: must be str")
self.__is_dirty__ = True
with cython_context():
if self._is_vertex_frame():
graph_proxy = self.__graph__.__proxy__.add_vertex_field(data.__proxy__, name)
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
graph_proxy = self.__graph__.__proxy__.add_edge_field(data.__proxy__, name)
self.__graph__.__proxy__ = graph_proxy
def add_columns(self, datalist, namelist):
"""
Adds columns to the SFrame. The number of elements in all columns must
match every other column of the SFrame.
Parameters
----------
datalist : list of SArray
A list of columns
namelist : list of string
A list of column names. All names must be specified.
"""
if not _is_non_string_iterable(datalist):
raise TypeError("datalist must be an iterable")
if not _is_non_string_iterable(namelist):
raise TypeError("namelist must be an iterable")
if not all([isinstance(x, SArray) for x in datalist]):
raise TypeError("Must give column as SArray")
if not all([isinstance(x, str) for x in namelist]):
raise TypeError("Invalid column name in list: must all be str")
for (data, name) in zip(datalist, namelist):
self.add_column(data, name)
def remove_column(self, name):
"""
Removes the column with the given name from the SFrame.
Parameters
----------
name : string
The name of the column to remove.
"""
if name not in self.column_names():
raise KeyError('Cannot find column %s' % name)
self.__is_dirty__ = True
try:
with cython_context():
if self._is_vertex_frame():
assert name != '__id', 'Cannot remove \"__id\" column'
graph_proxy = self.__graph__.__proxy__.delete_vertex_field(name)
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
assert name != '__src_id', 'Cannot remove \"__src_id\" column'
assert name != '__dst_id', 'Cannot remove \"__dst_id\" column'
graph_proxy = self.__graph__.__proxy__.delete_edge_field(name)
self.__graph__.__proxy__ = graph_proxy
except:
self.__is_dirty__ = False
raise
def swap_columns(self, column_1, column_2):
"""
Swaps the columns with the given names.
Parameters
----------
column_1 : string
Name of column to swap
column_2 : string
Name of other column to swap
"""
self.__is_dirty__ = True
with cython_context():
if self._is_vertex_frame():
graph_proxy = self.__graph__.__proxy__.swap_vertex_fields(column_1, column_2)
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
graph_proxy = self.__graph__.__proxy__.swap_edge_fields(column_1, column_2)
self.__graph__.__proxy__ = graph_proxy
def rename(self, names):
"""
Rename the columns using the 'names' dict. This changes the names of
the columns given as the keys and replaces them with the names given as
the values.
Parameters
----------
names : dict[string, string]
Dictionary of [old_name, new_name]
"""
if (type(names) is not dict):
raise TypeError('names must be a dictionary: oldname -> newname')
self.__is_dirty__ = True
with cython_context():
if self._is_vertex_frame():
graph_proxy = self.__graph__.__proxy__.rename_vertex_fields(names.keys(), names.values())
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
graph_proxy = self.__graph__.__proxy__.rename_edge_fields(names.keys(), names.values())
self.__graph__.__proxy__ = graph_proxy
def add_row_number(self, column_name='id', start=0):
if type(column_name) is not str:
raise TypeError("Must give column_name as str")
if column_name in self.column_names():
raise RuntimeError("Column name %s already exists" % str(column_name))
if type(start) is not int:
raise TypeError("Must give start as int")
the_col = _create_sequential_sarray(self.num_rows(), start)
self[column_name] = the_col
return self
def __setitem__(self, key, value):
"""
A wrapper around add_column(s). Key can be either a list or a str. If
value is an SArray, it is added to the SFrame as a column. If it is a
constant value (int, str, or float), then a column is created where
every entry is equal to the constant value. Existing columns can also
be replaced using this wrapper.
"""
if (key in ['__id', '__src_id', '__dst_id']):
raise KeyError('Cannot modify column %s. Changing __id column will\
change the graph structure' % key)
else:
self.__is_dirty__ = True
super(GFrame, self).__setitem__(key, value)
#/**************************************************************************/
#/* */
#/* Read-only Accessor */
#/* */
#/**************************************************************************/
def num_rows(self):
"""
Returns the number of rows.
Returns
-------
out : int
Number of rows in the SFrame.
"""
if self._is_vertex_frame():
return self.__graph__.summary()['num_vertices']
elif self._is_edge_frame():
return self.__graph__.summary()['num_edges']
def num_cols(self):
"""
Returns the number of columns.
Returns
-------
out : int
Number of columns in the SFrame.
"""
return len(self.column_names())
def column_names(self):
"""
Returns the column names.
Returns
-------
out : list[string]
Column names of the SFrame.
"""
if self._is_vertex_frame():
return self.__graph__.__proxy__.get_vertex_fields()
elif self._is_edge_frame():
return self.__graph__.__proxy__.get_edge_fields()
def column_types(self):
"""
Returns the column types.
Returns
-------
out : list[type]
Column types of the SFrame.
"""
if self.__type__ == VERTEX_GFRAME:
return self.__graph__.__proxy__.get_vertex_field_types()
elif self.__type__ == EDGE_GFRAME:
return self.__graph__.__proxy__.get_edge_field_types()
#/**************************************************************************/
#/* */
#/* Internal Private Methods */
#/* */
#/**************************************************************************/
def _get_cache(self):
if self.__sframe_cache__ is None or self.__is_dirty__:
if self._is_vertex_frame():
self.__sframe_cache__ = self.__graph__.get_vertices()
elif self._is_edge_frame():
self.__sframe_cache__ = self.__graph__.get_edges()
else:
raise TypeError
self.__is_dirty__ = False
return self.__sframe_cache__
def _is_vertex_frame(self):
return self.__type__ == VERTEX_GFRAME
def _is_edge_frame(self):
return self.__type__ == EDGE_GFRAME
@property
def __proxy__(self):
return self._get_cache().__proxy__
|
bsd-3-clause
|
Unidata/MetPy
|
v0.9/_downloads/3bc54791e5cd0cc3a49ebff67ae43c26/Simple_Sounding.py
|
3
|
3067
|
# Copyright (c) 2015,2016,2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Simple Sounding
===============
Use MetPy as straightforward as possible to make a Skew-T LogP plot.
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo, SkewT
from metpy.units import units
###########################################
# Change default to be better for skew-T
plt.rcParams['figure.figsize'] = (9, 9)
###########################################
# Upper air data can be obtained using the siphon package, but for this example we will use
# some of MetPy's sample data.
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
df = pd.read_fwf(get_test_data('jan20_sounding.txt', as_file_obj=False),
skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
df['u_wind'], df['v_wind'] = mpcalc.wind_components(df['speed'],
np.deg2rad(df['direction']))
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
###########################################
# We will pull the data out of the example dataset into individual variables and
# assign units.
p = df['pressure'].values * units.hPa
T = df['temperature'].values * units.degC
Td = df['dewpoint'].values * units.degC
wind_speed = df['speed'].values * units.knots
wind_dir = df['direction'].values * units.degrees
u, v = mpcalc.wind_components(wind_speed, wind_dir)
###########################################
skew = SkewT()
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
skew.ax.set_ylim(1000, 100)
# Add the MetPy logo!
fig = plt.gcf()
add_metpy_logo(fig, 115, 100)
###########################################
# Example of defining your own vertical barb spacing
skew = SkewT()
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
# Set spacing interval--Every 50 mb from 1000 to 100 mb
my_interval = np.arange(100, 1000, 50) * units('mbar')
# Get indexes of values closest to defined interval
ix = mpcalc.resample_nn_1d(p, my_interval)
# Plot only values nearest to defined interval values
skew.plot_barbs(p[ix], u[ix], v[ix])
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
skew.ax.set_ylim(1000, 100)
# Add the MetPy logo!
fig = plt.gcf()
add_metpy_logo(fig, 115, 100)
# Show the plot
plt.show()
|
bsd-3-clause
|
miseiler/crosstalker
|
pyvisml/VisML.py
|
1
|
63976
|
"""
PyVisML: A complete python API for interaction with VisANT VisML xml files
Copyright 2013 Michael Seiler
Boston University
[email protected]
This file is part of PyVisML.
PyVisML is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PyVisML is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PyVisML. If not, see <http://www.gnu.org/licenses/>.
"""
import xml.etree.ElementTree as xml
import numpy as N
from warnings import warn
import xml.dom.minidom as minidom
import StringIO, os, sys
VERSION = '1.35'
DEFAULT_SPECIES = 'uno'
def determine_path():
"""Borrowed from wxglade.py"""
try:
root = __file__
if os.path.islink(root):
root = os.path.realpath(root)
return os.path.dirname(os.path.abspath(root))
except:
print "I'm sorry, but something is wrong."
print "There is no __file__ variable. Please contact the author."
sys.exit()
DEFAULT_METHODS_FILE = os.path.join(determine_path(),'data','default_methods.xml')
try:
from matplotlib.colors import ColorConverter as cc
COLORCONVERTER = cc()
except:
COLORCONVERTER = None
def create_empty_visml_tree(default_methods_file=DEFAULT_METHODS_FILE, **attrib):
r = VisMLTree()
if 'ver' not in attrib:
attrib.update({'ver': VERSION})
if 'species' not in attrib:
attrib.update({'species': DEFAULT_SPECIES})
if 'autoFit' not in attrib:
attrib.update({'autoFit': 'true'})
root = r._root = VisAnt(**attrib)
t = xml.parse(default_methods_file)
for elem in t.getroot().findall('method'):
root.append(method(**elem.attrib))
root.append(Nodes(**attrib))
root.append(Edges(**attrib))
return r
def settag(self, value, name):
if value != None:
self.set(name, value)
else:
if name in self.attrib:
del self.attrib[name]
def settext(self, value, name):
obj = eval(name)
v = self.find(name)
if value != None:
if v:
self.remove(v)
self.append(obj(value))
else:
if v is not None:
self.remove(v)
def rgb_list_to_text(lst, method_format=False):
if method_format:
sep = ','
else:
sep = ' '
return sep.join([ str(int(x)) for x in lst[:3] ])
def colorwrap(value, method_format=False):
if isinstance(value, tuple) or isinstance(value, list):
return rgb_list_to_text(value, method_format)
if COLORCONVERTER is not None:
try:
rgb_tuple = COLORCONVERTER.to_rgb(value)
rgb_tuple = N.array(rgb_tuple) * 255
return rgb_list_to_text(rgb_tuple, method_format)
except:
pass
return value
def add_to_set(prop, newelem):
# list properties return a copy of the internal list, and modification is only supported through direct assignment (i.e., node.groups = grouplist)
lstcpy = prop
if lstcpy is not None:
if newelem in lstcpy:
return lstcpy
lstcpy.append(newelem)
else:
lstcpy = [newelem]
return lstcpy
def rem_from_set(prop, name):
if prop is not None:
if name in prop:
lstcpy = set(prop)
lstcpy.remove(name)
return list(lstcpy)
return prop
def get_node_name(node):
assert isinstance(node, VNodes)
if node.isduplicate:
return node.uid
return node.name
class VisMLTree(object):
def __init__(self, filename=None):
self._index = 0
if filename is not None:
self.parse(filename)
def isconnected(self, node1, node2):
"""Takes either node names or node objects as input"""
return self._edges.isconnected(node1, node2)
def validate(self):
for elem in self:
elem._updatestatic()
elem.validate()
def parse(self, filename):
tree = xml.parse(filename)
root = tree.getroot()
assert root.tag == 'VisAnt'
self._root = VisAnt(**root.attrib)
for child in root.getchildren():
self._addbranch(self._root, child)
for elem in self:
elem._updatestatic()
def write(self, filename, prettyprint=True):
"""
Setting the "prettyprint" keyword enables nicely-formatted output via the python xml minidom module
This is very memory-intensive, so if your xml tree is large, set it to False
"""
for elem in self:
elem._updatestatic()
stringout = StringIO.StringIO()
xml.ElementTree(self._root).write(stringout, xml_declaration=True, encoding="utf-8")
# Hack to allow prettyprint through xml.dom.minidom
if prettyprint:
output = minidom.parseString(stringout.getvalue()).toprettyxml()
else:
output = stringout.getvalue()
stringout.close()
f = open(filename, 'w')
f.write(output)
f.close()
@property
def nodes(self):
"""A list of attached nodes"""
return self._root.nodes
@property
def metanodes(self):
"""A list of attached nodes"""
return [ x for x in self.nodes if x.ismetanode ]
@property
def edges(self):
"""A list of edges in the network"""
return self._root.edges
@property
def _nodes(self):
"""The object itself, rather than a list of nodes"""
return self._root.find('Nodes')
@property
def _edges(self):
"""The object itself, rather than a list of edges"""
return self._root.find('Edges')
def __iter__(self):
return self._root.iter()
def add_edge(self, node1, node2, method='M0099', **attrib):
"""
Create a link between node1 and node2
Note that the link is bidirectional. A VEdge is automatically created.
Method is required. Default is M0099, which corresponds to 'unknown'.
"""
try:
assert isinstance(node1, VNodes) and isinstance(node2, VNodes)
except:
raise ValueError, 'Arguments should be of type VNodes'
node1_name = get_node_name(node1)
node2_name = get_node_name(node2)
if not self.isconnected(node1_name, node2_name):
# Create VEdge
edge = self._edges.add_edge(node1, node2, **attrib)
# Add data link
if node1.isduplicate and node2.isduplicate:
node1.parent.add_link(node2.parent.name, method, fromDup=node1.uid, toDup=node2.uid, **attrib)
node2.parent.add_link(node1.parent.name, method, fromDup=node2.uid, toDup=node1.uid, **attrib)
elif node1.isduplicate or node2.isduplicate:
dup, nde = node1, node2
if node2.isduplicate:
nde, dup = node1, node2
dup.parent.add_link(nde.name, method, fromDup=dup.uid, **attrib)
nde.add_link(dup.parent.name, method, toDup=dup.uid, **attrib)
else:
node1.add_link(node2.name, method, **attrib)
node2.add_link(node1.name, method, **attrib)
def remove_edge(self, node1, node2):
# TODO This doesn't support unidirectional linkage removal, since add_edge doesn't support adding them
try:
assert isinstance(node1, VNodes) and isinstance(node2, VNodes)
except:
raise ValueError, 'Arguments should be of type VNodes'
node1_name = get_node_name(node1)
node2_name = get_node_name(node2)
if not self.isconnected(node1_name, node2_name):
return
# XXX If there can be multiple edge links via different methods, this function will not clean up properly!
self._edges.remove_edge(node1, node2)
if node1.isduplicate:
node1 = node1.parent
if node2.isduplicate:
node2 = node2.parent
node1.remove_link(node2_name)
node2.remove_link(node1_name)
def remove_node_from_metanode(self, node, metanode):
"""
Removes node from group metanode
Note that in the case where node is a metanode and contains children, these children
will NOT be added to the parent metanode.
"""
assert metanode.ismetanode
node_name = get_node_name(node)
if node.isduplicate:
if node.group == metanode.name:
node.group = None
else:
node.remove_group(metanode.name)
metanode.remove_child(node_name)
def remove_node(self, node):
"""
Removes the given node.
Because the node can be
1) Connected to other nodes
2) A child of some metanode
3) A metanode with children
4) A duplicate
5) A duplicate which is also the child of a metanode
Extensive cleanup must be performed
"""
node_name = get_node_name(node)
if node.isduplicate:
node.parent.remove(node)
else:
# Delete all duplicates
if node.duplicates is not None:
for n in node.duplicates:
self.remove_node(n)
# If the node is a metanode, remove all group references from other nodes
if node.ismetanode:
for n in self.nodes:
n.remove_group(node.name)
self._nodes.remove(node)
# Remove all edges connected to the node
for n in self.nodes:
if self.isconnected(n, node):
self.remove_edge(n, node)
# Finally, if any metanodes have this as a child, remove those references
for n in self.nodes:
if n.ismetanode:
n.remove_child(node_name)
def add_node(self, name, x, y, w='16', h='16', vlabel=None, **attrib):
index = self._inc_index()
node = self._nodes.add_node(name, x, y, '0', index, w=w, h=h, vlabel=vlabel, **attrib)
return node
def duplicate_node(self, node, x, y, vlabel=None, **attrib):
index = self._inc_index()
return node.add_duplicate(x, y, index, vlabel=vlabel, **attrib)
def add_metanode(self, name, x, y, vlabel=None, children=None, **attrib):
index = self._inc_index()
node = self._nodes.add_node(name, x, y, '3', index, vlabel=vlabel, children=children, **attrib)
return node
def add_node_to_metanode(self, node, metanode, duplicate=True):
"""
Adds a given node to a given metanode
If 'duplicate' is set to True and the node is already part of a group,
the node is duplicated and the duplicate is assigned to the metanode (default VisANT behavior)
This is ignored if the node is itself a duplicate, and the node will be assigned as normal.
Do not set duplicate to False unless you know what you are doing!
As of Feb 27, 2013, there are several VisANT bugs associated with this. Notably, that
hiding/unhiding a metanode which contains non-duplicated nodes will delete
said nodes without warning.
"""
assert metanode.ismetanode
if node.ismetanode:
metanode.add_child(node.name)
node.groups = [metanode.name]
return
if duplicate and not node.isduplicate and node.groups is not None:
node = self.duplicate_node(node, node.x, node.y)
node_name = get_node_name(node)
if node.isduplicate:
node.group = metanode.name
node.parent.add_group(metanode.name)
else:
node.add_group(metanode.name)
metanode.add_child(node_name)
# TODO add_method, add_pathway, add_id, transparent type mechanism (accept/return types, store string internally)
# Make sure deleting works, plus accounting
def add_method(self, name, desc, type, visible=None, weight=None, species=None, color=None):
"""
        name: the method id used in VisANT, required. If you need your own method, put the id in the range M7000-M7999 so that VisANT will not enable querying all interactions of this method in the databases.
        desc: the description of the method, required.
        type: the type of the method, either "E" or "C" to indicate experimental or computational, required.
        visible: indicates whether an edge determined by this method alone is visible or not, default is true, optional.
        weight: the reliability score of the method, optional, not used so far.
"""
self._root.append(method(name=name, desc=desc, type=type, visible=visible, weight=weight, species=species, color=color))
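    # Hedged usage sketch (illustrative): registering a custom computational method in the
    # user range M7000-M7999 recommended above. The name and description are made up.
    #   tree.add_method('M7001', 'custom co-expression evidence', 'C')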
def _inc_index(self):
self._index += 1
return str(self._index)
def _addbranch(self, root, child):
if 'index' in child.attrib:
self._index = max(self._index, int(child.get('index')))
try:
obj = eval(child.tag)
except:
raise ValueError('Unrecognized tag %s' % child.tag)
if child.tag in ('method', 'pathway', 'VScreen', 'exLink', 'Nodes', 'VNodes', 'id', 'group', 'Dup', 'link', 'data', 'Edges', 'VEdge'):
v = obj(**child.attrib)
elif child.tag in ('vlabel', 'children', 'desc'):
v = obj(child.text)
else:
raise NotImplementedError('Unimplemented tag %s' % child.tag)
if child.tag == 'Dup':
v.parent = root
root.append(v)
for c in child.getchildren():
self._addbranch(v, c)
class VisMLElement(xml.Element):
"""
Base class for VisML elements
Inherits xml.Element
"""
def __init__(self, elementname):
xml.Element.__init__(self, elementname)
def __delitem__(self, x):
        warnings.warn('Deletion of child elements is buggy; use parentelement.remove(child) instead')  # TODO
try:
self.remove(x)
except:
# As long as settag is called, this will result in deleting the attribute
self.x = None
def validate(self):
return True
def _updatestatic(self):
pass
class VisAnt(VisMLElement):
"""
root element
ver: indicates the version of visML, this attribute is required. However, it is used only for internal control. For most users, simply put an number that is bigger than 1.32 will be fine.
Note that for compatibility reasons VisAnt will ignore VisML trees that do not begin with ver, e.g., <VisAnt ver=
    species: indicates the species of the network, this attribute is required. If your network is not species-specific,
    or the species is not in the complete list of species supported in VisANT, simply put "uno" to indicate that the species is "unknown".
    This attribute is useful if you need to query databases for further information about your network. For VisANT 2.46 and above, all corresponding database queries will be disabled if the species is "unknown".
    nodecount: indicates the total number of nodes, this attribute is required. Be aware that it is the total number of visible and invisible nodes.
    edgeopp: used for meta graphs only, optional, default value is false.
    fineArt: used to indicate whether to use better graph drawing, such as thick lines, optional, default value is true. Turn it off for large networks.
    autoFit: used to indicate whether to fit the network to the default network window, optional, default false.
    double_click: used to change the default behavior when the user double-clicks a non-meta-node; if the value of this attribute matches the id attribute of an <exLink>, double-clicking on the node will launch the link in the default browser.
    db_online: used to indicate whether to query the database, optional, default is true. This option will be overwritten if the species is unknown. Available after visML 1.36.
    layout: used to lay out the network with the specified type of layout, optional. This attribute supports every type of layout available in VisANT.
    Available after visML 1.36. Note that this is a read-only attribute: if a file with this attribute is loaded into VisANT and later saved by the user,
    the attribute will be gone in the newly saved file. Possible values of this attribute are: circle, spoke, scramble, relax, embedded, and elegant.
    The latter three can take an additional parameter for the number of layout iterations, in the format embedded:100. The default number of iterations is 250.
    Be aware that this number should be bigger if you have a large network.
bgcolor: saved background color
    net_size: Seems to affect the width/height of the network. Changes how the network is displayed, similar to VScreen.
"""
def __init__(self, ver=None, species=None, net_size=None, bgcolor=None, nodecount=None, edgeopp=None, fineArt=None, autoFit=None, double_click=None, db_online=None, layout=None, **kwds):
VisMLElement.__init__(self, 'VisAnt')
# XXX This is a static property now
#self.nodecount = nodecount
self.ver = ver
self.species = species
self.edgeopp = edgeopp # Bool
self.fineArt = fineArt # Bool
self.autoFit = autoFit # Bool
self.layout = layout # circle, spoke, scramble, relax, embedded, elegant
self.double_click = double_click
self.db_online = db_online
self.net_size = net_size
self.bgcolor = bgcolor
def validate(self):
try:
            assert all([self.ver, self.species])
except:
warn('VisAnt root element is missing required tags (ver, species)')
if self.double_click:
try:
assert self.double_click in [ x.id for x in self.findall('exLink') ]
except:
warn('double_click value not found in available exLink elements')
return True
@property
def nodes(self):
if self.find('Nodes') is not None:
return self.find('Nodes').findall('VNodes')
@property
def edges(self):
if self.find('Edges') is not None:
return self.find('Edges').findall('VEdge')
@property
def ver(self):
return self.get('ver')
@ver.setter
def ver(self, value):
try:
assert float(value) >= 1.32
except:
            raise ValueError('Version should be at least 1.32; PyVisML does not support earlier versions')
settag(self, value, 'ver')
@property
def species(self):
return self.get('species')
@species.setter
def species(self, value):
# TODO: Assert species is in default list, otherwise 'uno'
settag(self, value, 'species')
@property
def net_size(self):
return self.get('net_size')
@net_size.setter
def net_size(self, value):
settag(self, value, 'net_size')
@property
def bgcolor(self):
return self.get('bgcolor')
@bgcolor.setter
def bgcolor(self, value):
settag(self, colorwrap(value), 'bgcolor')
@property
def nodecount(self):
if self.nodes is not None:
return len(self.nodes)
return 0
def _updatestatic(self):
settag(self, str(self.nodecount), 'nodecount')
@property
def edgeopp(self):
return self.get('edgeopp')
@edgeopp.setter
def edgeopp(self, value):
try:
if value is not None:
assert value in ['true', 'false']
except:
warn('edgeopp should be boolean')
settag(self, value, 'edgeopp')
@property
def fineArt(self):
return self.get('fineArt')
@fineArt.setter
def fineArt(self, value):
try:
if value is not None:
assert value in ['true', 'false']
except:
warn('fineArt should be boolean')
settag(self, value, 'fineArt')
@property
def autoFit(self):
return self.get('autoFit')
@autoFit.setter
def autoFit(self, value):
try:
if value is not None:
assert value in ['true', 'false']
except:
warn('autoFit should be boolean')
settag(self, value, 'autoFit')
@property
def layout(self):
return self.get('layout')
@layout.setter
def layout(self, value):
# circle, spoke, scramble, relax, embedded, elegant
if value is not None:
v = value.split(':')
try:
assert v[0] in ('circle', 'spoke', 'scramble', 'relax', 'embedded', 'elegant')
except:
warn('Layout must be of the following types: circle, spoke, scramble, relax, embedded, or elegant.\nThe last three may have iterations specified, e.g., "relax:100"')
settag(self, value, 'layout')
@property
def double_click(self):
return self.get('double_click')
@double_click.setter
def double_click(self, value):
settag(self, value, 'double_click')
@property
def db_online(self):
return self.get('db_online')
@db_online.setter
def db_online(self, value):
settag(self, value, 'db_online')
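# --- Hedged usage sketch (illustrative; not part of the original source) --------------
# Shows how a VisAnt root element could be built with the attributes documented above.
# The attribute values below are assumptions chosen purely for illustration.
def _example_visant_root():
    root = VisAnt(ver='1.36', species='uno', fineArt='false', autoFit='true')
    root.layout = 'embedded:100'  # embedded layout with 100 iterations
    return root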
class method(VisMLElement):
"""
Method class
    name: the method id used in VisANT, required. If you need your own method, put the id in the range M7000-M7999 so that VisANT will not enable querying all interactions of this method in the databases.
    desc: the description of the method, required.
    type: the type of the method, either "E" or "C" to indicate experimental or computational, required.
    visible: indicates whether an edge determined by this method alone is visible or not, default is true, optional.
    weight: the reliability score of the method, optional, not used so far.
"""
def __init__(self, name=None, desc=None, type=None, visible=None, weight=None, species=None, color=None):
VisMLElement.__init__(self, 'method')
self.name = name
self.desc = desc
self.type = type
self.color = color
self.weight = weight
self.visible = visible
self.species = species
def validate(self):
try:
            assert all([self.name, self.desc, self.type])
except:
warn('Missing required method data (name, desc, type)')
return True
@property
def name(self):
return self.get('name')
@name.setter
def name(self, value):
if value:
if (value[0] != 'M'):
warn('Method name should be of the form M####, where # are integers')
try:
int(value[1:])
except:
warn('Method name should be of the form M####, where # are integers')
settag(self, value, 'name')
@property
def desc(self):
return self.get('desc')
@desc.setter
def desc(self, value):
settag(self, value, 'desc')
@property
def type(self):
return self.get('type')
@type.setter
def type(self, value):
if value and value not in ('E', 'C'):
warn('Method type should be E or C')
settag(self, value, 'type')
@property
def visible(self):
return self.get('visible')
@visible.setter
def visible(self, value):
settag(self, value, 'visible')
@property
def weight(self):
return self.get('weight')
@weight.setter
def weight(self, value):
settag(self, value, 'weight')
@property
def species(self):
return self.get('species')
@species.setter
def species(self, value):
settag(self, value, 'species')
@property
def color(self):
return self.get('color')
@color.setter
def color(self, value):
settag(self, colorwrap(value, method_format=True), 'color')
class pathway(VisMLElement):
"""
Denotes a pathway annotation. New in VisML 1.35
Has two tags, name (internal ID) and title (a description)
Both are required
"""
def __init__(self, name=None, title=None):
VisMLElement.__init__(self, 'pathway')
self.name = name
self.title = title
def validate(self):
try:
            assert all([self.name, self.title])
except:
warn('Missing required pathway data (name, title)')
return True
@property
def name(self):
return self.get('name')
@name.setter
def name(self, value):
settag(self, value, 'name')
@property
def title(self):
return self.get('title')
@title.setter
def title(self, value):
settag(self, value, 'title')
class VScreen(VisMLElement):
"""
The <VScreen> is an optional element used to retain the zooming level of the network.
    It is not suggested that users customize it.
    In case you do need this element, it is highly suggested that you use VisANT to zoom to the level you prefer and then save the network.
You can then copy the elements into your own VisML file.
x1 y1 x2 y2 w h ps
"""
def __init__(self, x1=None, y1=None, x2=None, y2=None, w=None, h=None, ps=None):
VisMLElement.__init__(self, 'VScreen')
self.x1 = x1
self.x2 = x2
self.y1 = y1
self.y2 = y2
self.w = w
self.h = h
self.ps = ps
def validate(self):
try:
            assert all([self.x1, self.x2, self.y1, self.y2, self.w, self.h, self.ps])
except:
warn('Missing required VScreen data (x1, x2, y1, y2, w, h, ps)')
return True
@property
def x1(self):
return self.get('x1')
@x1.setter
def x1(self, value):
settag(self, value, 'x1')
@property
def x2(self):
return self.get('x2')
@x2.setter
def x2(self, value):
settag(self, value, 'x2')
@property
def y1(self):
return self.get('y1')
@y1.setter
def y1(self, value):
settag(self, value, 'y1')
@property
def y2(self):
return self.get('y2')
@y2.setter
def y2(self, value):
settag(self, value, 'y2')
@property
def w(self):
return self.get('w')
@w.setter
def w(self, value):
settag(self, value, 'w')
@property
def h(self):
return self.get('h')
@h.setter
def h(self, value):
settag(self, value, 'h')
@property
def ps(self):
return self.get('ps')
@ps.setter
def ps(self, value):
settag(self, value, 'ps')
class exLink(VisMLElement):
"""
The <exLink> element allows you to add links to external databases for both node and edge, in associated with element <id>.
When the attribute name of element <id> under <data> matches the attribute name of element <exLink>, a menu will be created in VisANT
with the name determined by the attribute menu_name in element <exLink> and clicking the menu will launch the default browser
with the URL determined by the URL attribute of <exLink> and the value attribute of <id> element under <data> element.
"""
def __init__(self, id=None, menu_name=None, URL=None):
VisMLElement.__init__(self, 'exLink')
        self.id = id
        self.menu_name = menu_name
        self.URL = URL
def validate(self):
try:
            assert all([self.id, self.menu_name, self.URL])
except:
warn('Missing required exLink data (id, menu_name, URL)')
# TODO: Validate that there exists a VNodes element with the correct id
return True
@property
def id(self):
return self.get('id')
@id.setter
def id(self, value):
settag(self, value, 'id')
@property
def menu_name(self):
return self.get('menu_name')
@menu_name.setter
def menu_name(self, value):
settag(self, value, 'menu_name')
@property
def URL(self):
return self.get('URL')
@URL.setter
def URL(self, value):
settag(self, value, 'URL')
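# --- Hedged usage sketch (illustrative; not part of the original source) --------------
# Demonstrates the pairing described in the exLink docstring: an <exLink> whose `id`
# matches the `name` of an <id> element under a node's <data> yields a clickable menu
# entry in VisANT. `root` is assumed to be the VisAnt element and `node` a VNodes; the
# database name, URL and accession below are made up.
def _example_exlink_pairing(root, node):
    root.append(exLink(id='EXAMPLE_DB', menu_name='Example DB', URL='http://example.org/entry?acc='))
    # `id` here is the VisML element class defined below in this module, not the builtin.
    node.data.append(id(name='EXAMPLE_DB', value='P12345'))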
class Nodes(VisMLElement):
"""
Container for VNodes elements
has a single optional element, size (size of nodes)
"""
def __init__(self, size=None, **kwds):
VisMLElement.__init__(self, 'Nodes')
        self.size = size
@property
def size(self):
return self.get('size')
@size.setter
def size(self, value):
settag(self, value, 'size')
def add_node(self, name, x, y, type, index, vlabel=None, children=None, w='16', h='16', **attrib):
attrib.update({'x': x, 'y': y, 'w': w, 'h': h, 'name': name, 'index': index, 'type': type})
node = VNodes(**attrib)
settext(node, vlabel, 'vlabel')
settext(node, children, 'children')
node.append(data(**attrib))
self.append(node)
return node
class VNodes(VisMLElement):
"""
x: x coordinate, required.
y: y coordinate, required.
    counter: reference counter; its value equals the number of links connecting to the node. It is also used to determine whether the node is visible or not; set it to at least 1 if the node should be visible, required.
w: width of the node, required.
h: height of the node, required.
labelOn: determine whether the node label is visible, optional, default false.
    linkDisplayed: used to indicate whether all of the node's links are displayed, specially designed for step-wise expansion of the interaction network, optional, default false. When it is true, the node shows a "-" sign, otherwise a "+" sign.
    linkLoaded: indicates whether the node has been queried against the database, optional, default false. Set it to true if you do not want to query the database when double-clicking the node.
    extend: designed for step-wise expansion of the interaction network, optional, default true. Set it to true if you do not want the node position to be changed when double-clicking the node.
size: node size, optional. Default -1:auto, Range: 4-30.
ncc: RGB value of node color, optional.
labelSize: size of the node label, optional. Default -1:auto, Range: 6-25.
labelPos: position of the label, optional. Default -1:auto, 0:center, 1:left, 2:right, 3:above, 4:below
esymbol: determine whether to show the extension symbol (+/-) or not, optional, default is true.
ksymbol:
group: if the node is in a group, this attribute represents the corresponding group id, optional.
exShape: metanode polygon shape, optional. Current options are None: auto, 2001: convex_polygon, and 2002: round_rectangle
properties not present in the xml:
visible: changes the counter to negative, which prevents the node from being shown
"""
def __init__(self, x=None, y=None, counter=None, w=None, h=None, exShape=None, labelOn=None, linkDisplayed=None, linkLoaded=None, extend=None, size=None, ncc=None, labelStyle=None, labelSize=None, labelPos=None, esymbol=None, ksymbol=None, group=None, axisnode=None, visible=True, eletag='VNodes', dt=None, childVisible=None, **kwds):
# We make an exception here so Dup can completely inherit VNodes methods and properties
VisMLElement.__init__(self, eletag)
# XXX Static property
# self.counter = counter
self.x = x
self.y = y
self.w = w
self.h = h
self.labelOn = labelOn # Bool
self.linkDisplayed = linkDisplayed # Bool
self.linkLoaded = linkLoaded # Bool
self.extend = extend # Bool
self.esymbol = esymbol # Bool
self.ksymbol = ksymbol # Bool
self.size = size
self.ncc = colorwrap(ncc)
self.labelSize = labelSize
self.labelStyle = labelStyle
self.labelPos = labelPos
self.group = group
self.axisnode = axisnode
self.visible = visible
self.exShape = exShape
self.duplicate_index = -1
self.dt = dt
self.childVisible = childVisible
def validate(self):
try:
            assert all([self.x, self.y, self.w, self.h])
except:
warn('Missing required VNodes data (x, y, w, h)')
# TODO: Validate that there are metanodes for all claimed group ids
return True
def add_duplicate(self, x, y, index, vlabel=None, w='16', h='16', **attrib):
if self.isduplicate:
            raise ValueError('Unable to duplicate a duplicate node')
idx = self._inc_dup_index()
uid = '_'.join([self.name, idx])
attrib.update({'x': x, 'y': y, 'w': w, 'h': h, 'uid': uid, 'index': index, 'parent': self})
dup = Dup(**attrib)
settext(dup, vlabel, 'vlabel')
self.append(dup)
return dup
def add_group(self, name):
"""Adds the specified group name to groups. Does not change the group node children! Use VisMLTree.add_node_to_metanode instead"""
if self.isduplicate:
            raise ValueError('Unable to add group to duplicate node. Use node.group = value instead')
self.data.add_group(name)
def remove_group(self, name):
"""Adds the specified group name to groups. Does not change the group node children! Use VisMLTree.remove_node_from_metanode instead"""
if self.isduplicate:
            raise ValueError('Unable to remove group from duplicate node. Use node.group = value instead')
self.data.remove_group(name)
def add_pathway(self, name):
if self.isduplicate:
            raise ValueError('Unable to set pathways for a duplicate node')
self.data.add_pathway(name)
def remove_pathway(self, name):
if self.isduplicate:
            raise ValueError('Unable to set pathways for a duplicate node')
self.data.remove_pathway(name)
def add_child(self, name):
if self.isduplicate:
            raise ValueError('Unable to set children for a duplicate node')
self.children = add_to_set(self.children, name)
def remove_child(self, name):
if self.isduplicate:
            raise ValueError('Unable to set children for a duplicate node')
self.children = rem_from_set(self.children, name)
def add_link(self, to, method, **attrib):
self.data.add_link(to, method, **attrib)
def remove_link(self, name):
self.data.remove_link(name)
def _inc_dup_index(self):
self.duplicate_index += 1
return str(self.duplicate_index)
@property
def isduplicate(self):
if 'uid' in self.attrib:
return True
return False
@property
def type(self):
return self.data.type
@property
def links(self):
return self.data.links
@property
def children(self):
if self.find('children') is not None:
if self.find('children').text is not None:
return self.find('children').text.split(',')[:-1] # format is CHILD1,CHILD2,
@children.setter
def children(self, value):
assert isinstance(value, list) or value is None
if not value:
if self.children is not None:
self.remove(self.find('children'))
return
text = ','.join(value + ['']) # format is CHILD1,CHILD2,
if self.children is not None:
self.find('children').text = text
else:
self.append(children(text))
@property
def data(self):
if self.isduplicate:
            raise ValueError('Duplicate node has no data container')
return self.find('data')
@property
def ismetanode(self):
return self.data.ismetanode
@property
def duplicates(self):
return self.findall('Dup')
@property
def pathways(self):
return self.data.pathways
@pathways.setter
def pathways(self, value):
self.data.pathways = value
@property
def desc(self):
return self.data.desc
@desc.setter
def desc(self, value):
self.data.desc = value
@property
def groups(self):
if self.isduplicate:
return [self.group]
return self.data.groups
@groups.setter
def groups(self, value):
if self.isduplicate:
if isinstance(value, list):
                raise ValueError('Trying to set a group list for a duplicate node!')
self.group = value
return
self.data.groups = value
@property
def name(self):
return self.data.name
@name.setter
def name(self, value):
# TODO: This should regenerate all the links to this node
self.data.name = value
@property
def vlabel(self):
if self.find('vlabel') is not None:
return self.find('vlabel').text
@vlabel.setter
def vlabel(self, value):
settext(self, value, 'vlabel')
@property
def counter(self):
# TODO: Real data
if self.visible:
return 1
return -1
def _updatestatic(self):
settag(self, str(self.counter), 'counter')
# XXX findall has reported [] instead of None in certain test cases
if self.duplicates:
self.duplicate_index = max([ int(uid.split('_')[-1]) for uid in [ dup.uid for dup in self.duplicates ] ])
@property
def x(self):
return self.get('x')
@x.setter
def x(self, value):
settag(self, value, 'x')
@property
def y(self):
return self.get('y')
@y.setter
def y(self, value):
settag(self, value, 'y')
@property
def w(self):
return self.get('w')
@w.setter
def w(self, value):
settag(self, value, 'w')
@property
def h(self):
return self.get('h')
@h.setter
def h(self, value):
settag(self, value, 'h')
@property
def childVisible(self):
return self.get('childVisible')
@childVisible.setter
def childVisible(self, value):
settag(self, value, 'childVisible')
@property
def axisnode(self):
return self.get('axisnode')
@axisnode.setter
def axisnode(self, value):
settag(self, value, 'axisnode')
@property
def labelOn(self):
return self.get('labelOn')
@labelOn.setter
def labelOn(self, value):
try:
if value is not None:
assert value in ['true', 'false']
except:
warn('labelOn should be boolean')
settag(self, value, 'labelOn')
@property
def exShape(self):
return self.get('exShape')
@exShape.setter
def exShape(self, value):
try:
if value is not None:
assert value in ['2001', '2002']
except:
warn('Current exShape support includes None, "2001", and "2002", which is auto, convex_polygon, and round_rectangle, respectively')
value = None
settag(self, value, 'exShape')
@property
def linkDisplayed(self):
return self.get('linkDisplayed')
@linkDisplayed.setter
def linkDisplayed(self, value):
try:
if value is not None:
assert value in ['true', 'false']
except:
warn('linkDisplayed should be boolean')
settag(self, value, 'linkDisplayed')
@property
def linkLoaded(self):
return self.get('linkLoaded')
@linkLoaded.setter
def linkLoaded(self, value):
try:
if value is not None:
assert value in ['true', 'false']
except:
warn('linkLoaded should be boolean')
settag(self, value, 'linkLoaded')
@property
def size(self):
return self.get('size')
@size.setter
def size(self, value):
settag(self, value, 'size')
@property
def extend(self):
return self.get('extend')
@extend.setter
def extend(self, value):
try:
if value is not None:
assert value in ['true', 'false']
except:
warn('extend should be boolean')
settag(self, value, 'extend')
@property
def esymbol(self):
return self.get('esymbol')
@esymbol.setter
def esymbol(self, value):
try:
if value is not None:
assert value in ['true', 'false']
except:
warn('esymbol should be boolean')
settag(self, value, 'esymbol')
@property
def ksymbol(self):
return self.get('ksymbol')
@ksymbol.setter
def ksymbol(self, value):
try:
if value is not None:
assert value in ['true', 'false']
except:
warn('ksymbol should be boolean')
settag(self, value, 'ksymbol')
@property
def ncc(self):
return self.get('ncc')
@ncc.setter
def ncc(self, value):
settag(self, colorwrap(value), 'ncc')
@property
def dt(self):
return self.get('dt')
@dt.setter
def dt(self, value):
# Relevant VisANT codeblock:
# //node shape
# public int CIRCLE=0,RECTANGLE=4, VBALL=62, CIRCLE_COMPOUND=63,TRIANGLE=64, DIAMOND=65, CIRCLE_DRUG=69;
# public int HEXAGON=66, OCTAGON=67, SQUARE=68, ROUND_RECTANGLE=3, RECTANGLE_3D=1;
# //10-19 resevered for the shape fitted for the label
# public int RECTANGLE_FIT=10,ROUND_RECTANGLE_FIT=11,RECTANGLE_3D_FIT=19;
# public int EXP_CONVEXITY=2001, EXP_RECTANGLE=2002, EXP_AUTO=-1;
# public int EXPRESSION_PLOT=1000;
shapes = {'circle': 0, 'rectangle': 4, 'vball': 62, 'circle_compound': 63, 'triangle': 64,
'diamond': 65, 'circle_drug': 69, 'hexagon': 66, 'octagon': 67, 'square': 68,
'round_rectangle': 3, 'rectangle_3D': 1, 'rectangle_fit': 10, 'round_rectangle_fit': 11,
'rectangle_3D_fit': 19, 'exp_convexity': 2001, 'exp_rectangle': 2002, 'exp_auto': -1,
'expression_plot': 1000}
try:
v = str(int(value))
except:
# Shape name given
if value is not None:
try:
assert value in shapes
v = str(shapes[value])
except:
warn('Unknown shape. Allowed values: ' + ', '.join(shapes.keys()) + '\n')
v = None
else:
v = None
settag(self, v, 'dt')
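    # Hedged usage sketch (illustrative): the dt setter above accepts either the integer
    # codes from the VisANT source or a key of the `shapes` dict, e.g.
    #   node.dt = 'triangle'   # stored as '64'
    #   node.dt = 4            # rectangle, stored as '4'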
@property
def labelPos(self):
return self.get('labelPos')
@labelPos.setter
def labelPos(self, value):
settag(self, value, 'labelPos')
@property
def labelSize(self):
return self.get('labelSize')
@labelSize.setter
def labelSize(self, value):
settag(self, value, 'labelSize')
@property
def labelStyle(self):
return self.get('labelStyle')
@labelStyle.setter
def labelStyle(self, value):
settag(self, value, 'labelStyle')
@property
def group(self):
return self.get('group')
@group.setter
def group(self, value):
# TODO: Validate the existence of a metanode to assign this to?
settag(self, value, 'group')
class vlabel(VisMLElement):
"""
Optional node label, no children or tags (only text)
"""
def __init__(self, text):
VisMLElement.__init__(self, 'vlabel')
self.text = text
class children(VisMLElement):
"""
Metanode child container, no children or tags (only text)
"""
def __init__(self, text):
VisMLElement.__init__(self, 'children')
self.text = text
class desc(VisMLElement):
"""
data or link child container, no children or tags (only text)
"""
def __init__(self, text):
VisMLElement.__init__(self, 'desc')
self.text = text
class id(VisMLElement):
"""
child of data or link
the <id> element can also be used to create HTTP link by matching to the id attribute of the <exLink> element.
"""
def __init__(self, name=None, value=None):
VisMLElement.__init__(self, 'id')
self.name = name
self.value = value
def validate(self):
try:
            assert all([self.name, self.value])
except:
warn('Element id missing required tags (self.name, self.value)')
return True
@property
def name(self):
return self.get('name')
@name.setter
def name(self, value):
settag(self, value, 'name')
@property
def value(self):
return self.get('value')
@value.setter
def value(self, value):
settag(self, value, 'value')
class group(VisMLElement):
"""
child of data
These elements will appear if the node represented by the data element is in a group. This element has two attributes: name determines the type of group ("common" for most cases),
    while the value represents the group ids delimited by ",", in case there is more than one group (in which case the node itself must have duplications).
"""
def __init__(self, name=None, value=None):
VisMLElement.__init__(self, 'group')
self.name = name
self.value = value
def validate(self):
try:
            assert all([self.name, self.value])
except:
warn('Element group missing required tags (self.name, self.value)')
return True
@property
def name(self):
return self.get('name')
@name.setter
def name(self, value):
settag(self, value, 'name')
@property
def value(self):
return self.get('value')
@value.setter
def value(self, value):
settag(self, value, 'value')
class Dup(VNodes):
"""
    In case of duplication, this element will appear under the <VNodes> element. This element describes visual properties of the duplicated nodes and has similar attributes to <VNodes>, except for the following:
uid: the identification of the duplicated node, required.
group: if the duplicated node is in a group, this attribute denotes the corresponding group id. optional.
"""
def __init__(self, uid=None, index=None, x=None, y=None, counter=None, w=None, h=None, labelOn=None, linkDisplayed=None, linkLoaded=None,
extend=None, size=None, ncc=None, labelSize=None, labelPos=None, esymbol=None, ksymbol=None, group=None, visible=True, parent=None):
VNodes.__init__(self, x=x, y=y, counter=counter, w=w, h=h, labelOn=labelOn, linkDisplayed=linkDisplayed, linkLoaded=linkLoaded, extend=extend,
size=size, ncc=ncc, labelSize=labelSize, labelPos=labelPos, esymbol=esymbol, ksymbol=ksymbol, group=group, visible=visible, eletag='Dup')
self.parent = parent
self.uid = uid
self.index = index
def validate(self):
try:
            assert all([self.uid])
except:
warn('Dup nodes require uid to be specified')
return True
@property
def uid(self):
return self.get('uid')
@uid.setter
def uid(self, value):
settag(self, value, 'uid')
@property
def index(self):
return self.get('index')
@index.setter
def index(self, value):
settag(self, value, 'index')
class link(VisMLElement):
"""
child of data
This element has following attributes:
to: the node name that this link connects to, required.
toDup: If this node points to a specific duplicate in the target, its uid should be here
fromDup: If a duplicate in this node points to a target, its uid should be here
method: the method id associated with the link. If there is a literature reference corresponding pubmed id can be appended as the format shown below. required.
toType: integer number to determine the type of the to-end of the edge, optional, default is 0. Its value can be ranged (please reference KEGG database for the meaning of different edge type:
fromType: integer number to determine the type of the from-end of the edge, optional. The value has the exact same range as toType.
weight: value for edge weight. Can specify multiple method weights with weight=MXXXX:value, currently unsupported by PyVisML
"""
# XXX Support method weights
def __init__(self, to=None, method=None, toType=None, fromType=None, weight=None, desc=None, toDup=None, fromDup=None, **kwds):
VisMLElement.__init__(self, 'link')
self.to = to
self.method = method
self.toType = toType
self.fromType = fromType
self.desc = desc
self.fromDup = fromDup
self.toDup = toDup
self.weight = weight
def validate(self):
try:
            assert all([self.to, self.method])
except:
warn('link element missing required data (to, method)')
return True
@property
def target(self):
if self.toDup is not None:
return self.toDup
return self.to
@property
def desc(self):
if self.find('desc') is not None:
return self.find('desc').text
@desc.setter
def desc(self, value):
settext(self, value, 'desc')
@property
def to(self):
return self.get('to')
@to.setter
def to(self, value):
settag(self, value, 'to')
@property
def method(self):
return self.get('method')
@method.setter
def method(self, value):
if value:
if (value[0] != 'M'):
warn('Method name should be of the form M####, where # are integers')
try:
int(value[1:])
except:
warn('Method name should be of the form M####, where # are integers')
settag(self, value, 'method')
@property
def toType(self):
return self.get('toType')
@toType.setter
def toType(self, value):
settag(self, value, 'toType')
@property
def fromDup(self):
return self.get('fromDup')
@fromDup.setter
def fromDup(self, value):
settag(self, value, 'fromDup')
@property
def toDup(self):
return self.get('toDup')
@toDup.setter
def toDup(self, value):
settag(self, value, 'toDup')
@property
def fromType(self):
return self.get('fromType')
@fromType.setter
def fromType(self, value):
settag(self, value, 'fromType')
@property
def weight(self):
return self.get('weight')
@weight.setter
def weight(self, value):
settag(self, value, 'weight')
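# --- Hedged usage sketch (illustrative; not part of the original source) --------------
# Builds a <link> element as described above; the target node name is made up and the
# method id is placed in the user range M7000-M7999.
def _example_link_element():
    ln = link(to='YFG2', method='M7001')
    ln.toType = '1'  # optional arrow type at the "to" end (see the KEGG edge types)
    return ln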
class data(VisMLElement):
"""
VNode data child
This element has following attributes:
name: default node name, required.
index: integer used to identify node, should be different for each <data> element, including <Dup>, required.
type: type of the node, optional, default 0. built in value=0:normal protein/gene, 1:chemical compound, 2:KEGG Map,3: general group node, 4:Protein Complex, 5:Node of Go Term, 6:Pathway Node, 7:KEGG group node, >=100:user added
    alias: a list of aliases delimited by ",".
The <data> element may have following child elements:
<desc>: description of the node, will be shown in the tooltip when mouse-over the node, optional.
<id>: external database ids of the node, optional. If its name attribute matches the id attribute of <exLink> element,
a menu will be created to allow the http link to external database as mentioned earlier. There can be more than one id elements.
    <link>: this element represents a link that starts from the node represented by this element and stops at the node indicated by the to attribute, optional (a node can have no links/edges).
    Be aware that one edge may have more than one link.
    <group>: this element will appear if the node represented by the data element is in one or more groups.
    This element has two attributes: name determines the type of group ("common" for most cases),
    while the value represents the group ids delimited by ",", in case there is more than one group (in which case the node itself must have duplications).
"""
def __init__(self, name=None, index=None, type=None, alias=None, pws=None, **kwds):
VisMLElement.__init__(self, 'data')
self.ismetanode = None # Set this first because self.type assignment will overwrite it
self.name = name
self.index = index
self.type = type
self.alias = alias
self.pws = pws
def validate(self):
try:
            assert all([self.name, self.index])
except:
warn('Missing required data tags (name, index)')
if self.type is not None:
            if self.ismetanode:
pass
# TODO: Validate that it has children
# TODO: Assert index is unique
return True
def add_link(self, to, method, **attrib):
# TODO: dict
target = to
if 'toDup' in attrib:
target = attrib['toDup']
if self.links is not None:
for edge in self.links:
if edge.target == target:
return
self.append(link(to=to, method=method, **attrib))
def remove_link(self, target):
# TODO: dict
if self.links is not None:
for edge in self.links:
if edge.target == target:
self.remove(edge)
return
def remove_group(self, name):
self.groups = rem_from_set(self.groups, name)
def remove_pathway(self, name):
self.pathways = rem_from_set(self.pathways, name)
def add_group(self, name):
self.groups = add_to_set(self.groups, name)
def add_pathway(self, name):
self.pathways = add_to_set(self.pathways, name)
@property
def _group_dict(self):
"""Returns a dict of group children, where keys are 'name' tags and values are the group children elements themselves"""
if self.find('group') is not None:
return dict([ (grp.name, grp) for grp in self.findall('group') ])
@property
def desc(self):
if self.find('desc') is not None:
return self.find('desc').text
@desc.setter
def desc(self, value):
settext(self, value, 'desc')
@property
def aliases(self):
if self.alias:
return self.alias.split(',')
def _set_groups(self, name, value):
if value:
if self._group_dict is not None and name in self._group_dict:
self._group_dict[name].value = value
else:
self.append(group(name=name, value=value))
else:
if self._group_dict is not None and name in self._group_dict:
self.remove(self._group_dict[name])
@property
def pathways(self):
"""A list of pathways the node belongs to. Note that the data tag 'pws' will override any 'group' child element of data which has 'pathways' as its name"""
if self.pws:
return self.pws.split(' ')
@pathways.setter
def pathways(self, value):
assert isinstance(value, list) or value is None
plist = value
if value is not None:
plist = ' '.join(value)
settag(self, plist, 'pws')
self._set_groups('pathway', plist)
@property
def groups(self):
"""A list of groups the node belongs to, if any."""
if self._group_dict and 'common' in self._group_dict:
return self._group_dict['common'].value.split(',')
@groups.setter
def groups(self, value):
assert isinstance(value, list) or value is None
glist = value
if value is not None:
glist = ','.join(value)
self._set_groups('common', glist)
@property
def links(self):
return self.findall('link')
@property
def ids(self):
return self.findall('id')
@property
def type(self):
return self.get('type')
@type.setter
def type(self, value):
        if value is not None:
            try:
                if int(value) in range(3, 8):
                    self.ismetanode = True
            except (TypeError, ValueError):
                warn('Type should be an integer')
settag(self, value, 'type')
@property
def pws(self):
return self.get('pws')
@pws.setter
def pws(self, value):
settag(self, value, 'pws')
@property
def name(self):
return self.get('name')
@name.setter
def name(self, value):
#if isinstance(value, str):
# value = value.upper()
settag(self, value, 'name')
@property
def index(self):
return self.get('index')
@index.setter
def index(self, value):
settag(self, value, 'index')
@property
def alias(self):
return self.get('alias')
@alias.setter
def alias(self, value):
settag(self, value, 'alias')
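# --- Hedged usage sketch (illustrative; not part of the original source) --------------
# Builds a <data> element directly (normally done through Nodes.add_node) and exercises
# the helpers documented above. The node names, index and method id are made up.
def _example_data_element():
    d = data(name='YFG1', index='42', type='0')
    d.add_group('complexA')       # creates/updates the <group name="common"> child
    d.add_link('YFG2', 'M7001')   # appends a <link> child unless one to that target exists
    return d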
class Edges(VisMLElement):
"""
edges are listed under <Edges> element named as VEdge.
Element <Edges> has following attributes:
thick_line: the option to use the line thickness to represent the edge weight, default is true, optional.
color_line: the option to use the line color to represent the edge weight, default is false, optional.
exclude_meta: the option to exclude the metaedges when use weight cutoff, optional.
Following lists an example of this element with all attributes on.
<Edges thick_line="false" color_line="true" exclude_meta="true">
"""
def __init__(self, thick_line=None, color_line=None, exclude_meta=None, opacity=None, **kwds):
VisMLElement.__init__(self, 'Edges')
self.thick_line = thick_line
self.color_line = color_line
self.exclude_meta = exclude_meta
self.opacity = opacity
self._edge_dict = {}
def validate(self):
# TODO: assert there are edges?
return True
def isconnected(self, node1, node2):
"""Returns true if a link between node names name1 and name2 is found in the edge list"""
node1_name, node2_name = node1, node2
if isinstance(node1, VNodes):
node1_name = get_node_name(node1)
if isinstance(node2, VNodes):
node2_name = get_node_name(node2)
if node1_name in self._edge_dict and node2_name in self._edge_dict[node1_name]:
return True
return False
def add_edge(self, node1, node2, **attrib):
"""
Helper function to create an edge between node1 and node2
Updates the internal edge dictionary
"""
        # Accept either VNodes instances or plain node names (mirrors isconnected above)
        node1_name, node2_name = node1, node2
        if isinstance(node1, VNodes):
            node1_name = get_node_name(node1)
        if isinstance(node2, VNodes):
            node2_name = get_node_name(node2)
if not self.isconnected(node1_name, node2_name):
attrib.update({'linkFrom': node1_name, 'to': node2_name})
edge = VEdge(**attrib)
self.append(edge)
self._edge_dict.setdefault(node1_name, set()).add(node2_name)
self._edge_dict.setdefault(node2_name, set()).add(node1_name)
def remove_edge(self, node1, node2):
"""
Helper function to remove an edge between node1 and node2
Updates the internal edge dictionary
"""
        # Accept either VNodes instances or plain node names (mirrors isconnected above)
        node1_name, node2_name = node1, node2
        if isinstance(node1, VNodes):
            node1_name = get_node_name(node1)
        if isinstance(node2, VNodes):
            node2_name = get_node_name(node2)
if self.isconnected(node1_name, node2_name):
for edge in self.edges:
if (edge.linkFrom == node1_name and edge.to == node2_name) or (edge.linkFrom == node2_name and edge.to == node1_name):
self.remove(edge)
self._edge_dict[node1_name].remove(node2_name)
self._edge_dict[node2_name].remove(node1_name)
return
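    # Hedged usage sketch (illustrative; the node variables are made up):
    #   edges.add_edge(node_a, node_b, elabel='binds')  # elabel ends up on the new VEdge
    #   edges.isconnected(node_a, node_b)               # -> True
    #   edges.remove_edge(node_a, node_b)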
@property
def edges(self):
return self.findall('VEdge')
@property
def thick_line(self):
return self.get('thick_line')
@thick_line.setter
def thick_line(self, value):
settag(self, value, 'thick_line')
@property
def opacity(self):
return self.get('opacity')
@opacity.setter
def opacity(self, value):
settag(self, value, 'opacity')
@property
def color_line(self):
return self.get('color_line')
@color_line.setter
def color_line(self, value):
settag(self, value, 'color_line')
@property
def exclude_meta(self):
return self.get('exclude_meta')
@exclude_meta.setter
def exclude_meta(self, value):
settag(self, value, 'exclude_meta')
def _updatestatic(self):
ed = {}
for edge in self.edges:
ed.setdefault(edge.linkFrom, set()).add(edge.to)
ed.setdefault(edge.to, set()).add(edge.linkFrom)
self._edge_dict = ed
class VEdge(VisMLElement):
"""
edges are listed under <Edges> element named as VEdge.
Element <VEdge> has following attributes:
from: the id of from node, required. Renamed to linkFrom to stop collisions with python keywords
to: the id of to node, required.
elabel: the label of the edge, optional.
la: boolean flag to determine whether the edge label shown be visible, optional, default false.
"""
def __init__(self, linkFrom=None, to=None, elabel=None, la=None, **kwds):
VisMLElement.__init__(self, 'VEdge')
if 'from' in kwds:
self.linkFrom = kwds['from']
else:
self.linkFrom = linkFrom
self.to = to
self.elabel = elabel
self.la = la
def validate(self):
try:
assert all(self.linkFrom, self.to)
except:
warn('Element VEdge missing required tags (linkFrom [from], to)')
# TODO: Validate link from and to nodes as existent
return True
@property
def linkFrom(self):
return self.get('from')
@linkFrom.setter
def linkFrom(self, value):
settag(self, value, 'from')
@property
def to(self):
return self.get('to')
@to.setter
def to(self, value):
settag(self, value, 'to')
@property
def elabel(self):
return self.get('elabel')
@elabel.setter
def elabel(self, value):
settag(self, value, 'elabel')
@property
def la(self):
return self.get('la')
@la.setter
def la(self, value):
try:
if value is not None:
assert value in ['true', 'false']
except:
warn('la should be boolean')
settag(self, value, 'la')
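# --- Hedged usage sketch (illustrative; not part of the original source) --------------
# Builds a <VEdge>; note that the XML attribute "from" is exposed as `linkFrom` to avoid
# the Python keyword, while **kwds still accepts 'from' when parsing. Node names are made up.
def _example_vedge():
    return VEdge(linkFrom='YFG1', to='YFG2', la='true')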
|
gpl-3.0
|
shrkey/ardupilot
|
Tools/mavproxy_modules/lib/magcal_graph_ui.py
|
108
|
8248
|
# Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from matplotlib.backends.backend_wxagg import FigureCanvas
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from pymavlink.mavutil import mavlink
from MAVProxy.modules.lib import wx_processguard
from MAVProxy.modules.lib.wx_loader import wx
import geodesic_grid as grid
class MagcalPanel(wx.Panel):
_status_markup_strings = {
mavlink.MAG_CAL_NOT_STARTED: 'Not started',
mavlink.MAG_CAL_WAITING_TO_START: 'Waiting to start',
mavlink.MAG_CAL_RUNNING_STEP_ONE: 'Step one',
mavlink.MAG_CAL_RUNNING_STEP_TWO: 'Step two',
mavlink.MAG_CAL_SUCCESS: '<span color="blue">Success</span>',
mavlink.MAG_CAL_FAILED: '<span color="red">Failed</span>',
}
_empty_color = '#7ea6ce'
_filled_color = '#4680b9'
def __init__(self, *k, **kw):
super(MagcalPanel, self).__init__(*k, **kw)
facecolor = self.GetBackgroundColour().GetAsString(wx.C2S_HTML_SYNTAX)
fig = plt.figure(facecolor=facecolor, figsize=(1,1))
self._canvas = FigureCanvas(self, wx.ID_ANY, fig)
self._canvas.SetMinSize((300,300))
self._id_text = wx.StaticText(self, wx.ID_ANY)
self._status_text = wx.StaticText(self, wx.ID_ANY)
self._completion_pct_text = wx.StaticText(self, wx.ID_ANY)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self._id_text)
sizer.Add(self._status_text)
sizer.Add(self._completion_pct_text)
sizer.Add(self._canvas, proportion=1, flag=wx.EXPAND)
self.SetSizer(sizer)
ax = fig.add_subplot(111, axis_bgcolor=facecolor, projection='3d')
self.configure_plot(ax)
def configure_plot(self, ax):
extra = .5
lim = grid.radius + extra
ax.set_xlim3d(-lim, lim)
ax.set_ylim3d(-lim, lim)
ax.set_zlim3d(-lim, lim)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.invert_zaxis()
ax.invert_xaxis()
ax.set_aspect('equal')
self._polygons_collection = Poly3DCollection(
grid.sections_triangles,
edgecolors='#386694',
)
ax.add_collection3d(self._polygons_collection)
def update_status_from_mavlink(self, m):
status_string = self._status_markup_strings.get(m.cal_status, '???')
self._status_text.SetLabelMarkup(
'<b>Status:</b> %s' % status_string,
)
def mavlink_magcal_report(self, m):
self.update_status_from_mavlink(m)
self._completion_pct_text.SetLabel('')
def mavlink_magcal_progress(self, m):
facecolors = []
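        # Each byte of completion_mask covers 8 geodesic sections: bit j of byte i set
        # means section i * 8 + j has been hit, so it is drawn with the "filled" colour.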
for i, mask in enumerate(m.completion_mask):
for j in range(8):
section = i * 8 + j
if mask & 1 << j:
facecolor = self._filled_color
else:
facecolor = self._empty_color
facecolors.append(facecolor)
self._polygons_collection.set_facecolors(facecolors)
self._canvas.draw()
self._id_text.SetLabelMarkup(
'<b>Compass id:</b> %d' % m.compass_id
)
self._completion_pct_text.SetLabelMarkup(
'<b>Completion:</b> %d%%' % m.completion_pct
)
self.update_status_from_mavlink(m)
_legend_panel = None
@staticmethod
def legend_panel(*k, **kw):
if MagcalPanel._legend_panel:
return MagcalPanel._legend_panel
p = MagcalPanel._legend_panel = wx.Panel(*k, **kw)
sizer = wx.BoxSizer(wx.HORIZONTAL)
p.SetSizer(sizer)
marker = wx.Panel(p, wx.ID_ANY, size=(10, 10))
marker.SetBackgroundColour(MagcalPanel._empty_color)
sizer.Add(marker, flag=wx.ALIGN_CENTER)
text = wx.StaticText(p, wx.ID_ANY)
text.SetLabel('Sections not hit')
sizer.Add(text, border=4, flag=wx.ALIGN_CENTER | wx.LEFT)
marker = wx.Panel(p, wx.ID_ANY, size=(10, 10))
marker.SetBackgroundColour(MagcalPanel._filled_color)
sizer.Add(marker, border=10, flag=wx.ALIGN_CENTER | wx.LEFT)
text = wx.StaticText(p, wx.ID_ANY)
text.SetLabel('Sections hit')
sizer.Add(text, border=4, flag=wx.ALIGN_CENTER | wx.LEFT)
return p
class MagcalFrame(wx.Frame):
def __init__(self, conn):
super(MagcalFrame, self).__init__(
None,
wx.ID_ANY,
title='Magcal Graph',
)
self.SetMinSize((300, 300))
self._conn = conn
self._main_panel = wx.ScrolledWindow(self, wx.ID_ANY)
self._main_panel.SetScrollbars(1, 1, 1, 1)
self._magcal_panels = {}
self._sizer = wx.BoxSizer(wx.VERTICAL)
self._main_panel.SetSizer(self._sizer)
idle_text = wx.StaticText(self._main_panel, wx.ID_ANY)
idle_text.SetLabelMarkup('<i>No calibration messages received yet...</i>')
idle_text.SetForegroundColour('#444444')
self._sizer.AddStretchSpacer()
self._sizer.Add(
idle_text,
proportion=0,
flag=wx.ALIGN_CENTER | wx.ALL,
border=10,
)
self._sizer.AddStretchSpacer()
self._timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.timer_callback, self._timer)
self._timer.Start(200)
def add_compass(self, id):
if not self._magcal_panels:
self._sizer.Clear(deleteWindows=True)
self._magcal_panels_sizer = wx.BoxSizer(wx.HORIZONTAL)
self._sizer.Add(
self._magcal_panels_sizer,
proportion=1,
flag=wx.EXPAND,
)
legend = MagcalPanel.legend_panel(self._main_panel, wx.ID_ANY)
self._sizer.Add(
legend,
proportion=0,
flag=wx.ALIGN_CENTER,
)
self._magcal_panels[id] = MagcalPanel(self._main_panel, wx.ID_ANY)
self._magcal_panels_sizer.Add(
self._magcal_panels[id],
proportion=1,
border=10,
flag=wx.EXPAND | wx.ALL,
)
def timer_callback(self, evt):
close_requested = False
mavlink_msgs = {}
while self._conn.poll():
m = self._conn.recv()
if isinstance(m, str) and m == 'close':
close_requested = True
continue
if m.compass_id not in mavlink_msgs:
# Keep the last two messages so that we get the last progress
# if the last message is the calibration report.
mavlink_msgs[m.compass_id] = [None, m]
else:
l = mavlink_msgs[m.compass_id]
l[0] = l[1]
l[1] = m
if close_requested:
self._timer.Stop()
self.Destroy()
return
if not mavlink_msgs:
return
needs_fit = False
for k in mavlink_msgs:
if k not in self._magcal_panels:
self.add_compass(k)
needs_fit = True
if needs_fit:
self._sizer.Fit(self)
for k, l in mavlink_msgs.items():
for m in l:
if not m:
continue
panel = self._magcal_panels[k]
if m.get_type() == 'MAG_CAL_PROGRESS':
panel.mavlink_magcal_progress(m)
elif m.get_type() == 'MAG_CAL_REPORT':
panel.mavlink_magcal_report(m)
|
gpl-3.0
|
mYstar/McEA
|
analysis/visualization/plot_scatter3d.py
|
1
|
1294
|
'''
==============
3D scatterplot
==============
'''
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import sys
import random
import csv
file = sys.argv[1]
points = int(sys.argv[2])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# read the optimal values
optimal = []
with open('../solutions_dtlz/DTLZ7.3D.pf', 'rb') as csvfile:
num_lines = sum(1 for line in csvfile)
csvfile.seek(0)
fitreader = csv.reader(csvfile, delimiter='\t')
for row in fitreader:
if random.random() < float(points) / num_lines:
optimal.append(map(float, row[:-1]))
optimal = zip(*optimal)
xo = optimal[0]
yo = optimal[1]
zo = optimal[2]
ax.scatter(xo, yo, zo, c=( 1.0, 0.0, 0.0, 0.1 ), marker='.')
# read the fitness values
fitness = []
with open(file, 'rb') as csvfile:
num_lines = sum(1 for line in csvfile)
csvfile.seek(0)
fitreader = csv.reader(csvfile, delimiter='\t')
for row in fitreader:
if random.random() < float(points) / num_lines:
fitness.append(map(float, row[:-1]))
fitness = zip(*fitness)
xs = fitness[0]
ys = fitness[1]
zs = fitness[2]
ax.scatter(xs, ys, zs, c='b', marker='^')
ax.set_xlabel('crit 1')
ax.set_ylabel('crit 2')
ax.set_zlabel('crit 3')
plt.show()
|
apache-2.0
|
mpclay/Spectral2D
|
analysis/TaylorGreen.py
|
1
|
6980
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright (C) 2014 by Matthew Clay <[email protected]>
#
# File: TaylorGreen.py
# Author: Matthew Clay
#
# This script post-processes Taylor-Green restart files to assess the temporal
# accuracy of the Spectral2D code. The Taylor-Green vortex is an exact solution
# of the Navier-Stokes equations on a periodic domain, and has solution:
#
# psi(x,y,t) = sin(x)*sin(y)*F(t)
# w(x,y,t) = 2*sin(x)*sin(y)*F(t)
# u(x,y,t) = sin(x)*cos(y)*F(t)
# v(x,y,t) = -cos(x)*sin(y)*F(t),
#
# where psi is the streamfunction, w is the vorticity, u is the velocity in the
# x direction, v is the velocity in the y direction, F(t) = exp(-2*nu*t), and
# nu is the kinematic viscosity.
#
# NOTE: In this analysis we assume that the final time and viscosity for all of
# the simulations are the same. Doing temporal analysis without these parameters
# being the same would be useless.
#
import numpy as np
import h5py as h5
import matplotlib.pyplot as plt
# Function to calculate the L2 error between the exact and numerical solution.
def L2Error(dx, dy, psiE, wE, uE, vE, psiN, wN, uN, vN):
psiDiff = np.subtract(psiE, psiN)
wDiff = np.subtract(wE, wN)
uDiff = np.subtract(uE, uN)
vDiff = np.subtract(vE, vN)
errPsi = np.sqrt(np.sum(np.square(psiDiff))*dx*dy)
errW = np.sqrt(np.sum(np.square(wDiff))*dx*dy)
errU = np.sqrt(np.sum(np.square(uDiff))*dx*dy)
errV = np.sqrt(np.sum(np.square(vDiff))*dx*dy)
return errPsi, errW, errU, errV
# Function to calculate the LInf error between the exact and numerical solution.
def LInfError(psiE, wE, uE, vE, psiN, wN, uN, vN):
errPsi = np.max(np.abs(np.subtract(psiE, psiN)))
errW = np.max(np.abs(np.subtract(wE, wN)))
errU = np.max(np.abs(np.subtract(uE, uN)))
errV = np.max(np.abs(np.subtract(vE, vN)))
return errPsi, errW, errU, errV
# List of restart files to use when assessing temporal accuracy.
rests = ['REST_000001-00100.h5', \
'REST_000001-01000.h5', \
'REST_000001-10000.h5']
nRest = len(rests)
# Grid size and spacing.
fid = h5.File('GRID.h5', 'r')
nx = fid['Header'].attrs['nx']
ny = fid['Header'].attrs['ny']
dx = 2.0*np.pi/float(nx)
dy = 2.0*np.pi/float(ny)
# Get the grid.
x = np.zeros((nx, ny), np.float64)
y = np.zeros((nx, ny), np.float64)
x[:,:] = np.transpose(fid['Domain_00001']['x'])
y[:,:] = np.transpose(fid['Domain_00001']['y'])
fid.close()
# Form the necessary components to calculate the exact solution.
sinx = np.sin(x)
cosx = np.cos(x)
siny = np.sin(y)
cosy = np.cos(y)
# Calculate the exact solutions.
fid = h5.File(rests[0], 'r')
tEnd = fid['Header'].attrs['time']
nu = fid['Header'].attrs['nu']
F = np.exp(-2.0*nu*tEnd)
psiE = np.multiply(sinx, siny)*F
wE = 2.0*np.multiply(sinx, siny)*F
uE = np.multiply(sinx, cosy)*F
vE = -1.0*np.multiply(cosx, siny)*F
fid.close()
# Figure out the time steps and errors for each simulation.
dt = np.zeros(nRest, dtype=np.float64)
L2Err = np.zeros((nRest, 4), dtype=np.float64)
LInfErr = np.zeros((nRest, 4), dtype=np.float64)
psiN = np.zeros((nx, ny), np.float64)
wN = np.zeros((nx, ny), np.float64)
uN = np.zeros((nx, ny), np.float64)
vN = np.zeros((nx, ny), np.float64)
for i in range(nRest):
# Get the time step for this solution.
fid = h5.File(rests[i], 'r')
tEnd = fid['Header'].attrs['time']
nadv = fid['Header'].attrs['nadv']
    dt[i] = tEnd/float(nadv)
#
# Get the data for this simulation.
psiN[:,:] = np.transpose(fid['FlowData']['Psi'])
wN[:,:] = np.transpose(fid['FlowData']['Omega'])
uN[:,:] = np.transpose(fid['FlowData']['u'])
vN[:,:] = np.transpose(fid['FlowData']['v'])
#
# Calculate the L2 error for this simulation.
psiL2, wL2, uL2, vL2 = L2Error(dx, dy, psiE, wE, uE, vE, psiN, wN, uN, vN)
L2Err[i,0] = psiL2
L2Err[i,1] = wL2
L2Err[i,2] = uL2
L2Err[i,3] = vL2
#
# Calculate the LInf error for the simulation.
psiInf, wInf, uInf, vInf = LInfError(psiE, wE, uE, vE, psiN, wN, uN, vN)
LInfErr[i,0] = psiInf
LInfErr[i,1] = wInf
LInfErr[i,2] = uInf
LInfErr[i,3] = vInf
#
# Close the data file.
fid.close()
# Calculate the error orders.
L2Order = np.zeros((nRest,4), dtype=np.float64)
LInfOrder = np.zeros((nRest,4), dtype=np.float64)
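# Observed order of accuracy between successive runs,
#   p_i = (ln E_{i-1} - ln E_i) / (ln dt_{i-1} - ln dt_i),
# computed below separately for the L2 and LInf errors of psi, w, u and v.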
for i in range(1,nRest):
dtDiff = np.log(dt[i-1]) - np.log(dt[i])
#
# L2 errors.
psiL2Diff = np.log(L2Err[i-1,0]) - np.log(L2Err[i,0])
wL2Diff = np.log(L2Err[i-1,1]) - np.log(L2Err[i,1])
uL2Diff = np.log(L2Err[i-1,2]) - np.log(L2Err[i,2])
vL2Diff = np.log(L2Err[i-1,3]) - np.log(L2Err[i,3])
L2Order[i,0] = psiL2Diff/dtDiff
L2Order[i,1] = wL2Diff/dtDiff
L2Order[i,2] = uL2Diff/dtDiff
L2Order[i,3] = vL2Diff/dtDiff
#
# LInf errors
psiLInfDiff = np.log(LInfErr[i-1,0]) - np.log(LInfErr[i,0])
wLInfDiff = np.log(LInfErr[i-1,1]) - np.log(LInfErr[i,1])
uLInfDiff = np.log(LInfErr[i-1,2]) - np.log(LInfErr[i,2])
vLInfDiff = np.log(LInfErr[i-1,3]) - np.log(LInfErr[i,3])
LInfOrder[i,0] = psiLInfDiff/dtDiff
LInfOrder[i,1] = wLInfDiff/dtDiff
LInfOrder[i,2] = uLInfDiff/dtDiff
LInfOrder[i,3] = vLInfDiff/dtDiff
# Write out the error analysis to file.
hdr = ' dt ' + \
' psiL2Err. ' + \
' psiL2Ord. ' + \
' wL2Err. ' + \
' wL2Ord. ' + \
' uL2Err. ' + \
' uL2Ord. ' + \
' vL2Err. ' + \
' vL2Ord. ' + \
' psiLIErr. ' + \
' psiLIOrd. ' + \
' wLIErr. ' + \
' wLIOrd. ' + \
' uLIErr. ' + \
' uLIOrd. ' + \
' vLIErr. ' + \
' vLIOrd. '
np.savetxt('Error.dat', np.column_stack((dt, \
L2Err[:,0], L2Order[:,0], \
L2Err[:,1], L2Order[:,1], \
L2Err[:,2], L2Order[:,2], \
L2Err[:,3], L2Order[:,3], \
LInfErr[:,0], LInfOrder[:,0], \
LInfErr[:,1], LInfOrder[:,1], \
LInfErr[:,2], LInfOrder[:,2], \
LInfErr[:,3], LInfOrder[:,3])), \
header=hdr, fmt='%11.6e')
|
gpl-2.0
|
ykpgrr/Artificial_Neural_Network
|
Talking_Data/TalkingData_Yakup_Gorur.py
|
1
|
7179
|
'''
MIT License
Copyright (c) 2016 Yakup Görür
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import datetime
import pandas as pd
import numpy as np
from sklearn.cross_validation import train_test_split
import xgboost as xgb
from operator import itemgetter
import random
import zipfile
import time
import shutil
from sklearn.metrics import log_loss
random.seed(2016)
def run_xgb(train, test, features, target, random_state=0):
eta = 0.025
max_depth = 7
subsample = 0.75
colsample_bytree = 0.75
start_time = time.time()
print(
'XGBoost params. ETA: {}, MAX_DEPTH: {}, SUBSAMPLE: {}, COLSAMPLE_BY_TREE: {}'.format(eta, max_depth, subsample,
colsample_bytree))
params = {
"objective": "multi:softprob",
"num_class": 12,
"booster": "gbtree",
"eval_metric": "mlogloss",
"eta": eta,
"max_depth": max_depth,
"subsample": subsample,
"colsample_bytree": colsample_bytree,
"silent": 1,
"seed": random_state,
}
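    # With the "multi:softprob" objective, gbm.predict() returns one row of
    # num_class (here 12) probabilities per sample, which is the shape the
    # submission writer in this script relies on.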
num_boost_round = 500
early_stopping_rounds = 20
test_size = 0.2
X_train, X_valid = train_test_split(train, test_size=test_size, random_state=random_state)
print('Length train:', len(X_train.index))
print('Length valid:', len(X_valid.index))
y_train = X_train[target]
y_valid = X_valid[target]
dtrain = xgb.DMatrix(X_train[features], y_train)
dvalid = xgb.DMatrix(X_valid[features], y_valid)
watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist, early_stopping_rounds=early_stopping_rounds,
verbose_eval=True)
print("Validating...")
check = gbm.predict(xgb.DMatrix(X_valid[features]), ntree_limit=gbm.best_iteration)
score = log_loss(y_valid.tolist(), check)
imp = get_importance(gbm, features)
print('Importance array: ', imp)
print("Predict test set...")
test_prediction = gbm.predict(xgb.DMatrix(test[features]), ntree_limit=gbm.best_iteration)
print('Training time: {} minutes'.format(round((time.time() - start_time) / 60, 2)))
return test_prediction.tolist(), score
def create_submission(score, test, prediction):
now = datetime.datetime.now()
sub_file = 'submission_' + str(score) + '_' + str(now.strftime("%Y-%m-%d-%H-%M")) + '.csv'
print('Writing submission: ', sub_file)
f = open(sub_file, 'w')
f.write('device_id,F23-,F24-26,F27-28,F29-32,F33-42,F43+,M22-,M23-26,M27-28,M29-31,M32-38,M39+\n')
total = 0
test_val = test['device_id'].values
for i in range(len(test_val)):
str1 = str(test_val[i])
for j in range(12):
str1 += ',' + str(prediction[i][j])
str1 += '\n'
total += 1
f.write(str1)
f.close()
def map_column(table, f):
labels = sorted(table[f].unique())
mappings = dict()
for i in range(len(labels)):
mappings[labels[i]] = i
table = table.replace({f: mappings})
return table
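# Illustrative behaviour of map_column (hypothetical frame):
#   >>> df = pd.DataFrame({'group': ['M23-26', 'F23-', 'M23-26']})
#   >>> map_column(df, 'group')['group'].tolist()
#   [1, 0, 1]
# The sorted unique labels ['F23-', 'M23-26'] are encoded as 0 and 1.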
def create_feature_map(features):
outfile = open('xgb.fmap', 'w')
for i, feat in enumerate(features):
outfile.write('{0}\t{1}\tq\n'.format(i, feat))
outfile.close()
def get_importance(gbm, features):
create_feature_map(features)
importance = gbm.get_fscore(fmap='xgb.fmap')
importance = sorted(importance.items(), key=itemgetter(1), reverse=True)
return importance
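# get_fscore() maps each feature name to the number of splits that use it
# across all boosted trees, so the sorted list ranks features from most to
# least used.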
def print_features_importance(imp):
for i in range(len(imp)):
print("# " + str(imp[i][1]))
print('output.remove(\'' + imp[i][0] + '\')')
def read_train_test():
#Inputfile
print('Read app events...')
ape = pd.read_csv("/Users/yakup/Downloads/TalkingData/app_events.csv")
ape['installed'] = ape.groupby(['event_id'])['is_installed'].transform('sum')
ape['active'] = ape.groupby(['event_id'])['is_active'].transform('sum')
ape.drop(['is_installed', 'is_active'], axis=1, inplace=True)
ape.drop_duplicates('event_id', keep='first', inplace=True)
ape.drop(['app_id'], axis=1, inplace=True)
#Inputfile
print('Read events...')
events = pd.read_csv("/Users/yakup/Downloads/TalkingData/events.csv", dtype={'device_id': np.str})
events['counts'] = events.groupby(['device_id'])['event_id'].transform('count')
events = pd.merge(events, ape, how='left', on='event_id', left_index=True)
events_small = events[['device_id', 'counts', 'installed', 'active']].drop_duplicates('device_id', keep='first')
#Inputfile
print('Read brands...')
pbd = pd.read_csv("/Users/yakup/Downloads/TalkingData/phone_brand_device_model.csv", dtype={'device_id': np.str})
pbd.drop_duplicates('device_id', keep='first', inplace=True)
pbd = map_column(pbd, 'phone_brand')
pbd = map_column(pbd, 'device_model')
#Inputfile
# Train
print('Read train...')
train = pd.read_csv("/Users/yakup/Downloads/TalkingData/gender_age_train.csv", dtype={'device_id': np.str})
train = map_column(train, 'group')
train = train.drop(['age'], axis=1)
train = train.drop(['gender'], axis=1)
train = pd.merge(train, pbd, how='left', on='device_id', left_index=True)
train = pd.merge(train, events_small, how='left', on='device_id', left_index=True)
train.fillna(-1, inplace=True)
#Inputfile
# Test
print('Read test...')
test = pd.read_csv("/Users/yakup/Downloads/TalkingData/gender_age_test.csv", dtype={'device_id': np.str})
test = pd.merge(test, pbd, how='left', on='device_id', left_index=True)
test = pd.merge(test, events_small, how='left', on='device_id', left_index=True)
test.fillna(-1, inplace=True)
# Features
features = list(test.columns.values)
features.remove('device_id')
return train, test, features
train, test, features = read_train_test()
print('Length of train: ', len(train))
print('Length of test: ', len(test))
print('Features [{}]: {}'.format(len(features), sorted(features)))
test_prediction, score = run_xgb(train, test, features, 'group')
print("LS: {}".format(round(score, 5)))
create_submission(score, test, test_prediction)
|
mit
|
lucabaldini/ximpol
|
ximpol/examples/grs1915_polfrac_plot.py
|
1
|
6072
|
#!/usr/bin/env python
#
# Copyright (C) 2016, the ximpol team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import pyregion
import numpy
from ximpol import XIMPOL_CONFIG, XIMPOL_DATA, XIMPOL_EXAMPLES
from ximpol import xpColor
from ximpol.utils.logging_ import logger
from ximpol.core.pipeline import xPipeline
from ximpol.evt.binning import xBinnedMap, xBinnedModulationCube
from ximpol.core.spline import xInterpolatedUnivariateSpline
from ximpol.srcmodel.img import xFITSImage
from ximpol.utils.matplotlib_ import pyplot as plt
def buildspline(spindegree):
MIN_ENERGY = 0.1
MAX_ENERGY = 15.
POL_FILE_PATH = os.path.join(XIMPOL_CONFIG, 'ascii', 'bh_spin/Polarization_spin%s.txt'%(spindegree))
# Build the polarization degree as a function of the energy.
_energy, _pol_degree, _pol_angle = numpy.loadtxt(POL_FILE_PATH, unpack=True)
    # Convert the polarization degree from percent to a fraction
    _pol_degree /= 100.
_mask = (_energy >= MIN_ENERGY)*(_energy <= MAX_ENERGY)
_energy = _energy[_mask]
_pol_degree = _pol_degree[_mask]
fmt = dict(xname='Energy', yname='Polarization degree')
pol_degree_spline = xInterpolatedUnivariateSpline(_energy, _pol_degree, k=1, **fmt)
#Pol angle in radians
_pol_angle = numpy.deg2rad(_pol_angle)
#Switched to have degrees and not radians
#_pol_angle = _pol_angle
#_mask = (_energy >= MIN_ENERGY)*(_energy <= MAX_ENERGY)
#_energy = _energy[_mask]
_pol_angle = _pol_angle[_mask]
fmt = dict(xname='Energy', yname='Polarization angle [rad]')
pol_angle_spline = xInterpolatedUnivariateSpline(_energy, _pol_angle, k=1, **fmt)
return pol_degree_spline, pol_angle_spline
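# Note: the Polarization_spin*.txt files are assumed to hold three columns
# (energy, polarization degree in percent, polarization angle in degrees),
# as implied by the unpacking and unit conversions above.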
def fetch_mcubepath(spindegree):
return os.path.join(XIMPOL_DATA,'grs1915_105_spin%s_mcube.fits'%spindegree)
#from grs1519 import SIM_DURATION
SIM_DURATION = 100000
NUM_RUNS = 8
def plot(angle=False):
spin05_pol_degree_spline, spin05_pol_angle_spline = buildspline(0.5)
spin05_mcube = xBinnedModulationCube(fetch_mcubepath(0.5))
spin09_pol_degree_spline, spin09_pol_angle_spline = buildspline(0.9)
spin09_mcube = xBinnedModulationCube(fetch_mcubepath(0.9))
spin998_pol_degree_spline, spin998_pol_angle_spline = buildspline(0.998)
spin998_mcube = xBinnedModulationCube(fetch_mcubepath(0.998))
spin05_mcube.fit()
spin09_mcube.fit()
spin998_mcube.fit()
spin05_fit_results = spin05_mcube.fit_results[0]
spin09_fit_results = spin09_mcube.fit_results[0]
spin998_fit_results = spin998_mcube.fit_results[0]
plt.figure('Polarization degree')
spin05_mcube.plot_polarization_degree(show=False, color='blue')
spin05_pol_degree_spline.plot(color='lightblue',label='Spin 0.5', show=False)
spin998_mcube.plot_polarization_degree(show=False, color='red')
spin998_pol_degree_spline.plot(color='lightsalmon',label='Spin 0.998', show=False)
plt.figtext(0.2, 0.85,'XIPE %s ks'%((SIM_DURATION*NUM_RUNS)/1000.),size=18)
plt.ylim([0.00,0.045])
plt.xlim([1,10])
plt.legend()
plt.show()
if angle:
plt.figure('Polarization angle')
spin05_mcube.plot_polarization_angle(show=False, degree=True, color='blue')
#Converting to degrees
spin05_y = numpy.degrees(spin05_pol_angle_spline.y)
energy = spin05_pol_angle_spline.x
plt.plot(energy, spin05_y, color='lightblue',label='Spin 0.5')
spin09_mcube.plot_polarization_angle(show=False, degree=True, color='gray')
#Converting to degrees
spin09_y = numpy.degrees(spin09_pol_angle_spline.y)
energy = spin09_pol_angle_spline.x
plt.plot(energy, spin09_y, color='lightgray',label='Spin 0.9')
spin998_mcube.plot_polarization_angle(show=False, degree=True, color='red')
spin998_y = numpy.degrees(spin998_pol_angle_spline.y)
energy = spin998_pol_angle_spline.x
plt.plot(energy, spin998_y, color='lightsalmon',label='Spin 0.998')
#spin998_pol_angle_spline.plot(color='lightsalmon',label='Spin 0.998', show=False)
plt.figtext(0.2, 0.85,'XIPE %s ks'%((SIM_DURATION*NUM_RUNS)/1000.),size=18)
plt.xlim([1,10])
plt.ylim([40,200])
plt.legend()
plt.show()
def plotmdp():
    spin00_pol_degree_spline, _ = buildspline(0.5)
    spin00_mcube = xBinnedModulationCube(fetch_mcubepath(0.5))
    spin998_pol_degree_spline, _ = buildspline(0.998)
    spin998_mcube = xBinnedModulationCube(fetch_mcubepath(0.998))
spin00_mcube.fit()
spin998_mcube.fit()
spin00_fit_results = spin00_mcube.fit_results[0]
spin998_fit_results = spin998_mcube.fit_results[0]
plt.figure('MDP')
spin00_mdp = spin00_mcube.mdp99[:-1]
spin998_mdp = spin998_mcube.mdp99[:-1]
emean = spin00_mcube.emean[:-1]
emin = spin00_mcube.emin[:-1]
emax = spin00_mcube.emax[:-1]
width = (emax-emin)/2.
plt.errorbar(emean,spin00_mdp,xerr=width, label='Spin 0.5',marker='o',linestyle='--')
plt.errorbar(emean,spin998_mdp,xerr=width, label='Spin 0.998',marker='o',linestyle='--')
plt.figtext(0.2, 0.85,'XIPE %s ks'%((SIM_DURATION*NUM_RUNS)/1000.),size=18)
plt.xlim([1,10])
    plt.ylabel('MDP 99%')
plt.xlabel('Energy (keV)')
plt.legend()
plt.show()
if __name__ == '__main__':
plot(True)
plotmdp()
|
gpl-3.0
|
zorroblue/scikit-learn
|
sklearn/datasets/tests/test_lfw.py
|
10
|
7291
|
"""This test for the LFW require medium-size data downloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the test are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
def test_load_empty_lfw_people():
assert_raises(IOError, fetch_lfw_people, data_home=SCIKIT_LEARN_EMPTY_DATA,
download_if_missing=False)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3,
download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
    # the target is an array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or
    # color conversion, and with no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, resize=None,
slice_=None, color=True,
download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
def test_load_fake_lfw_people_too_restrictive():
assert_raises(ValueError, fetch_lfw_people, data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=100, download_if_missing=False)
def test_load_empty_lfw_pairs():
assert_raises(IOError, fetch_lfw_pairs,
data_home=SCIKIT_LEARN_EMPTY_DATA,
download_if_missing=False)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or
    # color conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, resize=None,
slice_=None, color=True,
download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
|
bsd-3-clause
|
gem/oq-engine
|
openquake/hmtk/plotting/seismicity/max_magnitude/cumulative_moment.py
|
1
|
3897
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# LICENSE
#
# Copyright (C) 2010-2021 GEM Foundation, G. Weatherill, M. Pagani,
# D. Monelli.
#
# The Hazard Modeller's Toolkit is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
# DISCLAIMER
#
# The software Hazard Modeller's Toolkit (openquake.hmtk) provided herein
# is released as a prototype implementation on behalf of
# scientists and engineers working within the GEM Foundation (Global
# Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the
# hope that it will be useful to the scientific, engineering, disaster
# risk and software design communities.
#
# The software is NOT distributed as part of GEM’s OpenQuake suite
# (https://www.globalquakemodel.org/tools-products) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software
# developers, as GEM’s OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be
# directed to the hazard scientific staff of the GEM Model Facility
# ([email protected]).
#
# The Hazard Modeller's Toolkit (openquake.hmtk) is therefore distributed WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# The GEM Foundation, and the authors of the software, assume no
# liability for use of the software.
'''
Module to produce cumulative moment plot
'''
import numpy as np
import matplotlib.pyplot as plt
from openquake.hmtk.plotting.seismicity.catalogue_plots import _save_image
def plot_cumulative_moment(year, mag, figure_size=(8, 6),
filename=None, filetype='png', dpi=300, ax=None):
    '''Plot the cumulative seismic moment release, following the Cumulative
    Moment approach adapted from the cumulative strain energy method of
    Makropoulos & Burton (1983)
    :param year: Year of each earthquake
    :type year: numpy.ndarray
    :param mag: Magnitude of each earthquake
    :type mag: numpy.ndarray
    :keyword figure_size: Size of the output figure
    :type figure_size: tuple
    :keyword filename: Name of the output file; the figure is not saved
        when None
    :type filename: str or None
    :keyword filetype: File type of the saved figure
    :type filetype: str
    :keyword dpi: Resolution of the saved figure
    :type dpi: int
    :keyword ax: Axes to plot into; a new figure is created when None
    :type ax: matplotlib.axes.Axes or None
    '''
# Calculate seismic moment
m_o = 10. ** (9.05 + 1.5 * mag)
year_range = np.arange(np.min(year), np.max(year) + 1, 1)
    nyr = int(np.shape(year_range)[0])
morate = np.zeros(nyr, dtype=float)
# Get moment release per year
for loc, tyr in enumerate(year_range):
idx = np.abs(year - tyr) < 1E-5
if np.sum(idx) > 0:
# Some moment release in that year
morate[loc] = np.sum(m_o[idx])
ave_morate = np.sum(morate) / float(nyr)
# Average moment rate vector
exp_morate = np.cumsum(ave_morate * np.ones(nyr))
if ax is None:
fig, ax = plt.subplots(figsize=figure_size)
else:
fig = ax.get_figure()
ax.step(year_range, np.cumsum(morate), 'b-', linewidth=2)
ax.plot(year_range, exp_morate, 'r-', linewidth=2)
# Get offsets
upper_morate = exp_morate + (np.max(np.cumsum(morate) - exp_morate))
lower_morate = exp_morate + (np.min(np.cumsum(morate) - exp_morate))
ax.plot(year_range, upper_morate, 'r--', linewidth=1)
ax.plot(year_range, lower_morate, 'r--', linewidth=1)
ax.axis([np.min(year), np.max(year), 0.0, np.sum(morate)])
_save_image(fig, filename, filetype, dpi)
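# Illustrative usage (the catalogue values below are made up):
#   >>> import numpy as np
#   >>> years = np.array([1990, 1991, 1993, 1993, 1995])
#   >>> mags = np.array([5.0, 6.2, 5.5, 4.8, 5.9])
#   >>> plot_cumulative_moment(years, mags, filename='cumulative_moment.png')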
|
agpl-3.0
|
loli/sklearn-ensembletrees
|
examples/applications/face_recognition.py
|
42
|
5390
|
"""
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset::
                     precision    recall  f1-score   support
  Gerhard_Schroeder       0.91      0.75      0.82        28
    Donald_Rumsfeld       0.84      0.82      0.83        33
         Tony_Blair       0.65      0.82      0.73        34
       Colin_Powell       0.78      0.88      0.83        58
      George_W_Bush       0.93      0.86      0.90       129
        avg / total       0.86      0.84      0.85       282
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the flattened image data directly (relative
# pixel position info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split into a training set and a test set (25% of the data is held out for
# testing)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='auto'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
|
bsd-3-clause
|
joshloyal/scikit-learn
|
sklearn/tests/test_learning_curve.py
|
45
|
11897
|
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test score improves (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
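# With n_max_train_sizes=20 and training sizes 2, 4, ..., 20, the mock's
# training score 2 - n/20 runs from 1.9 down to 1.0 and its test score n/20
# from 0.1 up to 1.0, which is what the learning-curve tests below assert.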
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
class MockEstimatorFailing(BaseEstimator):
"""Dummy classifier to test error_score in learning curve"""
def fit(self, X_subset, y_subset):
raise ValueError()
def score(self, X=None, y=None):
return None
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_error_score():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockEstimatorFailing()
_, _, test_scores = learning_curve(estimator, X, y, cv=3, error_score=0)
all_zeros = not np.any(test_scores)
assert(all_zeros)
def test_learning_curve_error_score_default_raise():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockEstimatorFailing()
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
|
bsd-3-clause
|
anetasie/sherpa
|
sherpa/astro/data.py
|
1
|
137542
|
#
# Copyright (C) 2008, 2015, 2016, 2017, 2018, 2019, 2020
# Smithsonian Astrophysical Observatory
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Classes for storing, inspecting, and manipulating astronomical data sets
"""
import os.path
import logging
import warnings
import numpy
from sherpa.data import Data1DInt, Data2D, Data, Data2DInt, Data1D, IntegratedDataSpace2D
from sherpa.models.regrid import EvaluationSpace1D
from sherpa.utils.err import DataErr, ImportErr
from sherpa.utils import SherpaFloat, pad_bounding_box, interpolate, \
create_expr, parse_expr, bool_cast, rebin, filter_bins
from sherpa.utils import formatting
# There are currently (Sep 2015) no tests that exercise the code that
# uses the compile_energy_grid symbols.
from sherpa.astro.utils import arf_fold, rmf_fold, filter_resp, \
compile_energy_grid, do_group, expand_grouped_mask
info = logging.getLogger(__name__).info
warning = logging.getLogger(__name__).warning
regstatus = False
try:
from sherpa.astro.utils._region import Region
regstatus = True
except ImportError:
warning('failed to import sherpa.astro.utils._region; Region routines ' +
'will not be available')
groupstatus = False
try:
import group as pygroup
groupstatus = True
except ImportError:
groupstatus = False
warning('the group module (from the CIAO tools package) is not ' +
'installed.\nDynamic grouping functions will not be available.')
__all__ = ('DataARF', 'DataRMF', 'DataPHA', 'DataIMG', 'DataIMGInt', 'DataRosatRMF')
def _notice_resp(chans, arf, rmf):
bin_mask = None
if rmf is not None and arf is not None:
bin_mask = rmf.notice(chans)
if len(rmf.energ_lo) == len(arf.energ_lo):
arf.notice(bin_mask)
# If the response is mis-matched, determine which energy bins in the
# RMF correspond to energy bins in the ARF and which are noticed.
    # Propagate the noticed RMF energy bins to the ARF energy bins.
elif len(rmf.energ_lo) < len(arf.energ_lo):
arf_mask = None
if bin_mask is not None:
arf_mask = numpy.zeros(len(arf.energ_lo), dtype=bool)
for ii, val in enumerate(bin_mask):
if val:
los = (rmf.energ_lo[ii],)
his = (rmf.energ_hi[ii],)
grid = (arf.energ_lo, arf.energ_hi)
idx = filter_bins(los, his, grid).nonzero()[0]
arf_mask[idx] = True
arf.notice(arf_mask)
else:
if rmf is not None:
bin_mask = rmf.notice(chans)
if arf is not None:
arf.notice(bin_mask)
def display_header(header, key):
"""Return the header value for display by _repr_html
The value is not displayed if it doesn't exist, is None,
is empty, or is the string 'NONE'. This is intended for
PHA responses.
Parameters
----------
header : dict-like
key : str
The key to display
Returns
-------
value : None or value
The value to display, or None.
Notes
-----
It is not clear if the Meta class is intended to only store
string values or not. Limited protection is provided in case
the value stored is not a string.
"""
try:
val = header[key]
except KeyError:
return None
# Unclear if this can happen
if val is None:
return None
# The metadata value is not guaranteed to be a string
try:
val = val.strip()
if val in ['', 'NONE']:
return None
except AttributeError:
pass
return val
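# Illustrative behaviour of display_header (plain dicts stand in for the
# header object):
#   >>> display_header({'FILTER': ' NONE '}, 'FILTER') is None
#   True
#   >>> display_header({'OBJECT': 'GRS 1915+105'}, 'OBJECT')
#   'GRS 1915+105'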
def make_metadata(header, items):
"""Create the metadata table.
Parameters
----------
header : dict-like
The header. Expected to be a sherpa.astro.io.meta.Meta
object but just needs to act like a dictionary.
items : list of (str, str)
The keys to display (in order), if set. The first element
is the key name, and the second is the label in the header
to display.
Returns
-------
meta : list of (str, str) or None
The two-element table rows to display. If no rows matched
return None.
"""
meta = []
for key, desc in items:
val = display_header(header, key)
if val is None:
continue
meta.append((desc, val))
if len(meta) == 0:
return None
return meta
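# Illustrative behaviour of make_metadata (a plain dict stands in for the
# header object); keys that are missing, None, empty, or 'NONE' are skipped:
#   >>> make_metadata({'TELESCOP': 'CHANDRA'},
#   ...               [('TELESCOP', 'Mission or Satellite'),
#   ...                ('OBJECT', 'Object')])
#   [('Mission or Satellite', 'CHANDRA')]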
def _extract_fields(obj, stop, summary, open_block=True):
"""Extract the fields up until the stop field.
Parameters
----------
obj : Data instance
It has to have a _fields attribute
stop : str
The attribute at which to stop (and is not included).
summary : str
The label for the details tab.
open_block : bool, optional
Is the details tab open or closed?
Returns
-------
html : str
The HTML for this section.
"""
meta = []
for f in obj._fields[1:]:
if f == stop:
break
v = getattr(obj, f)
if v is None:
continue
meta.append((f.upper(), v))
return formatting.html_section(meta, summary=summary,
open_block=open_block)
def html_pha(pha):
"""HTML representation: PHA"""
from sherpa.astro.plot import DataPHAPlot, backend
ls = []
plotter = DataPHAPlot()
plotter.prepare(pha)
try:
out = backend.as_html_plot(plotter, 'PHA Plot')
except AttributeError:
out = None
if out is None:
out = _extract_fields(pha, 'grouped', 'PHA Data')
ls.append(out)
# Summary properties
meta = []
if pha.name is not None and pha.name != '':
meta.append(('Identifier', pha.name))
if pha.exposure is not None:
meta.append(('Exposure', '{:g} s'.format(pha.exposure)))
meta.append(('Number of bins', len(pha.channel)))
meta.append(('Channel range', '{} - {}'.format(int(pha.channel[0]),
int(pha.channel[-1]))))
# Although assume the counts are integers, do not force this
cmin = pha.counts.min()
cmax = pha.counts.max()
meta.append(('Count range', '{} - {}'.format(cmin, cmax)))
if pha.background_ids != []:
if pha.subtracted:
msg = 'Subtracted'
else:
msg = 'Not subtracted'
meta.append(('Background', msg))
# Make sure show all groups (not just those that are within
# the filter applied to the object).
#
if pha.grouping is not None:
if pha.grouped:
ngrp = pha.apply_grouping(pha.counts).size
msg = 'Applied ({} groups)'.format(ngrp)
else:
msg = 'Not applied'
meta.append(('Grouping', msg))
# Should this only be displayed if a filter has been applied?
#
fexpr = pha.get_filter_expr()
bintype = 'groups' if pha.grouped else 'channels'
nbins = pha.get_dep(filter=True).size
meta.append(('Using', '{} with {} {}'.format(fexpr, nbins, bintype)))
ls.append(formatting.html_section(meta, summary='Summary',
open_block=True))
# TODO:
# correction factors
# Display a subset of header values
    # - maybe don't display the FILTER if NONE
# - how about RESPFILE / PHAFILE
if pha.header is not None:
meta = make_metadata(pha.header,
[('TELESCOP', 'Mission or Satellite'),
('INSTRUME', 'Instrument or Detector'),
('FILTER', 'Instrument filter'),
('OBJECT', 'Object'),
('TITLE', 'Program description'),
('DATE-OBS', 'Observation date'),
('CREATOR', 'Program that created the PHA'),
('CHANTYPE', 'The channel type'),
('HDUCLAS2', 'Data stored'),
('HDUCLAS3', 'Data format'),
('HDUCLAS4', 'PHA format')])
if meta is not None:
ls.append(formatting.html_section(meta, summary='Metadata'))
return formatting.html_from_sections(pha, ls)
def _calc_erange(elo, ehi):
"""Create the energy range information.
Parameters
----------
elo, ehi - NumPy array
The low and high energy bins, in keV.
Returns
-------
erange : str
The string representation of the energy range
"""
# Have we guaranteed the ordering here or not? Assuming
# NumPy arrays.
e1 = elo[0]
e2 = ehi[-1]
emin, emax = (e1, e2) if e1 <= e2 else (e2, e1)
erange = '{:g} - {:g} keV'.format(emin, emax)
# Randomly pick 1% as the cut-off for a constant bin width
#
de = numpy.abs(ehi - elo)
demin = de.min()
demax = de.max()
if demin > 0.0:
dedelta = (demax - demin) / demin
else:
dedelta = 1
if dedelta <= 0.01:
erange += ', bin size {:g} keV'.format(demax)
else:
erange += ', bin size {:g} - {:g} keV'.format(demin, demax)
return erange
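# Illustrative output of _calc_erange (the bin edges are arbitrary):
#   >>> _calc_erange(numpy.asarray([0.1, 0.2]), numpy.asarray([0.2, 0.3]))
#   '0.1 - 0.3 keV, bin size 0.1 keV'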
def _calc_wrange(wlo, whi):
"""Create the wavelength range information.
Parameters
----------
wlo, whi - NumPy array
The low and high wavelength bins, in Angstroms.
Returns
-------
wrange : str
The string representation of the wavelength range
"""
w1 = wlo[0]
w2 = whi[-1]
wmin, wmax = (w1, w2) if w1 <= w2 else (w2, w1)
wrange = '{:g} - {:g} Å'.format(wmin, wmax)
# Randomly pick 1% as the cut-off for a constant bin width
#
dw = numpy.abs(whi - wlo)
dwmin = dw.min()
dwmax = dw.max()
if dwmin > 0.0:
dwdelta = (dwmax - dwmin) / dwmin
else:
dwdelta = 1
if dwdelta <= 0.01:
wrange += ', bin size {:g} Å'.format(dwmax)
else:
wrange += ', bin size {:g} - {:g} Å'.format(dwmin, dwmax)
return wrange
def html_arf(arf):
"""HTML representation: ARF"""
# Unlike the string representation, this provides extra
# information (e.g. energy range covered). Should it include
# any filters or masks? How about bin_lo/hi values?
#
# It also assumes the units are keV/cm^2 which is not
# guaranteed.
from sherpa.astro.plot import ARFPlot, backend
ls = []
plotter = ARFPlot()
plotter.prepare(arf)
try:
out = backend.as_html_plot(plotter, 'ARF Plot')
except AttributeError:
out = None
if out is None:
out = _extract_fields(arf, 'exposure', 'ARF Data')
ls.append(out)
# Summary properties
meta = []
if arf.name is not None and arf.name != '':
meta.append(('Identifier', arf.name))
if arf.exposure is not None:
meta.append(('Exposure', '{:g} s'.format(arf.exposure)))
meta.append(('Number of bins', len(arf.specresp)))
erange = _calc_erange(arf.energ_lo, arf.energ_hi)
meta.append(('Energy range', erange))
# repeat for wavelengths (without the energy threshold)
#
if arf.bin_lo is not None and arf.bin_hi is not None:
wrange = _calc_wrange(arf.bin_lo, arf.bin_hi)
meta.append(('Wavelength range', wrange))
a1 = numpy.min(arf.specresp)
a2 = numpy.max(arf.specresp)
meta.append(('Area range', '{:g} - {:g} cm<sup>2</sup>'.format(a1, a2)))
ls.append(formatting.html_section(meta, summary='Summary',
open_block=True))
# Display a subset of header values
    # - maybe don't display the FILTER if NONE
# - how about RESPFILE / PHAFILE
if arf.header is not None:
meta = make_metadata(arf.header,
[('TELESCOP', 'Mission or Satellite'),
('INSTRUME', 'Instrument or Detector'),
('FILTER', 'Instrument filter'),
('OBJECT', 'Object'),
('TITLE', 'Program description'),
('DATE-OBS', 'Observation date'),
('CREATOR', 'Program that created the ARF')])
if meta is not None:
ls.append(formatting.html_section(meta, summary='Metadata'))
return formatting.html_from_sections(arf, ls)
def html_rmf(rmf):
"""HTML representation: RMF"""
# See _html_arf for general comments
ls = []
svg = simulate_rmf_plot(rmf)
if svg is not None:
out = formatting.html_svg(svg, 'RMF Plot')
else:
out = _extract_fields(rmf, 'ethresh', 'RMF Data')
ls.append(out)
# Summary properties
meta = []
if rmf.name is not None and rmf.name != '':
meta.append(('Identifier', rmf.name))
meta.append(('Number of channels', rmf.detchans))
meta.append(('Number of energies', len(rmf.energ_hi)))
erange = _calc_erange(rmf.energ_lo, rmf.energ_hi)
if rmf.ethresh is not None and rmf.energ_lo[0] <= rmf.ethresh:
# Not entirely happy with the wording of this
erange += ' (minimum threshold of {} was used)'.format(rmf.ethresh)
meta.append(('Energy range', erange))
meta.append(('Channel range', '{} - {}'.format(int(rmf.offset),
int(rmf.offset + rmf.detchans - 1))))
# Could show the energy range as given by e_min/e_max but
# is this useful?
ls.append(formatting.html_section(meta, summary='Summary',
open_block=True))
# Display a subset of header values
# - how about PHAFILE
if rmf.header is not None:
meta = make_metadata(rmf.header,
[('TELESCOP', 'Mission or Satellite'),
('INSTRUME', 'Instrument or Detector'),
('FILTER', 'Instrument filter'),
('OBJECT', 'Object'),
('TITLE', 'Program description'),
('DATE-OBS', 'Observation date'),
('CREATOR', 'Program that created the RMF'),
('CHANTYPE', 'The channel type'),
('LO_THRES', 'The minimum probability threshold'),
('HDUCLAS3', 'Matrix contents')])
if meta is not None:
ls.append(formatting.html_section(meta, summary='Metadata'))
return formatting.html_from_sections(rmf, ls)
def html_img(img):
"""HTML representation: IMG
Special-case of the Data2D handling. It would be nice to re-use
parts of the superclass behavior.
"""
ls = []
dtype = type(img).__name__
svg = img_plot(img)
if svg is not None:
out = formatting.html_svg(svg, '{} Plot'.format(dtype))
summary = ''
else:
# Only add prefix to summary if there's no plot
summary = '{} '.format(dtype)
# Summary properties
#
meta = []
if img.name is not None and img.name != '':
meta.append(('Identifier', img.name))
# shape is better defined for DataIMG than Data2D
meta.append(('Shape',
('{1} by {0} pixels'.format(*img.shape))))
meta.append(('Number of bins', len(img.y)))
# Rely on the _fields ordering, ending at shape
for f in img._fields[1:]:
if f == 'shape':
break
meta.append((f.upper(), getattr(img, f)))
if img.staterror is not None:
meta.append(('Statistical error', img.staterror))
if img.syserror is not None:
meta.append(('Systematic error', img.syserror))
out = formatting.html_section(meta, summary=summary + 'Data',
open_block=True)
ls.append(out)
# Add coordinate-system information. The WCS structure in Sherpa
# is not really sufficient to identify the transform.
#
if img.sky is not None:
meta = []
meta.append(('Center pixel (logical)', img.sky.crpix))
meta.append(('Center pixel (physical)', img.sky.crval))
meta.append(('Pixel size', img.sky.cdelt))
ls.append(formatting.html_section(meta,
summary='Coordinates: {}'.format(img.sky.name)))
if img.eqpos is not None:
meta = []
meta.append(('Center pixel (physical)', img.eqpos.crpix))
# could convert to RA/Dec
meta.append(('Center pixel (world)', img.eqpos.crval))
meta.append(('Pixel size', img.eqpos.cdelt))
meta.append(('Rotation', img.eqpos.crota))
meta.append(('Epoch', img.eqpos.epoch))
meta.append(('Equinox', img.eqpos.equinox))
ls.append(formatting.html_section(meta,
summary='Coordinates: {}'.format(img.eqpos.name)))
if img.header is not None:
meta = make_metadata(img.header,
[('TELESCOP', 'Mission or Satellite'),
('INSTRUME', 'Instrument or Detector'),
('FILTER', 'Instrument filter'),
('OBJECT', 'Object'),
('TITLE', 'Program description'),
('OBSERVER', 'Observer'),
('EXPOSURE', 'Exposure time'),
('DATE-OBS', 'Observation date'),
('CREATOR', 'Program that created the image')])
if meta is not None:
ls.append(formatting.html_section(meta, summary='Metadata'))
return formatting.html_from_sections(img, ls)
def simulate_rmf_plot(rmf):
"""Create a plot which shows the response to monochromatic energies.
The SVG of the plot is returned if matplotlib is selected as the
backend. The choice of energies used to create the response to
monochromatic energies is based on the data range (using log
scaling).
"""
from sherpa.models.basic import Delta1D
from sherpa.plot import backend
try:
from matplotlib import pyplot as plt
except ImportError:
return None
# X access
#
if rmf.e_min is None:
x = numpy.arange(rmf.offset, rmf.detchans + rmf.offset)
xlabel = 'Channel'
else:
x = 0.5 * (rmf.e_min + rmf.e_max)
xlabel = 'Energy (keV)'
# How many monochromatic lines to use
#
nlines = 5
# for now let's just create log-spaced energies
#
elo, ehi = rmf.energ_lo, rmf.energ_hi
l1 = numpy.log10(elo[0])
l2 = numpy.log10(ehi[-1])
dl = (l2 - l1) / (nlines + 1)
lines = l1 + dl * numpy.arange(1, nlines + 1)
energies = numpy.power(10, lines)
mdl = Delta1D()
def plotfunc():
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.2)
for energy in energies:
mdl.pos = energy
y = rmf.apply_rmf(mdl(elo, ehi))
ax.plot(x, y, label='{:.2g} keV'.format(energy))
# Try to get the legend centered nicely below the plot
fig.legend(loc='center', ncol=nlines, bbox_to_anchor=(0.0, 0, 1, 0.1))
ax.set_xlabel(xlabel)
ax.set_title(rmf.name)
ax.set_xscale('log')
ax.set_yscale('log')
return fig
try:
return backend.as_svg(plotfunc)
except AttributeError:
return None
def img_plot(img):
"""Display the image.
The SVG of the plot is returned if matplotlib is selected as the
backend.
The eqpos/wcs coordinate system is not used; it uses physical
instead. This greatly simplifies the plot (no need to handle WCS).
"""
from sherpa.plot import backend
try:
from matplotlib import pyplot as plt
except ImportError:
return None
# Apply filter and coordinate system
#
y = img.get_img()
# extent is left, right, bottom, top and describes the
# outer-edge of the pixels.
#
ny, nx = img.shape
coord = img.coord
if coord in ['physical', 'world']:
x0, y0 = img._logical_to_physical(0.5, 0.5)
x1, y1 = img._logical_to_physical(nx + 0.5, ny + 0.5)
extent = (x0, x1, y0, y1)
lbl = 'physical'
cdelt = img.sky.cdelt
aspect = 'equal' if cdelt[1] == cdelt[0] else 'auto'
else:
extent = (0.5, nx + 0.5, 0.5, ny + 0.5)
aspect = 'equal'
lbl = 'logical'
# What is the filtered dataset?
#
if img.get_filter_expr() != '':
x0, x1 = img.get_indep(filter=True)
x0min, x0max = numpy.min(x0), numpy.max(x0)
x1min, x1max = numpy.min(x1), numpy.max(x1)
        # Should add in half a cdelt to pad these, but
        # it looks like it isn't necessary.
filtered = (x0min, x1min, x0max, x1max)
else:
filtered = None
def plotfunc():
fig, ax = plt.subplots()
im = ax.imshow(y, origin='lower', extent=extent, aspect=aspect)
fig.colorbar(im, ax=ax)
        if filtered is not None:
ax.set_xlim(filtered[0], filtered[2])
ax.set_ylim(filtered[1], filtered[3])
ax.set_xlabel('X ({})'.format(lbl))
ax.set_ylabel('Y ({})'.format(lbl))
if img.name is not None and img.name != '':
ax.set_title(img.name)
return fig
try:
return backend.as_svg(plotfunc)
except AttributeError:
return None
class DataOgipResponse(Data1DInt):
"""
Parent class for OGIP responses, in particular ARF and RMF. This class implements some common validation code that
inheriting classes can call in their initializers.
Inheriting classes should override the protected class field `_ui_name` to provide a more specific label for user
messages.
"""
_ui_name = "OGIP Response"
# FIXME For a future time when we'll review this code in a deeper way: we
# could have better separation of concerns if the initializers of `DataARF`
# and `DataRMF` did not rely on the `Data` initializer, and if the
# class hierarchy was better organized (e.g. it looks like children must
# not call their super's initializer. Also, I'd expect validation to
# happen in individual methods rather than in a large one, and nested ifs
# should be avoided if possible.
#
# The shift to creating a warning message instead of raising an
# error has made this messier.
#
def _validate_energy_ranges(self, label, elo, ehi, ethresh):
"""Check the lo/hi values are > 0, handling common error case.
Several checks are made, to make sure the parameters follow
the OGIP standard. At present a failed check can result in
either a warning message being logged, or an error raised.
It was felt that raising an error in all cases would not be
helpful to a user, who can't (easily) change the response
files.
Parameters
----------
label : str
The response file identifier.
elo, ehi : numpy.ndarray
The input ENERG_LO and ENERG_HI arrays. They are assumed
to be one-dimensional and have the same number of elements.
ethresh : None or float, optional
If None, then elo must be greater than 0. When set, the
start bin can have a low-energy edge of 0; it is replaced
by ethresh. If set, ethresh must be greater than 0.
An error is raised if ethresh is larger than the upper-edge
of the first bin (only if the lower edge has been replaced).
Returns
-------
elo, ehi : numpy arrays
The validated energy limits. These can be the input arrays
or a copy of them. At present the ehi array is the same as
the input array, but this may change in the future.
Notes
-----
Only some of the constraints provided by the OGIP standard are
checked here, since there are issues involving numerical effects
(e.g. when checking that two bins do not overlap), as well as
uncertainty over what possible behavior is seen in released
data products for missions. The current set of checks are:
- ehi > elo for each bin
- elo is monotonic (ascending or descending)
        - when ethresh is set, the lowest value in elo is >= 0,
          otherwise it is > 0.
- ethresh (if set) is less than the minimum value in ENERG_HI
"""
rtype = self._ui_name
if elo.size != ehi.size:
raise ValueError("The energy arrays must have the same size, not {} and {}" .format(elo.size, ehi.size))
if ethresh is not None and ethresh <= 0.0:
raise ValueError("ethresh is None or > 0")
if (elo >= ehi).any():
# raise DataErr('ogip-error', rtype, label,
# 'has at least one bin with ENERG_HI < ENERG_LO')
wmsg = "The {} '{}' ".format(rtype, label) + \
'has at least one bin with ENERG_HI < ENERG_LO'
warnings.warn(wmsg)
# if elo is monotonically increasing, all elements will be True
# decreasing, False
#
# so the sum will be number of elements or 0
#
increasing = numpy.diff(elo, n=1) > 0.0
nincreasing = increasing.sum()
if nincreasing > 0 and nincreasing != len(increasing):
# raise DataErr('ogip-error', rtype, label,
# 'has a non-monotonic ENERG_LO array')
wmsg = "The {} '{}' ".format(rtype, label) + \
'has a non-monotonic ENERG_LO array'
warnings.warn(wmsg)
if nincreasing == 0:
startidx = -1
else:
startidx = 0
e0 = elo[startidx]
if ethresh is None:
if e0 <= 0.0:
raise DataErr('ogip-error', rtype, label,
'has an ENERG_LO value <= 0')
else:
# TODO: should this equality be replaced by an approximation test?
if e0 == 0.0:
if ehi[startidx] <= ethresh:
raise DataErr('ogip-error', rtype, label,
'has an ENERG_HI value <= the replacement ' +
'value of {}'.format(ethresh))
elo = elo.copy()
elo[startidx] = ethresh
wmsg = "The minimum ENERG_LO in the " + \
"{} '{}' was 0 and has been ".format(rtype, label) + \
"replaced by {}".format(ethresh)
warnings.warn(wmsg)
elif e0 < 0.0:
# raise DataErr('ogip-error', rtype, label,
# 'has an ENERG_LO value < 0')
wmsg = "The {} '{}' ".format(rtype, label) + \
'has an ENERG_LO value < 0'
warnings.warn(wmsg)
return elo, ehi
def _get_data_space(self, filter=False):
return EvaluationSpace1D(self._lo, self._hi)
class DataARF(DataOgipResponse):
"""ARF data set.
The ARF format is described in OGIP documents [1]_ and [2]_.
Parameters
----------
name : str
The name of the data set; often set to the name of the file
containing the data.
energ_lo, energ_hi, specresp : numpy.ndarray
The values of the ENERG_LO, ENERG_HI, and SPECRESP columns
for the ARF. The ENERG_HI values must be greater than the
ENERG_LO values for each bin, and the energy arrays must be
in increasing or decreasing order.
bin_lo, bin_hi : array or None, optional
exposure : number or None, optional
The exposure time for the ARF, in seconds.
header : dict or None, optional
ethresh : number or None, optional
If set it must be greater than 0 and is the replacement value
to use if the lowest-energy value is 0.0.
Raises
------
sherpa.utils.err.DataErr
This is raised if the energy arrays do not follow some of the
OGIP standards.
Notes
-----
There is limited checking that the ARF matches the OGIP standard,
but as there are cases of released data products that do not follow
the standard, these checks can not cover all cases.
References
----------
.. [1] "The Calibration Requirements for Spectral Analysis (Definition of RMF and ARF file formats)", https://heasarc.gsfc.nasa.gov/docs/heasarc/caldb/docs/memos/cal_gen_92_002/cal_gen_92_002.html
.. [2] "The Calibration Requirements for Spectral Analysis Addendum: Changes log", https://heasarc.gsfc.nasa.gov/docs/heasarc/caldb/docs/memos/cal_gen_92_002a/cal_gen_92_002a.html
"""
_ui_name = "ARF"
_fields = ("name", "energ_lo", "energ_hi", "specresp", "bin_lo", "bin_hi", "exposure", "ethresh")
def _get_specresp(self):
return self._specresp
def _set_specresp(self, val):
self._specresp = val
self._rsp = val
specresp = property(_get_specresp, _set_specresp)
def __init__(self, name, energ_lo, energ_hi, specresp, bin_lo=None,
bin_hi=None, exposure=None, header=None, ethresh=None):
self.specresp = specresp
self.bin_lo = bin_lo
self.bin_hi = bin_hi
self.exposure = exposure
self.header = header
self.ethresh = ethresh
energ_lo, energ_hi = self._validate_energy_ranges(name, energ_lo, energ_hi, ethresh)
self._lo, self._hi = energ_lo, energ_hi
self.energ_lo = energ_lo
self.energ_hi = energ_hi
Data1DInt.__init__(self, name, energ_lo, energ_hi, specresp)
def __str__(self):
# Print the metadata first
try:
ss = Data.__str__(self)
except Exception:
ss = str(self._fields)
return ss
def _repr_html_(self):
"""Return a HTML (string) representation of the ARF
"""
return html_arf(self)
def __setstate__(self, state):
if 'header' not in state:
self.header = None
self.__dict__.update(state)
if '_specresp' not in state:
self.__dict__['_specresp'] = state.get('specresp', None)
self.__dict__['_rsp'] = state.get('specresp', None)
def apply_arf(self, src, *args, **kwargs):
"Fold the source array src through the ARF and return the result"
# an external function must be called so all ARFs go through
# a single entry point in order for caching to 'work'
model = arf_fold(src, self._rsp)
# Rebin the high-res source model folded through ARF down to the size
# the PHA or RMF expects.
if args != ():
(arf, rmf) = args
if rmf != () and len(arf[0]) > len(rmf[0]):
model = rebin(model, arf[0], arf[1], rmf[0], rmf[1])
return model
def notice(self, bin_mask=None):
self._rsp = self.specresp
self._lo = self.energ_lo
self._hi = self.energ_hi
if bin_mask is not None:
self._rsp = self.specresp[bin_mask]
self._lo = self.energ_lo[bin_mask]
self._hi = self.energ_hi[bin_mask]
def get_indep(self, filter=False):
return (self._lo, self._hi)
def get_dep(self, filter=False):
return self._rsp
def get_xlabel(self):
return 'Energy (keV)'
def get_ylabel(self):
from sherpa.plot import backend
return 'cm' + backend.get_latex_for_string('^2')
class DataRMF(DataOgipResponse):
"""RMF data set.
The RMF format is described in OGIP documents [1]_ and [2]_.
Parameters
----------
name : str
The name of the data set; often set to the name of the file
containing the data.
detchans : int
energ_lo, energ_hi : array
The values of the ENERG_LO and ENERG_HI columns for the RMF.
The ENERG_HI values must be greater than the ENERG_LO values
for each bin, and the energy arrays must be in increasing or
decreasing order.
n_grp, f_chan, n_chan, matrix : array-like
offset : int, optional
e_min, e_max : array-like or None, optional
header : dict or None, optional
ethresh : number or None, optional
If set it must be greater than 0 and is the replacement value
to use if the lowest-energy value is 0.0.
Notes
-----
There is limited checking that the RMF matches the OGIP standard,
but as there are cases of released data products that do not follow
the standard, these checks can not cover all cases. If a check fails
then a warning message is logged.
References
----------
.. [1] "The Calibration Requirements for Spectral Analysis (Definition of RMF and ARF file formats)", https://heasarc.gsfc.nasa.gov/docs/heasarc/caldb/docs/memos/cal_gen_92_002/cal_gen_92_002.html
.. [2] "The Calibration Requirements for Spectral Analysis Addendum: Changes log", https://heasarc.gsfc.nasa.gov/docs/heasarc/caldb/docs/memos/cal_gen_92_002a/cal_gen_92_002a.html
"""
_ui_name = "RMF"
_fields = ("name", "detchans", "energ_lo", "energ_hi", "n_grp", "f_chan", "n_chan", "matrix", "offset", "e_min",
"e_max", "ethresh")
def __init__(self, name, detchans, energ_lo, energ_hi, n_grp, f_chan,
n_chan, matrix, offset=1, e_min=None, e_max=None,
header=None, ethresh=None):
energ_lo, energ_hi = self._validate(name, energ_lo, energ_hi, ethresh)
if offset < 0:
raise ValueError("offset must be >=0, not {}".format(offset))
self.energ_lo = energ_lo
self.energ_hi = energ_hi
self.offset = offset
self.detchans = detchans
self.e_min = e_min
self.e_max = e_max
self.header = header
self.n_grp = n_grp
self.f_chan = f_chan
self.n_chan = n_chan
self.matrix = matrix
self.ethresh = ethresh
self._fch = f_chan
self._nch = n_chan
self._grp = n_grp
self._rsp = matrix
self._lo = energ_lo
self._hi = energ_hi
Data1DInt.__init__(self, name, energ_lo, energ_hi, matrix)
def __str__(self):
# Print the metadata first
old = self._fields
ss = old
try:
self._fields = tuple(filter((lambda x: x != 'header'),
self._fields))
ss = Data.__str__(self)
finally:
self._fields = old
return ss
def _repr_html_(self):
"""Return a HTML (string) representation of the RMF
"""
return html_rmf(self)
def __setstate__(self, state):
if 'header' not in state:
self.header = None
self.__dict__.update(state)
def _validate(self, name, energy_lo, energy_hi, ethresh):
"""
Validate energy ranges and, if necessary, make adjustments.
Subclasses may override this method to perform different validations
or skip validation altogether.
Parameters
----------
name : str
The name/label of the current file
energy_lo, energy_hi : NumPy array
The lower and upper bounds of the energy bins. The arrays must
have the same size.
ethresh : None or float
The replacement value to use when the lowest energy bound is 0
(None means no replacement is made).
Returns
-------
energy_lo, energy_hi : NumPy array
The energy values to use for the bin boundaries
"""
return self._validate_energy_ranges(name, energy_lo, energy_hi, ethresh)
def apply_rmf(self, src, *args, **kwargs):
"Fold the source array src through the RMF and return the result"
# Rebin the high-res source model from the PHA down to the size
# the RMF expects.
if args != ():
(rmf, pha) = args
if pha != () and len(pha[0]) > len(rmf[0]):
src = rebin(src, pha[0], pha[1], rmf[0], rmf[1])
if len(src) != len(self._lo):
raise TypeError("Mismatched filter between ARF and RMF " +
"or PHA and RMF")
return rmf_fold(src, self._grp, self._fch, self._nch, self._rsp,
self.detchans, self.offset)
def notice(self, noticed_chans=None):
bin_mask = None
self._fch = self.f_chan
self._nch = self.n_chan
self._grp = self.n_grp
self._rsp = self.matrix
self._lo = self.energ_lo
self._hi = self.energ_hi
if noticed_chans is not None:
(self._grp, self._fch, self._nch, self._rsp,
bin_mask) = filter_resp(noticed_chans, self.n_grp, self.f_chan,
self.n_chan, self.matrix, self.offset)
self._lo = self.energ_lo[bin_mask]
self._hi = self.energ_hi[bin_mask]
return bin_mask
def get_indep(self, filter=False):
return (self._lo, self._hi)
def get_dep(self, filter=False):
return self.apply_rmf(numpy.ones(self.energ_lo.shape, SherpaFloat))
def get_xlabel(self):
if (self.e_min is not None) and (self.e_max is not None):
return 'Energy (keV)'
return 'Channel'
def get_ylabel(self):
return 'Counts'
# FIXME There are places in the code that explicitly check if an object is an instance of sherpa.astro.data.DataRMF.
# So it's safer to make DataRosatRMF a subclass of the default class, although in principle they should be siblings
# and subclasses of the same superclass.
class DataRosatRMF(DataRMF):
ui_name = "ROSAT RMF"
def _validate(self, name, energy_lo, energy_hi, ethresh):
return energy_lo, energy_hi
class DataPHA(Data1D):
"""PHA data set, including any associated instrument and background data.
The PHA format is described in an OGIP document [1]_.
Parameters
----------
name : str
The name of the data set; often set to the name of the file
containing the data.
channel, counts : array of int
The PHA data.
staterror, syserror : scalar or array or None, optional
The statistical and systematic errors for the data, if
defined.
bin_lo, bin_hi : array or None, optional
grouping : array of int or None, optional
quality : array of int or None, optional
exposure : number or None, optional
The exposure time for the PHA data set, in seconds.
backscal : scalar or array or None, optional
areascal : scalar or array or None, optional
header : dict or None, optional
Attributes
----------
name : str
Used to store the file name, for data read from a file.
channel
counts
staterror
syserror
bin_lo
bin_hi
grouping
quality
exposure
backscal
areascal
Notes
-----
The original data is stored in the attributes - e.g. `counts` - and
the data-access methods, such as `get_dep` and `get_staterror`,
provide any necessary data manipulation to handle cases such as:
background subtraction, filtering, and grouping.
The handling of the AREASCAl value - whether it is a scalar or
array - is currently in flux. It is a value that is stored with the
PHA file, and the OGIP PHA standard ([1]_) describes the observed
counts being divided by the area scaling before comparison to the
model. However, this is not valid for Poisson-based statistics, and
is also not how XSPEC handles AREASCAL ([2]_); the AREASCAL values
are used to scale the exposure times instead. The aim is to add
this logic to the instrument models in `sherpa.astro.instrument`,
such as `sherpa.astro.instrument.RMFModelPHA`. The area scaling still
has to be applied when calculating the background contribution to
a spectrum, as well as when calculating the data and model values used
for plots (following XSPEC so as to avoid sharp discontinuities where
the area-scaling factor changes strongly).
References
----------
.. [1] "The OGIP Spectral File Format", https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/spectra/ogip_92_007/ogip_92_007.html
.. [2] Private communication with Keith Arnaud
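Examples
--------
A minimal sketch of creating a PHA data set from channel and count
arrays (the values are purely illustrative):
>>> import numpy as np
>>> chans = np.arange(1, 5)
>>> counts = np.array([7, 3, 4, 2])
>>> pha = DataPHA("example.pha", chans, counts, exposure=1000.0)
>>> pha.units
'channel'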
"""
_fields = ("name", "channel", "counts", "bin_lo", "bin_hi", "grouping", "quality",
"exposure", "backscal", "areascal")
def _get_grouped(self):
return self._grouped
def _set_grouped(self, val):
val = bool(val)
if val and self.grouping is None:
raise DataErr('nogrouping', self.name)
if self._grouped == val:
return
# As the grouping status is being changed, we need to reset the mask
# to the correct size, while still noticing groups within the filter
#
if numpy.iterable(self.mask):
old_filter = self.get_filter(group=val)
self._grouped = val
self.ignore()
for vals in parse_expr(old_filter):
self.notice(*vals)
self._grouped = val
grouped = property(_get_grouped, _set_grouped,
doc='Are the data grouped?')
def _get_subtracted(self):
return self._subtracted
def _set_subtracted(self, val):
val = bool(val)
if len(self._backgrounds) == 0:
raise DataErr('nobkg', self.name)
self._subtracted = val
subtracted = property(_get_subtracted, _set_subtracted,
doc='Are the background data subtracted?')
def _get_units(self):
return self._units
def _set_units(self, val):
units = str(val).strip().lower()
if units == 'bin':
units = 'channel'
if units.startswith('chan'):
# Note: the names of these routines appear confusing because of the
# way group values are used
self._to_channel = self._channel_to_group
self._from_channel = self._group_to_channel
units = 'channel'
elif units.startswith('ener'):
self._to_channel = self._energy_to_channel
self._from_channel = self._channel_to_energy
units = 'energy'
elif units.startswith('wave'):
self._to_channel = self._wavelength_to_channel
self._from_channel = self._channel_to_wavelength
units = 'wavelength'
else:
raise DataErr('bad', 'quantity', val)
for id in self.background_ids:
bkg = self.get_background(id)
if bkg.get_response() != (None, None) or \
(bkg.bin_lo is not None and bkg.bin_hi is not None):
bkg.units = units
self._units = units
units = property(_get_units, _set_units,
doc='Units of the independent axis')
def _get_rate(self):
return self._rate
def _set_rate(self, val):
self._rate = bool_cast(val)
for id in self.background_ids:
# TODO: shouldn't this store bool_cast(val) instead?
self.get_background(id).rate = val
rate = property(_get_rate, _set_rate,
doc='Quantity of y-axis: counts or counts/sec')
def _get_plot_fac(self):
return self._plot_fac
def _set_plot_fac(self, val):
self._plot_fac = int(val)
for id in self.background_ids:
self.get_background(id).plot_fac = val
plot_fac = property(_get_plot_fac, _set_plot_fac,
doc='Number of times to multiply the y-axis ' +
'quantity by x-axis bin size')
def _get_response_ids(self):
return self._response_ids
def _set_response_ids(self, ids):
if not numpy.iterable(ids):
raise DataErr('idsnotarray', 'response', str(ids))
keys = self._responses.keys()
for id in ids:
if id not in keys:
raise DataErr('badids', str(id), 'response', str(keys))
ids = list(ids)
self._response_ids = ids
response_ids = property(_get_response_ids, _set_response_ids,
doc=('IDs of defined instrument responses ' +
'(ARF/RMF pairs)'))
def _get_background_ids(self):
return self._background_ids
def _set_background_ids(self, ids):
if not numpy.iterable(ids):
raise DataErr('idsnotarray', 'background', str(ids))
keys = self._backgrounds.keys()
for id in ids:
if id not in keys:
raise DataErr('badids', str(id), 'background', str(keys))
ids = list(ids)
self._background_ids = ids
background_ids = property(_get_background_ids, _set_background_ids,
doc='IDs of defined background data sets')
_fields = ('name', 'channel', 'counts', 'staterror', 'syserror', 'bin_lo', 'bin_hi', 'grouping', 'quality',
'exposure', 'backscal', 'areascal', 'grouped', 'subtracted', 'units', 'rate', 'plot_fac', 'response_ids',
'background_ids')
def __init__(self, name, channel, counts, staterror=None, syserror=None,
bin_lo=None, bin_hi=None, grouping=None, quality=None,
exposure=None, backscal=None, areascal=None, header=None):
self.channel = channel
self.counts = counts
self.bin_lo = bin_lo
self.bin_hi = bin_hi
self.quality = quality
self.grouping = grouping
self.exposure = exposure
self.backscal = backscal
self.areascal = areascal
self.header = header
self._grouped = (grouping is not None)
self._original_groups = True
self._subtracted = False
self._response_ids = []
self._background_ids = []
self._responses = {}
self._backgrounds = {}
self._rate = True
self._plot_fac = 0
self.units = 'channel'
self.quality_filter = None
Data1D.__init__(self, name, channel, counts, staterror, syserror)
def __str__(self):
# Print the metadata first
old = self._fields
ss = old
try:
self._fields = tuple(filter((lambda x: x != 'header'),
self._fields))
ss = Data.__str__(self)
finally:
self._fields = old
return ss
def _repr_html_(self):
"""Return a HTML (string) representation of the PHA
"""
return html_pha(self)
def __getstate__(self):
state = self.__dict__.copy()
del state['_to_channel']
del state['_from_channel']
return state
def __setstate__(self, state):
self._background_ids = state['_background_ids']
self._backgrounds = state['_backgrounds']
self._set_units(state['_units'])
if 'header' not in state:
self.header = None
self.__dict__.update(state)
primary_response_id = 1
"""The identifier for the response component when not set."""
def set_analysis(self, quantity, type='rate', factor=0):
"""Return the units used when fitting spectral data.
Parameters
----------
quantity : {'channel', 'energy', 'wavelength'}
The analysis setting.
type : {'rate', 'counts'}, optional
Do plots display a rate or show counts?
factor : int, optional
The Y axis of plots is multiplied by Energy^factor or
Wavelength^factor before display. The default is 0.
Raises
------
sherpa.utils.err.DataErr
If the type argument is invalid, the RMF or ARF has the
wrong size, or there is no response.
See Also
--------
get_analysis
Examples
--------
>>> pha.set_analysis('energy')
>>> pha.set_analysis('wave', type='counts', factor=1)
"""
self.plot_fac = factor
type = str(type).strip().lower()
if not (type.startswith('counts') or type.startswith('rate')):
raise DataErr("plottype", type, "'rate' or 'counts'")
self.rate = (type == 'rate')
arf, rmf = self.get_response()
if rmf is not None and rmf.detchans != len(self.channel):
raise DataErr("incompatibleresp", rmf.name, self.name)
if (rmf is None and arf is None) and \
(self.bin_lo is None and self.bin_hi is None) and \
quantity != 'channel':
raise DataErr('norsp', self.name)
if rmf is None and arf is not None and quantity != 'channel' and \
len(arf.energ_lo) != len(self.channel):
raise DataErr("incompleteresp", self.name)
self.units = quantity
def get_analysis(self):
"""Return the units used when fitting spectral data.
Returns
-------
setting : { 'channel', 'energy', 'wavelength' }
The analysis setting.
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain PHA data.
sherpa.utils.err.IdentifierErr
If the `id` argument is not recognized.
See Also
--------
set_analysis
Examples
--------
>>> is_wave = pha.get_analysis() == 'wavelength'
"""
return self.units
def _fix_response_id(self, id):
if id is None:
id = self.primary_response_id
return id
def get_response(self, id=None):
"""Return the response component.
Parameters
----------
id : int or str, optional
The identifier of the response component. If it is None
then the default response identifier is used.
Returns
-------
arf, rmf : sherpa.astro.data.DataARF, sherpa.astro.data.DataRMF instances or None
The response, as an ARF and RMF. Either, or both,
components can be None.
See Also
--------
delete_response, get_arf, get_rmf, set_response
"""
id = self._fix_response_id(id)
return self._responses.get(id, (None, None))
def set_response(self, arf=None, rmf=None, id=None):
"""Add or replace a response component.
To remove a response use delete_response(), as setting arf and
rmf to None here does nothing.
Parameters
----------
arf : sherpa.astro.data.DataARF instance or None, optional
The ARF to add if any.
rmf : sherpa.astro.data.DataRMF instance or None, optional
The RMF to add, if any.
id : int or str, optional
The identifier of the response component. If it is None
then the default response identifier is used.
See Also
--------
delete_response, get_response, set_arf, set_rmf
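Examples
--------
A sketch of attaching a previously-created response pair (here
`arf` and `rmf` are assumed to be DataARF and DataRMF instances
and no response has been set on `pha` yet):
>>> pha.set_response(arf, rmf)
>>> pha.response_ids
[1]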
"""
if (arf is None) and (rmf is None):
return
id = self._fix_response_id(id)
self._responses[id] = (arf, rmf)
ids = self.response_ids[:]
if id not in ids:
ids.append(id)
self.response_ids = ids
def delete_response(self, id=None):
"""Remove the response component.
If the response component does not exist then the method
does nothing.
Parameters
----------
id : int or str, optional
The identifier of the response component. If it is None
then the default response identifier is used.
See Also
--------
set_response
"""
id = self._fix_response_id(id)
self._responses.pop(id, None)
ids = self.response_ids[:]
ids.remove(id)
self.response_ids = ids
def get_arf(self, id=None):
"""Return the ARF from the response.
Parameters
----------
id : int or str, optional
The identifier of the response component. If it is None
then the default response identifier is used.
Returns
-------
arf: sherpa.astro.data.DataARF instance or None
The ARF, if set.
See Also
--------
get_response, get_rmf
"""
return self.get_response(id)[0]
def get_rmf(self, id=None):
"""Return the RMF from the response.
Parameters
----------
id : int or str, optional
The identifier of the response component. If it is None
then the default response identifier is used.
Returns
-------
rmf: sherpa.astro.data.DataRMF instance or None
The RMF, if set.
See Also
--------
get_arf, get_response
"""
return self.get_response(id)[1]
def set_arf(self, arf, id=None):
"""Add or replace the ARF in a response component.
This replaces the existing ARF of the response, keeping the
previous RMF (if set). Use the delete_response method to
remove the response, rather than setting arf to None.
Parameters
----------
arf : sherpa.astro.data.DataARF instance
The ARF to add.
id : int or str, optional
The identifier of the response component. If it is None
then the default response identifier is used.
See Also
--------
delete_response, set_response, set_rmf
"""
self.set_response(arf, self.get_rmf(id), id)
def set_rmf(self, rmf, id=None):
"""Add or replace the RMF in a response component.
This replaces the existing RMF of the response, keeping the
previous ARF (if set). Use the delete_response method to
remove the response, rather than setting rmf to None.
Parameters
----------
rmf : sherpa.astro.data.DataRMF instance
The RMF to add.
id : int or str, optional
The identifier of the response component. If it is None
then the default response identifier is used.
See Also
--------
delete_response, set_response, set_arf
"""
self.set_response(self.get_arf(id), rmf, id)
def get_specresp(self, filter=False):
"""Return the effective area values for the data set.
Parameters
----------
filter : bool, optional
Should the filter attached to the data set be applied to
the ARF or not. The default is `False`.
Returns
-------
arf : array
The effective area values for the data set (or background
component).
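Examples
--------
A sketch, assuming `pha` has both an ARF and a RMF attached:
>>> specresp = pha.get_specresp()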
"""
filter = bool_cast(filter)
self.notice_response(False)
arf, rmf = self.get_response()
newarf = None
if arf is not None and rmf is not None:
specresp = arf.get_dep()
elo, ehi = arf.get_indep()
lo, hi = self._get_ebins(group=False)
newarf = interpolate(lo, elo, specresp)
newarf[newarf <= 0] = 1.
if filter:
newarf = self.apply_filter(newarf, self._middle)
return newarf
# The energy bins can be grouped or ungrouped. By default,
# they should be grouped if the data are grouped. There are
# certain contexts (e.g., plotting) where we will retrieve the
# energy bins, and later filter the data; but filtering
# is automatically followed by grouping. Grouping the data
# twice is an error.
def _get_ebins(self, response_id=None, group=True):
group = bool_cast(group)
arf, rmf = self.get_response(response_id)
if (self.bin_lo is not None) and (self.bin_hi is not None):
elo = self.bin_lo
ehi = self.bin_hi
if (elo[0] > elo[-1]) and (ehi[0] > ehi[-1]):
elo = self._hc / self.bin_hi
ehi = self._hc / self.bin_lo
elif rmf is not None:
if (rmf.e_min is None) or (rmf.e_max is None):
raise DataErr('noenergybins', 'RMF')
elo = rmf.e_min
ehi = rmf.e_max
elif arf is not None:
elo = arf.energ_lo
ehi = arf.energ_hi
else:
elo = self.channel - 0.5
ehi = self.channel + 0.5
if self.units == 'channel':
elo = self.channel - 0.5
ehi = self.channel + 0.5
# If the data are grouped, then we should group up
# the energy bins as well. E.g., if group 1 is
# channels 1-5, then the energy boundaries for the
# *group* should be elo[0], ehi[4].
if self.grouped and group:
elo = self.apply_grouping(elo, self._min)
ehi = self.apply_grouping(ehi, self._max)
return (elo, ehi)
def get_indep(self, filter=True):
if filter:
return (self.get_noticed_channels(),)
return (self.channel,)
def _get_indep(self, filter=False):
if (self.bin_lo is not None) and (self.bin_hi is not None):
elo = self.bin_lo
ehi = self.bin_hi
if (elo[0] > elo[-1]) and (ehi[0] > ehi[-1]):
if self.units == 'wavelength':
return (elo, ehi)
elo = self._hc / self.bin_hi
ehi = self._hc / self.bin_lo
else:
energylist = []
for id in self.response_ids:
arf, rmf = self.get_response(id)
lo = None
hi = None
if rmf is not None:
lo = rmf.energ_lo
hi = rmf.energ_hi
if filter:
lo, hi = rmf.get_indep()
elif arf is not None:
lo = arf.energ_lo
hi = arf.energ_hi
if filter:
lo, hi = arf.get_indep()
energylist.append((lo, hi))
if len(energylist) > 1:
elo, ehi, lookuptable = compile_energy_grid(energylist)
elif (not energylist or
(len(energylist) == 1 and
numpy.equal(energylist[0], None).any())):
raise DataErr('noenergybins', 'Response')
else:
elo, ehi = energylist[0]
lo, hi = elo, ehi
if self.units == 'wavelength':
lo = self._hc / ehi
hi = self._hc / elo
return (lo, hi)
def _channel_to_group(self, val):
"""Convert channel number to group number.
For ungrouped data channel and group numbering are the
same.
"""
if not self.grouped:
return val
# The edge channels of each group.
#
lo = self.apply_grouping(self.channel, self._min)
hi = self.apply_grouping(self.channel, self._max)
val = numpy.asarray(val).astype(numpy.int_)
res = []
for v in val.flat:
# could follow _energy_to_channel but for now go
# with something simple
if v < self.channel[0]:
ans = self.channel[0]
elif v > self.channel[-1]:
ans = self.channel[-1]
else:
idx, = numpy.where((v >= lo) & (v <= hi))
ans = idx[0] + 1
res.append(ans)
res = numpy.asarray(res, SherpaFloat)
if val.shape == ():
return res[0]
return res
def _group_to_channel(self, val, group=True, response_id=None):
"""Convert group number to channel number.
For ungrouped data channel and group numbering are the
same. The mid-point of each group is used (rounded down
if not an integer).
"""
if not self.grouped or not group:
return val
# The middle channel of each group.
#
mid = self.apply_grouping(self.channel, self._middle)
# Convert to an integer (this keeps the channel within
# the group).
#
mid = numpy.floor(mid)
val = numpy.asarray(val).astype(numpy.int_) - 1
try:
return mid[val]
except IndexError:
raise DataErr('invalid group number: {}'.format(val))
def _channel_to_energy(self, val, group=True, response_id=None):
elo, ehi = self._get_ebins(response_id=response_id, group=group)
val = numpy.asarray(val).astype(numpy.int_) - 1
try:
return (elo[val] + ehi[val]) / 2.0
except IndexError:
raise DataErr('invalidchannel', val)
def _energy_to_channel(self, val):
elo, ehi = self._get_ebins()
# special case handling no noticed data (e.g. ignore_bad
# removes all bins); assume if elo is empty then so is ehi.
#
if len(elo) == 0:
raise DataErr('notmask')
val = numpy.asarray(val)
res = []
for v in val.flat:
if tuple(numpy.flatnonzero(elo <= v)) == ():
if elo[0] > elo[-1] and ehi[0] > ehi[-1]:
res.append(SherpaFloat(len(elo)))
else:
res.append(SherpaFloat(1))
elif tuple(numpy.flatnonzero(ehi > v)) == ():
if elo[0] > elo[-1] and ehi[0] > ehi[-1]:
res.append(SherpaFloat(1))
else:
res.append(SherpaFloat(len(ehi)))
elif tuple(numpy.flatnonzero((elo <= v) & (ehi > v)) + 1) != ():
res.append(SherpaFloat(
numpy.flatnonzero((elo <= v) & (ehi > v)) + 1))
elif (elo <= v).argmin() == (ehi > v).argmax():
res.append(SherpaFloat((elo <= v).argmin()))
else:
raise DataErr("energytochannel", v)
if val.shape == ():
return res[0]
return numpy.asarray(res, SherpaFloat)
_hc = 12.39841874 # nist.gov in [keV-Angstrom]
def _channel_to_wavelength(self, val, group=True, response_id=None):
tiny = numpy.finfo(numpy.float32).tiny
vals = numpy.asarray(self._channel_to_energy(val, group, response_id))
if vals.shape == ():
if vals == 0.0:
vals = tiny
else:
vals[vals == 0.0] = tiny
vals = self._hc / vals
return vals
def _wavelength_to_channel(self, val):
tiny = numpy.finfo(numpy.float32).tiny
vals = numpy.asarray(val)
if vals.shape == ():
if vals == 0.0:
vals = tiny
else:
vals[vals == 0.0] = tiny
vals = self._hc / vals
return self._energy_to_channel(vals)
default_background_id = 1
"""The identifier for the background component when not set."""
def _fix_background_id(self, id):
if id is None:
id = self.default_background_id
return id
def get_background(self, id=None):
"""Return the background component.
Parameters
----------
id : int or str, optional
The identifier of the background component. If it is None
then the default background identifier is used.
Returns
-------
bkg : sherpa.astro.data.DataPHA instance or None
The background dataset. If there is no component then None
is returned.
See Also
--------
delete_background, set_background
"""
id = self._fix_background_id(id)
return self._backgrounds.get(id)
def set_background(self, bkg, id=None):
"""Add or replace a background component.
If the background has no grouping of quality arrays then they
are copied from the source region. If the background has no
response information (ARF or RMF) then the response is copied
from the source region.
Parameters
----------
bkg : sherpa.astro.data.DataPHA instance
The background dataset to add. This object may be changed
by this method.
id : int or str, optional
The identifier of the background component. If it is None
then the default background identifier is used.
See Also
--------
delete_background, get_background
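Examples
--------
A sketch of attaching a background (here `bkg` is assumed to be a
DataPHA object for the background region, and `pha` has no
existing background components):
>>> pha.set_background(bkg)
>>> pha.background_ids
[1]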
"""
id = self._fix_background_id(id)
self._backgrounds[id] = bkg
ids = self.background_ids[:]
if id not in ids:
ids.append(id)
self.background_ids = ids
# Copy over data from the source to the background
# if it's not present in the background:
# - grouping and quality
# - response information (ONLY THE FIRST TERM)
#
# The units (only when a response is present), rate, and
# plot_fac values are always copied.
#
if bkg.grouping is None:
bkg.grouping = self.grouping
bkg.grouped = bkg.grouping is not None
if bkg.quality is None:
bkg.quality = self.quality
if bkg.get_response() == (None, None):
bkg.set_response(*self.get_response())
if bkg.get_response() != (None, None):
bkg.units = self.units
bkg.rate = self.rate
bkg.plot_fac = self.plot_fac
def delete_background(self, id=None):
"""Remove the background component.
If the background component does not exist then the method
does nothing.
Parameters
----------
id : int or str, optional
The identifier of the background component. If it is None
then the default background identifier is used.
See Also
--------
set_background
Notes
-----
If this call removes the last of the background components
then the subtracted flag is cleared (if set).
"""
id = self._fix_background_id(id)
self._backgrounds.pop(id, None)
if len(self._backgrounds) == 0:
self._subtracted = False
ids = self.background_ids[:]
if id in ids:
ids.remove(id)
self.background_ids = ids
def get_background_scale(self, bkg_id=1, units='counts',
group=True, filter=False):
"""Return the correction factor for the background dataset.
.. versionchanged:: 4.12.2
The bkg_id, units, group, and filter parameters have been
added and the routine no longer calculates the average
scaling for all the background components but just for the
given component.
Parameters
----------
bkg_id : int or str, optional
The background component to use (the default is 1).
units : {'counts', 'rate'}, optional
The correction is applied to a model defined as counts, the
default, or a rate. The latter should be used when
calculating the correction factor for adding the background
data to the source aperture.
group : bool, optional
Should the values be grouped to match the data?
filter : bool, optional
Should the values be filtered to match the data?
Returns
-------
scale : None, number, or NumPy array
The scaling factor to correct the background data onto the
source data set. If bkg_id is not valid then None is
returned.
Notes
-----
The correction factor when units is 'counts' is::
scale_exposure * scale_backscal * scale_areascal / nbkg
where nbkg is the number of background components and
scale_x is the source value divided by the background
value for the field x.
When units is 'rate' the correction is::
scale_backscal / nbkg
and it is currently uncertain whether it should include the
AREASCAL scaling.
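Examples
--------
A sketch, assuming a background with identifier 1 has been set:
>>> scale = pha.get_background_scale()
>>> rscale = pha.get_background_scale(units='rate', group=False)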
"""
if units not in ['counts', 'rate']:
raise ValueError("Invalid units argument: {}".format(units))
if bkg_id not in self.background_ids:
return None
nbkg = len(self.background_ids)
def correct(obj):
"""Correction factor for the object"""
ans = 1.0
# Should we set 0 values to 1 at this stage?
#
if obj.backscal is not None:
ans *= self._check_scale(obj.backscal, group=False)
if obj.areascal is not None and units == 'counts':
ans *= self._check_scale(obj.areascal, group=False)
if obj.exposure is not None and units == 'counts':
ans *= self._check_scale(obj.exposure, group=False)
return ans
src = correct(self)
bkg = correct(self.get_background(bkg_id))
scale = src / bkg / nbkg
return self._check_scale(scale, group=group, filter=filter)
def _check_scale(self, scale, group=True, filter=False):
"""Ensure the scale value is positive and filtered/grouped.
Parameters
----------
scale : number or numpy array
The scale factor.
group : bool, optional
Is any grouping applied to the data? This is only
relevant for an array.
filter : bool, optional
Is any filter applied? This is only checked if group
is True.
Returns
-------
scale : number or numpy array
Negative or zero values are replaced by 1.0.
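Examples
--------
A sketch of the scalar behaviour, assuming `pha` is a DataPHA
instance (non-positive values fall back to 1.0):
>>> pha._check_scale(0.0)
1.0
>>> pha._check_scale(2.5)
2.5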
"""
if numpy.isscalar(scale) and scale <= 0.0:
scale = 1.0
elif numpy.iterable(scale):
scale = numpy.asarray(scale, dtype=SherpaFloat)
if group:
if filter:
scale = self.apply_filter(scale, self._middle)
else:
scale = self.apply_grouping(scale, self._middle)
scale[scale <= 0.0] = 1.0
return scale
def get_backscal(self, group=True, filter=False):
"""Return the background scaling of the PHA data set.
Return the BACKSCAL setting [BSCAL]_ for the PHA data set.
Parameters
----------
group : bool, optional
Should the values be grouped to match the data?
filter : bool, optional
Should the values be filtered to match the data?
Returns
-------
backscal : number or ndarray
The BACKSCAL value, which can be a scalar or a 1D array.
See Also
--------
get_areascal, get_background_scale
Notes
-----
The BACKSCAL value can be defined as the ratio of the area of
the source (or background) extraction region in image pixels
to the total number of image pixels. The fact that there is no
ironclad definition for this quantity does not matter so long
as the value for a source dataset and its associated
background dataset are defined in the same manner, because
only the ratio of source and background BACKSCAL values is
used. It can be a scalar or an array.
References
----------
.. [BSCAL] "The OGIP Spectral File Format", Arnaud, K. & George, I.
http://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/spectra/ogip_92_007/ogip_92_007.html
Examples
--------
>>> pha.get_backscal()
7.8504301607718007e-06
"""
backscal = self.backscal
if backscal is not None:
backscal = self._check_scale(backscal, group, filter)
return backscal
def get_areascal(self, group=True, filter=False):
"""Return the fractional area factor of the PHA data set.
Return the AREASCAL setting [ASCAL]_ for the PHA data set.
Parameters
----------
group : bool, optional
Should the values be grouped to match the data?
filter : bool, optional
Should the values be filtered to match the data?
Returns
-------
areascal : number or ndarray
The AREASCAL value, which can be a scalar or a 1D array.
See Also
--------
get_backscal, get_background_scale
Notes
-----
The fractional area scale is normally set to 1, with the ARF used
to scale the model.
References
----------
.. [ASCAL] "The OGIP Spectral File Format", Arnaud, K. & George, I.
http://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/spectra/ogip_92_007/ogip_92_007.html
Examples
--------
>>> pha.get_areascal()
1.0
"""
areascal = self.areascal
if areascal is not None:
areascal = self._check_scale(areascal, group, filter)
return areascal
def apply_filter(self, data, groupfunc=numpy.sum):
"""
Filter the array data, first passing it through apply_grouping()
(using groupfunc) and then applying the general filters
"""
if data is None:
return data
if len(data) != len(self.counts):
counts = numpy.zeros(len(self.counts), dtype=SherpaFloat)
mask = self.get_mask()
if mask is not None:
counts[mask] = numpy.asarray(data, dtype=SherpaFloat)
data = counts
# else:
# raise DataErr('mismatch', "filter", "data array")
return super().apply_filter(self.apply_grouping(data, groupfunc))
def apply_grouping(self, data, groupfunc=numpy.sum):
"""
Apply the data set's grouping scheme to the array data,
combining the grouped data points with groupfunc, and return
the grouped array. If the data set has no associated grouping
scheme or the data are ungrouped, data is returned unaltered.
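For example, to sum up the counts in each group (a sketch, assuming
`pha` is a grouped DataPHA object):
>>> gcounts = pha.apply_grouping(pha.counts)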
"""
if data is None or not self.grouped:
return data
groups = self.grouping
filter = self.quality_filter
if filter is None:
return do_group(data, groups, groupfunc.__name__)
if len(data) != len(filter) or len(groups) != len(filter):
raise DataErr('mismatch', "quality filter", "data array")
filtered_data = numpy.asarray(data)[filter]
groups = numpy.asarray(groups)[filter]
grouped_data = do_group(filtered_data, groups, groupfunc.__name__)
if data is self.channel and groupfunc is self._make_groups:
return numpy.arange(1, len(grouped_data) + 1, dtype=int)
return grouped_data
def ignore_bad(self):
"""Exclude channels marked as bad.
Ignore any bin in the PHA data set which has a quality value
that is larger than zero.
Raises
------
sherpa.utils.err.DataErr
If the data set has no quality array.
See Also
--------
ignore : Exclude data from the fit.
notice : Include data in the fit.
Notes
-----
Bins with a non-zero quality setting are not automatically
excluded when a data set is created.
If the data set has been grouped, then calling `ignore_bad`
will remove any filter applied to the data set. If this
happens a warning message will be displayed.
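Examples
--------
A sketch, assuming the data set contains a quality array:
>>> pha.ignore_bad()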
"""
if self.quality is None:
raise DataErr("noquality", self.name)
qual_flags = ~numpy.asarray(self.quality, bool)
if self.grouped and (self.mask is not True):
self.notice()
warning('filtering grouped data with quality flags,' +
' previous filters deleted')
elif not self.grouped:
# if ungrouped, create/combine with self.mask
if self.mask is not True:
self.mask = self.mask & qual_flags
return
else:
self.mask = qual_flags
return
# self.quality_filter used for pre-grouping filter
self.quality_filter = qual_flags
def _dynamic_group(self, group_func, *args, **kwargs):
keys = list(kwargs.keys())[:]
for key in keys:
if kwargs[key] is None:
kwargs.pop(key)
old_filter = self.get_filter(group=False)
do_notice = numpy.iterable(self.mask)
self.grouping, self.quality = group_func(*args, **kwargs)
self.group()
self._original_groups = False
if do_notice:
# self.group() above does not clear the filter (it only sets a
# flag), so manually clear the filter here
self.ignore()
for vals in parse_expr(old_filter):
self.notice(*vals)
# warning('grouping flags have changed, noticing all bins')
# Have to move this check here; as formerly written, reference
# to pygroup functions happened *before* checking groupstatus,
# in _dynamic_group. So we did not return the intended error
# message; rather, a NameError was raised stating that pygroup
# did not exist in global scope (not too clear to the user).
#
# The groupstatus check thus has to be done in *each* of the following
# group functions.
# # Dynamic grouping functions now automatically impose the
# # same grouping conditions on *all* associated background data sets.
# # CIAO 4.5 bug fix, 05/01/2012
def group_bins(self, num, tabStops=None):
"""Group into a fixed number of bins.
Combine the data so that there are `num` equal-width bins (or
groups). The binning scheme is applied to all the channels,
but any existing filter - created by the `ignore` or `notice`
set of functions - is re-applied after the data has been
grouped.
Parameters
----------
num : int
The number of bins in the grouped data set. Each bin
will contain the same number of channels.
tabStops : array of int or bool, optional
If set, indicate one or more ranges of channels that should
not be included in the grouped output. The array should
match the number of channels in the data set and non-zero or
`True` means that the channel should be ignored from the
grouping (use 0 or `False` otherwise).
See Also
--------
group_adapt : Adaptively group to a minimum number of counts.
group_adapt_snr : Adaptively group to a minimum signal-to-noise ratio.
group_counts : Group into a minimum number of counts per bin.
group_snr : Group into a minimum signal-to-noise ratio.
group_width : Group into a fixed bin width.
Notes
-----
Since the bin width is an integer number of channels, it is
likely that some channels will be "left over". This is even
more likely when the `tabStops` parameter is set. If this
happens, a warning message will be displayed to the screen and
the quality value for these channels will be set to 2.
"""
if not groupstatus:
raise ImportErr('importfailed', 'group', 'dynamic grouping')
self._dynamic_group(pygroup.grpNumBins, len(self.channel), num,
tabStops=tabStops)
for bkg_id in self.background_ids:
bkg = self.get_background(bkg_id)
bkg.group_bins(num, tabStops=tabStops)
def group_width(self, val, tabStops=None):
"""Group into a fixed bin width.
Combine the data so that each bin contains `val` channels.
The binning scheme is applied to all the channels, but any
existing filter - created by the `ignore` or `notice` set of
functions - is re-applied after the data has been grouped.
Parameters
----------
val : int
The number of channels to combine into a group.
tabStops : array of int or bool, optional
If set, indicate one or more ranges of channels that should
not be included in the grouped output. The array should
match the number of channels in the data set and non-zero or
`True` means that the channel should be ignored from the
grouping (use 0 or `False` otherwise).
See Also
--------
group_adapt : Adaptively group to a minimum number of counts.
group_adapt_snr : Adaptively group to a minimum signal-to-noise ratio.
group_bins : Group into a fixed number of bins.
group_counts : Group into a minimum number of counts per bin.
group_snr : Group into a minimum signal-to-noise ratio.
Notes
-----
Unless the requested bin width is a factor of the number of
channels (and no `tabStops` parameter is given), some
channels will be "left over". If this happens, a warning
message will be displayed to the screen and the quality value
for these channels will be set to 2.
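Examples
--------
A sketch of grouping the data into 16-channel-wide bins:
>>> pha.group_width(16)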
"""
if not groupstatus:
raise ImportErr('importfailed', 'group', 'dynamic grouping')
self._dynamic_group(pygroup.grpBinWidth, len(self.channel), val,
tabStops=tabStops)
for bkg_id in self.background_ids:
bkg = self.get_background(bkg_id)
bkg.group_width(val, tabStops=tabStops)
def group_counts(self, num, maxLength=None, tabStops=None):
"""Group into a minimum number of counts per bin.
Combine the data so that each bin contains `num` or more
counts. The binning scheme is applied to all the channels, but
any existing filter - created by the `ignore` or `notice` set
of functions - is re-applied after the data has been grouped.
The background is *not* included in this calculation; the
calculation is done on the raw data even if `subtract` has
been called on this data set.
Parameters
----------
num : int
The number of channels to combine into a group.
maxLength : int, optional
The maximum number of channels that can be combined into a
single group.
tabStops : array of int or bool, optional
If set, indicate one or more ranges of channels that should
not be included in the grouped output. The array should
match the number of channels in the data set and non-zero or
`True` means that the channel should be ignored from the
grouping (use 0 or `False` otherwise).
See Also
--------
group_adapt : Adaptively group to a minimum number of counts.
group_adapt_snr : Adaptively group to a minimum signal-to-noise ratio.
group_bins : Group into a fixed number of bins.
group_snr : Group into a minimum signal-to-noise ratio.
group_width : Group into a fixed bin width.
Notes
-----
If channels can not be placed into a "valid" group, then a
warning message will be displayed to the screen and the
quality value for these channels will be set to 2.
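Examples
--------
A sketch of requiring at least 20 counts per group, optionally
limiting each group to at most 50 channels:
>>> pha.group_counts(20)
>>> pha.group_counts(20, maxLength=50)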
"""
if not groupstatus:
raise ImportErr('importfailed', 'group', 'dynamic grouping')
self._dynamic_group(pygroup.grpNumCounts, self.counts, num,
maxLength=maxLength, tabStops=tabStops)
for bkg_id in self.background_ids:
bkg = self.get_background(bkg_id)
bkg.group_counts(num, maxLength=maxLength, tabStops=tabStops)
# DOC-TODO: see discussion in astro.ui.utils regarding errorCol
def group_snr(self, snr, maxLength=None, tabStops=None, errorCol=None):
"""Group into a minimum signal-to-noise ratio.
Combine the data so that each bin has a signal-to-noise ratio
which exceeds `snr`. The binning scheme is applied to all the
channels, but any existing filter - created by the `ignore` or
`notice` set of functions - is re-applied after the data has
been grouped. The background is *not* included in this
calculation; the calculation is done on the raw data even if
`subtract` has been called on this data set.
Parameters
----------
snr : number
The minimum signal-to-noise ratio that must be exceeded
to form a group of channels.
maxLength : int, optional
The maximum number of channels that can be combined into a
single group.
tabStops : array of int or bool, optional
If set, indicate one or more ranges of channels that should
not be included in the grouped output. The array should
match the number of channels in the data set and non-zero or
`True` means that the channel should be ignored from the
grouping (use 0 or `False` otherwise).
errorCol : array of num, optional
If set, the error to use for each channel when calculating
the signal-to-noise ratio. If not given then Poisson
statistics is assumed. A warning is displayed for each
zero-valued error estimate.
See Also
--------
group_adapt : Adaptively group to a minimum number of counts.
group_adapt_snr : Adaptively group to a minimum signal-to-noise ratio.
group_bins : Group into a fixed number of bins.
group_counts : Group into a minimum number of counts per bin.
group_width : Group into a fixed bin width.
Notes
-----
If channels can not be placed into a "valid" group, then a
warning message will be displayed to the screen and the
quality value for these channels will be set to 2.
"""
if not groupstatus:
raise ImportErr('importfailed', 'group', 'dynamic grouping')
self._dynamic_group(pygroup.grpSnr, self.counts, snr,
maxLength=maxLength, tabStops=tabStops,
errorCol=errorCol)
for bkg_id in self.background_ids:
bkg = self.get_background(bkg_id)
bkg.group_snr(snr, maxLength=maxLength, tabStops=tabStops,
errorCol=errorCol)
def group_adapt(self, minimum, maxLength=None, tabStops=None):
"""Adaptively group to a minimum number of counts.
Combine the data so that each bin contains `minimum` or more
counts. The difference to `group_counts` is that this
algorithm starts with the bins with the largest signal, in
order to avoid over-grouping bright features, rather than at
the first channel of the data. The adaptive nature means that
low-count regions between bright features may not end up in
groups with the minimum number of counts. The binning scheme
is applied to all the channels, but any existing filter -
created by the `ignore` or `notice` set of functions - is
re-applied after the data has been grouped.
Parameters
----------
minimum : int
The number of channels to combine into a group.
maxLength : int, optional
The maximum number of channels that can be combined into a
single group.
tabStops : array of int or bool, optional
If set, indicate one or more ranges of channels that should
not be included in the grouped output. The array should
match the number of channels in the data set and non-zero or
`True` means that the channel should be ignored from the
grouping (use 0 or `False` otherwise).
See Also
--------
group_adapt_snr : Adaptively group to a minimum signal-to-noise ratio.
group_bins : Group into a fixed number of bins.
group_counts : Group into a minimum number of counts per bin.
group_snr : Group into a minimum signal-to-noise ratio.
group_width : Group into a fixed bin width.
Notes
-----
If channels can not be placed into a "valid" group, then a
warning message will be displayed to the screen and the
quality value for these channels will be set to 2.
"""
if not groupstatus:
raise ImportErr('importfailed', 'group', 'dynamic grouping')
self._dynamic_group(pygroup.grpAdaptive, self.counts, minimum,
maxLength=maxLength, tabStops=tabStops)
for bkg_id in self.background_ids:
bkg = self.get_background(bkg_id)
bkg.group_adapt(minimum, maxLength=maxLength,
tabStops=tabStops)
# DOC-TODO: see discussion in astro.ui.utils regarding errorCol
def group_adapt_snr(self, minimum, maxLength=None, tabStops=None,
errorCol=None):
"""Adaptively group to a minimum signal-to-noise ratio.
Combine the data so that each bin has a signal-to-noise ratio
which exceeds `minimum`. The difference to `group_snr` is that
this algorithm starts with the bins with the largest signal,
in order to avoid over-grouping bright features, rather than
at the first channel of the data. The adaptive nature means
that low-count regions between bright features may not end up
in groups with the minimum number of counts. The binning
scheme is applied to all the channels, but any existing filter
- created by the `ignore` or `notice` set of functions - is
re-applied after the data has been grouped.
Parameters
----------
minimum : number
The minimum signal-to-noise ratio that must be exceeded
to form a group of channels.
maxLength : int, optional
The maximum number of channels that can be combined into a
single group.
tabStops : array of int or bool, optional
If set, indicate one or more ranges of channels that should
not be included in the grouped output. The array should
match the number of channels in the data set and non-zero or
`True` means that the channel should be ignored from the
grouping (use 0 or `False` otherwise).
errorCol : array of num, optional
If set, the error to use for each channel when calculating
the signal-to-noise ratio. If not given then Poisson
statistics is assumed. A warning is displayed for each
zero-valued error estimate.
See Also
--------
group_adapt : Adaptively group to a minimum number of counts.
group_bins : Group into a fixed number of bins.
group_counts : Group into a minimum number of counts per bin.
group_snr : Group into a minimum signal-to-noise ratio.
group_width : Group into a fixed bin width.
Notes
-----
If channels can not be placed into a "valid" group, then a
warning message will be displayed to the screen and the
quality value for these channels will be set to 2.
"""
if not groupstatus:
raise ImportErr('importfailed', 'group', 'dynamic grouping')
self._dynamic_group(pygroup.grpAdaptiveSnr, self.counts, minimum,
maxLength=maxLength, tabStops=tabStops,
errorCol=errorCol)
for bkg_id in self.background_ids:
bkg = self.get_background(bkg_id)
bkg.group_adapt_snr(minimum, maxLength=maxLength,
tabStops=tabStops, errorCol=errorCol)
def eval_model(self, modelfunc):
return modelfunc(*self.get_indep(filter=False))
def eval_model_to_fit(self, modelfunc):
return self.apply_filter(modelfunc(*self.get_indep(filter=True)))
def sum_background_data(self,
get_bdata_func=(lambda key, bkg: bkg.counts)):
"""Sum up data, applying the background correction value.
Parameters
----------
get_bdata_func : function, optional
What data should be used for each background dataset. The
function takes the background identifier and background
DataPHA object and returns the data to use. The default is
to use the counts array of the background dataset.
Returns
-------
value : scalar or NumPy array
The sum of the data, including any area, background, and
exposure-time corrections.
Notes
-----
For each associated background, the data is retrieved (via
the get_bdata_func parameter), and then
- divided by its BACKSCAL value (if set)
- divided by its AREASCAL value (if set)
- divided by its exposure time (if set)
The individual background components are then summed together,
and then multiplied by the source BACKSCAL (if set),
multiplied by the source AREASCAL (if set), and multiplied
by the source exposure time (if set). The final step is
to divide by the number of background files used.
Example
-------
Calculate the background counts, per channel, scaled to match
the source:
>>> bcounts = src.sum_background_data()
Calculate the scaling factor that you need to multiply the
background data to match the source data. In this case the
background data has been replaced by the value 1 (rather than
the per-channel values used with the default argument):
>>> bscale = src.sum_background_data(lambda k, d: 1)
"""
bdata_list = []
for key in self.background_ids:
bkg = self.get_background(key)
bdata = get_bdata_func(key, bkg)
backscal = bkg.backscal
if backscal is not None:
backscal = self._check_scale(backscal, group=False)
bdata = bdata / backscal
areascal = bkg.get_areascal(group=False)
if areascal is not None:
bdata = bdata / areascal
if bkg.exposure is not None:
bdata = bdata / bkg.exposure
bdata_list.append(bdata)
nbkg = len(bdata_list)
if nbkg == 0:
# do not have a good id to use for the error message
raise DataErr('nobkg', self.name)
if nbkg == 1:
bkgsum = bdata_list[0]
else:
bkgsum = sum(bdata_list)
backscal = self.backscal
if backscal is not None:
backscal = self._check_scale(backscal, group=False)
bkgsum = backscal * bkgsum
areascal = self.areascal
if areascal is not None:
areascal = self._check_scale(areascal, group=False)
bkgsum = areascal * bkgsum
if self.exposure is not None:
bkgsum = self.exposure * bkgsum
return bkgsum / SherpaFloat(nbkg)
def get_dep(self, filter=False):
# FIXME: Aneta says we need to group *before* subtracting, but that
# won't work (I think) when backscal is an array
# if not self.subtracted:
# return self.counts
# return self.counts - self.sum_background_data()
dep = self.counts
filter = bool_cast(filter)
# The area scaling is not applied to the data, since it
# should be being applied to the model via the *PHA
# instrument model. Note however that the background
# contribution does include the source AREASCAL value
# (in the same way that the source BACKSCAL value
# is used).
#
if self.subtracted:
bkg = self.sum_background_data()
if len(dep) != len(bkg):
raise DataErr("subtractlength")
dep = dep - bkg
if filter:
dep = self.apply_filter(dep)
return dep
def set_dep(self, val):
# QUS: should this "invert" the areascaling to val
# to get the stored values?
#
# Otherwise, when areascal /= 1
# y1 = d.get_dep()
# d.set_dep(y1)
# y2 = d.get_dep()
# y1 != y2
#
# Or perhaps it removes the areascal value in this case?
# We already have this split in the API when background data
# is available and is subtracted.
#
if numpy.iterable(val):
dep = numpy.asarray(val, SherpaFloat)
else:
val = SherpaFloat(val)
dep = numpy.array([val] * len(self.get_indep()[0]))
setattr(self, 'counts', dep)
def get_staterror(self, filter=False, staterrfunc=None):
"""Return the statistical error.
The staterror column is used if defined, otherwise the
function provided by the staterrfunc argument is used to
calculate the values.
Parameters
----------
filter : bool, optional
Should the channel filter be applied to the return values?
staterrfunc : function reference, optional
The function to use to calculate the errors if the
staterror field is None. The function takes one argument,
the counts (after grouping and filtering), and returns an
array of values which represents the one-sigma error for each
element of the input array. This argument is designed to
work with implementations of the sherpa.stats.Stat.calc_staterror
method.
Returns
-------
staterror : array or None
The statistical error. It will be grouped and,
if filter=True, filtered. The contribution from any
associated background components will be included if
the background-subtraction flag is set.
Notes
-----
There is no scaling by the AREASCAL setting, but background
values are scaled by their AREASCAL settings. It is not at all
obvious that the current code is doing the right thing, or that
this is the right approach.
Examples
--------
>>> dy = dset.get_staterror()
Ensure that there is no pre-defined statistical-error column
and then use the Chi2DataVar statistic to calculate the errors:
>>> stat = sherpa.stats.Chi2DataVar()
>>> dset.set_staterror(None)
>>> dy = dset.get_staterror(staterrfunc=stat.calc_staterror)
"""
staterr = self.staterror
filter = bool_cast(filter)
if filter:
staterr = self.apply_filter(staterr, self._sum_sq)
else:
staterr = self.apply_grouping(staterr, self._sum_sq)
# The source AREASCAL is not applied here, but the
# background term is.
#
if (staterr is None) and (staterrfunc is not None):
cnts = self.counts
if filter:
cnts = self.apply_filter(cnts)
else:
cnts = self.apply_grouping(cnts)
staterr = staterrfunc(cnts)
# Need to apply the area scaling to the calculated
# errors. Grouping and filtering complicate this; is
# _middle the best choice here?
#
"""
area = self.areascal
if staterr is not None and area is not None:
if numpy.isscalar(area):
area = numpy.zeros(self.channel.size) + area
# TODO: replace with _check_scale?
if filter:
area = self.apply_filter(area, self._middle)
else:
area = self.apply_grouping(area, self._middle)
staterr = staterr / area
"""
if (staterr is not None) and self.subtracted:
bkg_staterr_list = []
# for bkg in self._backgrounds.values():
for key in self.background_ids:
bkg = self.get_background(key)
berr = bkg.staterror
if filter:
berr = self.apply_filter(berr, self._sum_sq)
else:
berr = self.apply_grouping(berr, self._sum_sq)
if (berr is None) and (staterrfunc is not None):
bkg_cnts = bkg.counts
if filter:
bkg_cnts = self.apply_filter(bkg_cnts)
else:
bkg_cnts = self.apply_grouping(bkg_cnts)
# TODO: shouldn't the following logic be somewhere
# else more general?
if hasattr(staterrfunc, '__name__') and \
staterrfunc.__name__ == 'calc_chi2datavar_errors' and \
0.0 in bkg_cnts:
mask = (numpy.asarray(bkg_cnts) != 0.0)
berr = numpy.zeros(len(bkg_cnts))
berr[mask] = staterrfunc(bkg_cnts[mask])
else:
berr = staterrfunc(bkg_cnts)
# FIXME: handle this
# assert (berr is not None)
                # This case appears when the source dataset has an error
                # column and at least one of the background(s) does not.
                # Because staterr is not None and staterrfunc is None, I
                # think we should return None. This way the user knows to
                # call with staterrfunc next time.
if berr is None:
return None
bksl = bkg.backscal
if bksl is not None:
bksl = self._check_scale(bksl, filter=filter)
berr = berr / bksl
# Need to apply filter/grouping of the source dataset
# to the background areascal, so can not just say
# area = bkg.get_areascal(filter=filter)
#
area = bkg.areascal
if area is not None:
area = self._check_scale(area, filter=filter)
berr = berr / area
if bkg.exposure is not None:
berr = berr / bkg.exposure
berr = berr * berr
bkg_staterr_list.append(berr)
nbkg = len(bkg_staterr_list)
assert (nbkg > 0)
if nbkg == 1:
bkgsum = bkg_staterr_list[0]
else:
bkgsum = sum(bkg_staterr_list)
bscal = self.backscal
if bscal is not None:
bscal = self._check_scale(bscal, filter=filter)
bkgsum = (bscal * bscal) * bkgsum
# Correct the background counts by the source AREASCAL
# setting. Is this correct?
ascal = self.areascal
if ascal is not None:
ascal = self._check_scale(ascal, filter=filter)
bkgsum = (ascal * ascal) * bkgsum
if self.exposure is not None:
bkgsum = (self.exposure * self.exposure) * bkgsum
nbkg = SherpaFloat(nbkg)
if staterr is not None:
staterr = staterr * staterr + bkgsum / (nbkg * nbkg)
staterr = numpy.sqrt(staterr)
return staterr
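    # Hedged summary of the error propagation above (an editorial note, not
    # original code). Writing B, A and T for the BACKSCAL, AREASCAL and
    # EXPOSURE terms of the source (s) and of background i, the combined
    # statistical error is approximately
    #
    #   err = sqrt(err_src**2 +
    #              (B_s * A_s * T_s)**2 / nbkg**2 *
    #              sum_i((berr_i / (B_i * A_i * T_i))**2))
    #
    # with any unset (None) term skipped, matching the squaring and
    # averaging done in the loop over the associated backgrounds.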
def get_syserror(self, filter=False):
"""Return any systematic error.
Parameters
----------
filter : bool, optional
Should the channel filter be applied to the return values?
Returns
-------
syserror : array or None
The systematic error, if set. It will be grouped and,
if filter=True, filtered.
Notes
-----
There is no scaling by the AREASCAL setting.
"""
syserr = self.syserror
filter = bool_cast(filter)
if filter:
syserr = self.apply_filter(syserr, self._sum_sq)
else:
syserr = self.apply_grouping(syserr, self._sum_sq)
return syserr
def get_x(self, filter=False, response_id=None):
# We want the full channel grid with no grouping.
#
return self._from_channel(self.channel, group=False, response_id=response_id)
def get_xlabel(self):
xlabel = self.units.capitalize()
if self.units == 'energy':
xlabel += ' (keV)'
elif self.units == 'wavelength':
xlabel += ' (Angstrom)'
# elif self.units == 'channel' and self.grouped:
# xlabel = 'Group Number'
return xlabel
def _set_initial_quantity(self):
arf, rmf = self.get_response()
        # Change the analysis setting if the ARF has equal or higher
        # resolution, to allow for high-resolution model evaluation.
if arf is not None and rmf is None:
if len(arf.energ_lo) == len(self.channel):
self.units = 'energy'
# Only change analysis if RMF matches the parent PHA dataset.
if rmf is not None:
if len(self.channel) != len(rmf.e_min):
raise DataErr("incompatibleresp", rmf.name, self.name)
self.units = 'energy'
def _fix_y_units(self, val, filter=False, response_id=None):
"""Rescale the data to match the 'y' axis."""
if val is None:
return val
filter = bool_cast(filter)
# make a copy of data for units manipulation
val = numpy.array(val, dtype=SherpaFloat)
if self.rate and self.exposure is not None:
val /= self.exposure
# TODO: It is not clear if the areascal should always be applied,
# or only if self.rate is set (since it is being considered
# a "correction" to the exposure time, but don't we want
# to apply it in plots even if the Y axis is in counts?)
#
if self.areascal is not None:
areascal = self._check_scale(self.areascal, filter=filter)
val /= areascal
if self.grouped or self.rate:
if self.units != 'channel':
elo, ehi = self._get_ebins(response_id, group=False)
else:
elo, ehi = (self.channel, self.channel + 1.)
if filter:
# If we apply a filter, make sure that
# ebins are ungrouped before applying
# the filter.
elo = self.apply_filter(elo, self._min)
ehi = self.apply_filter(ehi, self._max)
elif self.grouped:
elo = self.apply_grouping(elo, self._min)
ehi = self.apply_grouping(ehi, self._max)
if self.units == 'energy':
ebin = ehi - elo
elif self.units == 'wavelength':
ebin = self._hc / elo - self._hc / ehi
elif self.units == 'channel':
ebin = ehi - elo
else:
raise DataErr("bad", "quantity", self.units)
val /= numpy.abs(ebin)
# The final step is to multiply by the X axis self.plot_fac
# times.
if self.plot_fac <= 0:
return val
scale = self.apply_filter(self.get_x(response_id=response_id),
self._middle)
for ii in range(self.plot_fac):
val *= scale
return val
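    # Rough summary of the rescaling applied by _fix_y_units (editorial
    # sketch, not original code). For a rate plot of grouped data the
    # returned value is approximately
    #
    #   y = counts / EXPOSURE / AREASCAL / bin_width * x**plot_fac
    #
    # where bin_width is the energy, wavelength or channel width of each
    # group and x is the filtered mid-point from get_x(); terms that are
    # not set, or not requested via the rate/grouped flags, are skipped.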
def get_y(self, filter=False, yfunc=None, response_id=None, use_evaluation_space=False):
vallist = Data.get_y(self, yfunc=yfunc)
filter = bool_cast(filter)
if not isinstance(vallist, tuple):
vallist = (vallist,)
newvallist = []
for val in vallist:
if filter:
val = self.apply_filter(val)
else:
val = self.apply_grouping(val)
val = self._fix_y_units(val, filter, response_id)
newvallist.append(val)
if len(vallist) == 1:
vallist = newvallist[0]
else:
vallist = tuple(newvallist)
return vallist
def get_yerr(self, filter=False, staterrfunc=None, response_id=None):
filter = bool_cast(filter)
err = self.get_error(filter, staterrfunc)
return self._fix_y_units(err, filter, response_id)
def get_xerr(self, filter=False, response_id=None):
elo, ehi = self._get_ebins(response_id=response_id)
filter = bool_cast(filter)
if filter:
# If we apply a filter, make sure that
# ebins are ungrouped before applying
# the filter.
elo, ehi = self._get_ebins(response_id, group=False)
elo = self.apply_filter(elo, self._min)
ehi = self.apply_filter(ehi, self._max)
return ehi - elo
def get_ylabel(self):
ylabel = 'Counts'
if self.rate and self.exposure:
ylabel += '/sec'
if self.rate or self.grouped:
if self.units == 'energy':
ylabel += '/keV'
elif self.units == 'wavelength':
ylabel += '/Angstrom'
elif self.units == 'channel':
ylabel += '/channel'
if self.plot_fac:
from sherpa.plot import backend
latex = backend.get_latex_for_string(
'^{}'.format(self.plot_fac))
ylabel += ' X {}{}'.format(self.units.capitalize(), latex)
return ylabel
@staticmethod
# Dummy function to tell apply_grouping to construct
# an array of groups.
def _make_groups(array):
pass
@staticmethod
def _middle(array):
array = numpy.asarray(array)
return (array.min() + array.max()) / 2.0
@staticmethod
def _min(array):
array = numpy.asarray(array)
return array.min()
@staticmethod
def _max(array):
array = numpy.asarray(array)
return array.max()
@staticmethod
def _sum_sq(array):
return numpy.sqrt(numpy.sum(array * array))
def get_noticed_channels(self):
"""Return the noticed channels.
Returns
-------
channels : ndarray
The noticed channels (this is independent of the
analysis setting).
"""
chans = self.channel
mask = self.get_mask()
if mask is None:
return chans
# This is added to address issue #361
#
# If there is a quality filter then the mask may be
# smaller than the chans array. It is not clear if this
# is the best location for this. If it is, then are there
# other locations where this logic is needed?
#
if self.quality_filter is not None and \
self.quality_filter.size != mask.size:
chans = chans[self.quality_filter]
return chans[mask]
def get_mask(self):
"""Returns the (ungrouped) mask.
Returns
-------
mask : ndarray or None
The mask, in channels, or None.
"""
groups = self.grouping
if self.mask is False:
return None
if self.mask is True or not self.grouped:
if self.quality_filter is not None:
return self.quality_filter
elif numpy.iterable(self.mask):
return self.mask
return None
if self.quality_filter is not None:
groups = groups[self.quality_filter]
return expand_grouped_mask(self.mask, groups)
def get_noticed_expr(self):
chans = self.get_noticed_channels()
if self.mask is False or len(chans) == 0:
return 'No noticed channels'
return create_expr(chans, format='%i')
def get_filter(self, group=True, format='%.12f', delim=':'):
"""Return the data filter as a string.
For grouped data, or when the analysis setting is not
channel, filter values refer to the center of the
channel or group.
Parameters
----------
group : bool, optional
Should the filter reflect the grouped data?
format : str, optional
The formatting of the numeric values (this is
ignored for channel units, as a format of "%i"
is used).
delim : str, optional
The string used to mark the low-to-high range.
Examples
--------
For a Chandra non-grating dataset which has been grouped:
>>> pha.set_analysis('energy')
>>> pha.notice(0.5, 7)
        >>> pha.get_filter(format='%.4f')
        '0.5183:8.2198'
>>> pha.set_analysis('channel')
>>> pha.get_filter()
'36:563'
The default is to show the data range for the grouped
dataset, which uses the center of each group. If
the grouping is turned off then the center of the
start and ending channel of each group is used
        (and so shows a larger data range):
        >>> pha.get_filter(format='%.4f')
        '0.5183:8.2198'
        >>> pha.get_filter(group=False, format='%.4f')
        '0.4745:9.8623'
"""
if self.mask is False:
return 'No noticed bins'
if numpy.iterable(self.mask):
mask = self.mask
else:
mask = None
if group:
# grouped noticed channels
#
x = self.apply_filter(self.channel, self._make_groups)
else:
# ungrouped noticed channels
x = self.get_noticed_channels()
# We need the "ungrouped" mask array. Need to check
# issue #361 since get_noticed_channels notes an
# issue that may be relevant here (so far this
# doesn't seem to be the case).
#
mask = self.get_mask()
# Safety check for users. Warn, but continue.
#
if mask is not None and mask.sum() != x.size:
warning("There is a mis-match in the ungrouped mask " +
"and data ({} vs {})".format(mask.sum(), x.size))
# Convert channels to appropriate quantity if necessary
x = self._from_channel(x, group=group)
if mask is None:
mask = numpy.ones(len(x), dtype=bool)
# Ensure the data is in ascending order for create_expr.
#
if self.units == 'wavelength':
x = x[::-1]
mask = mask[::-1]
if self.units == 'channel':
format = '%i'
return create_expr(x, mask=mask, format=format, delim=delim)
def get_filter_expr(self):
return (self.get_filter(format='%.4f', delim='-') +
' ' + self.get_xlabel())
def notice_response(self, notice_resp=True, noticed_chans=None):
notice_resp = bool_cast(notice_resp)
if notice_resp and noticed_chans is None:
noticed_chans = self.get_noticed_channels()
for id in self.response_ids:
arf, rmf = self.get_response(id)
_notice_resp(noticed_chans, arf, rmf)
def notice(self, lo=None, hi=None, ignore=False, bkg_id=None):
# If any background IDs are actually given, then impose
# the filter on those backgrounds *only*, and return. Do
# *not* impose filter on data itself. (Revision possibly
# this should be done in high-level UI?) SMD 10/25/12
filter_background_only = False
if bkg_id is not None:
if not numpy.iterable(bkg_id):
bkg_id = [bkg_id]
filter_background_only = True
else:
bkg_id = self.background_ids
# Automatically impose data's filter on background data sets.
# Units must agree for this to be meaningful, so temporarily
# make data and background units match. SMD 10/25/12
for bid in bkg_id:
bkg = self.get_background(bid)
old_bkg_units = bkg.units
try:
bkg.units = self.units
# If the background is all ignored then bkg.notice will
# do nothing (other than display an INFO message).
#
bkg.notice(lo, hi, ignore)
finally:
bkg.units = old_bkg_units
# If we're only supposed to filter backgrounds, return
if filter_background_only:
return
# Go on if we are also supposed to filter the source data
ignore = bool_cast(ignore)
if lo is None and hi is None:
self.quality_filter = None
self.notice_response(False)
# We do not want a "all data are masked out" error to cause
# this to fail; it should just do nothing (as trying to set
# a noticed range to include masked-out ranges would also
# be ignored).
#
# Convert to "group number" (which, for ungrouped data,
# is just channel number).
#
if lo is not None and type(lo) != str:
try:
lo = self._to_channel(lo)
except DataErr as de:
info("Skipping dataset {}: {}".format(self.name,
de))
return
if hi is not None and type(hi) != str:
try:
hi = self._to_channel(hi)
except DataErr as de:
info("Skipping dataset {}: {}".format(self.name,
de))
return
elo, ehi = self._get_ebins()
if ((self.units == "wavelength" and
elo[0] < elo[-1] and ehi[0] < ehi[-1]) or
(self.units == "energy" and
elo[0] > elo[-1] and ehi[0] > ehi[-1])):
lo, hi = hi, lo
# Don't use the middle of the channel anymore as the
# grouping function. That was just plain dumb.
# So just get back an array of groups 1-N, if grouped
# DATA-NOTE: need to clean this up.
#
groups = self.apply_grouping(self.channel,
self._make_groups)
self._data_space.filter.notice((lo,), (hi,),
(groups,), ignore)
def to_guess(self):
elo, ehi = self._get_ebins(group=False)
elo = self.apply_filter(elo, self._min)
ehi = self.apply_filter(ehi, self._max)
if self.units == "wavelength":
lo = self._hc / ehi
hi = self._hc / elo
elo = lo
ehi = hi
cnt = self.get_dep(True)
arf = self.get_specresp(filter=True)
y = cnt / (ehi - elo)
if self.exposure is not None:
y /= self.exposure # photons/keV/sec or photons/Ang/sec
# y = cnt/arf/self.exposure
if arf is not None:
y /= arf # photons/keV/cm^2/sec or photons/Ang/cm^2/sec
return (y, elo, ehi)
def to_fit(self, staterrfunc=None):
return (self.get_dep(True),
self.get_staterror(True, staterrfunc),
self.get_syserror(True))
def to_plot(self, yfunc=None, staterrfunc=None, response_id=None):
return (self.apply_filter(self.get_x(response_id=response_id),
self._middle),
self.get_y(True, yfunc, response_id=response_id),
self.get_yerr(True, staterrfunc, response_id=response_id),
self.get_xerr(True, response_id=response_id),
self.get_xlabel(),
self.get_ylabel())
def group(self):
"Group the data according to the data set's grouping scheme"
self.grouped = True
def ungroup(self):
"Ungroup the data"
self.grouped = False
def subtract(self):
"Subtract the background data"
self.subtracted = True
def unsubtract(self):
"Remove background subtraction"
self.subtracted = False
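    # Hedged usage sketch for the filtering/grouping/subtraction methods
    # above (illustrative only; "pha" is a hypothetical DataPHA instance
    # with a background and a grouping scheme already loaded):
    #
    #     pha.set_analysis('energy')
    #     pha.notice(0.5, 7.0)          # keep ~0.5-7 keV
    #     pha.group()                   # apply the stored grouping scheme
    #     pha.subtract()                # enable background subtraction
    #     y = pha.get_dep(filter=True)  # grouped, background-subtracted counts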
class DataIMG(Data2D):
"Image data set, including functions for coordinate transformations"
_fields = Data2D._fields + ("sky", "eqpos", "coord", "header")
def _get_coord(self):
return self._coord
def _set_coord(self, val):
coord = str(val).strip().lower()
if coord in ('logical', 'image'):
coord = 'logical'
elif coord in ('physical',):
self._check_physical_transform()
coord = 'physical'
elif coord in ('world', 'wcs'):
self._check_world_transform()
coord = 'world'
else:
raise DataErr('bad', 'coordinates', val)
self._coord = coord
# You should use set_coord rather than changing coord directly,
# otherwise constraints set in set_coord are not run. This is
# probably an error in set_coord (i.e. this logic should be
# moved into _set_coord).
#
coord = property(_get_coord, _set_coord,
doc='Coordinate system of independent axes')
def __init__(self, name, x0, x1, y, shape=None, staterror=None,
syserror=None, sky=None, eqpos=None, coord='logical',
header=None):
self.sky = sky
self.eqpos = eqpos
self.coord = coord
self.header = header
self._region = None
Data2D.__init__(self, name, x0, x1, y, shape, staterror, syserror)
def __str__(self):
# Print the metadata first
old = self._fields
ss = old
try:
self._fields = tuple(filter((lambda x: x != 'header'),
self._fields))
ss = Data.__str__(self)
finally:
self._fields = old
return ss
def _repr_html_(self):
"""Return a HTML (string) representation of the data
"""
return html_img(self)
def __getstate__(self):
state = self.__dict__.copy()
# Function pointers to methods of the class
# (of type 'instancemethod') are NOT picklable
# remove them and restore later with a coord init
# del state['_get_logical']
# del state['_get_physical']
# del state['_get_world']
# PyRegion objects (of type 'extension') are NOT picklable, yet.
# preserve the region string and restore later with constructor
state['_region'] = state['_region'].__str__()
return state
def __setstate__(self, state):
# Populate the function pointers we deleted at pickle time with
# no-ops.
# self.__dict__['_get_logical']=(lambda : None)
# self.__dict__['_get_physical']=(lambda : None)
# self.__dict__['_get_world']=(lambda : None)
if 'header' not in state:
self.header = None
self.__dict__.update(state)
# _set_coord will correctly define the _get_* WCS function pointers.
self._set_coord(state['_coord'])
if regstatus:
self._region = Region(self._region)
else:
            # An ImportErr could be raised rather than displaying a
            # warning, but that would make it harder for the user
# to extract useful data (e.g. in the case of triggering
# this when loading a pickled file).
#
if self._region is not None and self._region != '':
warning("Unable to restore region={} as region module is not avaialable.".format(self._region))
self._region = None
def _check_physical_transform(self):
if self.sky is None:
raise DataErr('nocoord', self.name, 'physical')
def _check_world_transform(self):
if self.eqpos is None:
raise DataErr('nocoord', self.name, 'world')
def _logical_to_physical(self, x0=None, x1=None):
if x0 is None or x1 is None:
x0, x1 = self.get_indep()
self._check_shape()
self._check_physical_transform()
# logical -> physical
x0, x1 = self.sky.apply(x0, x1)
return (x0, x1)
def _logical_to_world(self, x0=None, x1=None):
if x0 is None or x1 is None:
x0, x1 = self.get_indep()
self._check_shape()
self._check_world_transform()
# logical -> physical
if self.sky is not None:
x0, x1 = self.sky.apply(x0, x1)
# physical -> world
x0, x1 = self.eqpos.apply(x0, x1)
return (x0, x1)
def _physical_to_logical(self, x0=None, x1=None):
if x0 is None or x1 is None:
x0, x1 = self.get_indep()
self._check_shape()
self._check_physical_transform()
# physical -> logical
x0, x1 = self.sky.invert(x0, x1)
return (x0, x1)
def _physical_to_world(self, x0=None, x1=None):
if x0 is None or x1 is None:
x0, x1 = self.get_indep()
self._check_shape()
self._check_world_transform()
# physical -> world
x0, x1 = self.eqpos.apply(x0, x1)
return (x0, x1)
def _world_to_logical(self, x0=None, x1=None):
if x0 is None or x1 is None:
x0, x1 = self.get_indep()
self._check_shape()
self._check_world_transform()
# world -> physical
x0, x1 = self.eqpos.invert(x0, x1)
# physical -> logical
if self.sky is not None:
x0, x1 = self.sky.invert(x0, x1)
return (x0, x1)
def _world_to_physical(self, x0=None, x1=None):
if x0 is None or x1 is None:
x0, x1 = self.get_indep()
self._check_shape()
self._check_world_transform()
# world -> physical
x0, x1 = self.eqpos.invert(x0, x1)
return (x0, x1)
def get_logical(self):
coord = self.coord
x0, x1 = self.get_indep()
if coord != 'logical':
x0 = x0.copy()
x1 = x1.copy()
x0, x1 = getattr(self, '_' + coord + '_to_logical')(x0, x1)
return (x0, x1)
def get_physical(self):
coord = self.coord
x0, x1 = self.get_indep()
if coord != 'physical':
x0 = x0.copy()
x1 = x1.copy()
x0, x1 = getattr(self, '_' + coord + '_to_physical')(x0, x1)
return (x0, x1)
def get_world(self):
coord = self.coord
x0, x1 = self.get_indep()
if coord != 'world':
x0 = x0.copy()
x1 = x1.copy()
x0, x1 = getattr(self, '_' + coord + '_to_world')(x0, x1)
return (x0, x1)
# For compatibility with old Sherpa keywords
get_image = get_logical
get_wcs = get_world
def set_coord(self, coord):
coord = str(coord).strip().lower()
# Destroys original data to conserve memory for big imgs
good = ('logical', 'image', 'physical', 'world', 'wcs')
if coord not in good:
raise DataErr('badchoices', 'coordinates', coord, ", ".join(good))
if coord.startswith('wcs'):
coord = 'world'
elif coord.startswith('image'):
coord = 'logical'
func = getattr(self, 'get_' + coord)
self.indep = func()
self._set_coord(coord)
def get_filter_expr(self):
if self._region is not None:
return str(self._region)
return ''
get_filter = get_filter_expr
def notice2d(self, val=None, ignore=False):
"""Apply a 2D filter.
Parameters
----------
val : str or None, optional
The filter to apply. It can be a region string or a
filename.
ignore : bool, optional
If set then the filter should be ignored, not noticed.
"""
ignore = bool_cast(ignore)
# This was originally a bit-more complex, but it has been
# simplified.
#
if val is None:
self.mask = not ignore
self._region = None
return
if not regstatus:
raise ImportErr('importfailed', 'region', 'notice2d')
        # Create the new region
#
val = str(val).strip()
isfile = os.path.isfile(val)
reg = Region(val, isfile)
# Calculate the mask for this region as an "included"
# region.
#
mask = reg.mask(self.get_x0(), self.get_x1())
mask = mask.astype(numpy.bool)
# Apply the new mask to the existing mask.
#
if not ignore:
if self.mask is True:
self.mask = mask
else:
self.mask |= mask
else:
# Invert the response from region_mask
mask = ~mask
if self.mask is False:
self.mask = mask
else:
self.mask &= mask
# Create the new region shape.
#
if self._region is None:
if ignore:
reg.invert()
self._region = reg
else:
self._region = self._region.combine(reg, ignore)
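    # Hedged example of notice2d (illustrative sketch; "img" is a
    # hypothetical DataIMG instance, the region module must be importable,
    # and the strings assume CIAO-style region syntax):
    #
    #     img.notice2d('circle(100, 100, 20)')               # keep a circle
    #     img.notice2d('box(50, 50, 10, 10)', ignore=True)   # then drop a box
    #     img.notice2d()                                     # reset the filter
    #
    # A file name containing such a region expression can also be passed.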
def get_bounding_mask(self):
mask = self.mask
shape = None
if numpy.iterable(self.mask):
# create bounding box around noticed image regions
mask = numpy.array(self.mask).reshape(*self.shape)
# TODO: should replace 'mask == True' with mask but
# not sure we have a good set of tests
x0_i, x1_i = numpy.where(mask == True)
x0_lo = x0_i.min()
x0_hi = x0_i.max()
x1_lo = x1_i.min()
x1_hi = x1_i.max()
# TODO: subset mask and then ask its shape
shape = mask[x0_lo:x0_hi + 1, x1_lo:x1_hi + 1].shape
mask = mask[x0_lo:x0_hi + 1, x1_lo:x1_hi + 1]
mask = mask.ravel()
return mask, shape
def get_img(self, yfunc=None):
# FIXME add support for coords to image class -> DS9
self._check_shape()
y_img = self.filter_region(self.get_dep(False))
if yfunc is not None:
m = self.eval_model_to_fit(yfunc)
if numpy.iterable(self.mask):
# if filtered, the calculated model must be padded up
# to the data size to preserve img shape and WCS coord
m = pad_bounding_box(m, self.mask)
y_img = (y_img, self.filter_region(m))
if yfunc is not None:
y_img = (y_img[0].reshape(*self.shape),
y_img[1].reshape(*self.shape))
else:
y_img = y_img.reshape(*self.shape)
return y_img
def get_axes(self):
# FIXME: how to filter an axis when self.mask is size of self.y?
self._check_shape()
# dummy placeholders needed b/c img shape may not be square!
axis0 = numpy.arange(self.shape[1], dtype=float) + 1.
axis1 = numpy.arange(self.shape[0], dtype=float) + 1.
dummy0 = numpy.ones(axis0.size, dtype=float)
dummy1 = numpy.ones(axis1.size, dtype=float)
if self.coord == 'physical':
axis0, dummy = self._logical_to_physical(axis0, dummy0)
dummy, axis1 = self._logical_to_physical(dummy1, axis1)
elif self.coord == 'world':
axis0, dummy = self._logical_to_world(axis0, dummy0)
dummy, axis1 = self._logical_to_world(dummy1, axis1)
return (axis0, axis1)
def get_x0label(self):
"Return label for first dimension in 2-D view of independent axis/axes"
if self.coord in ('logical', 'image'):
return 'x0'
elif self.coord in ('physical',):
return 'x0 (pixels)'
elif self.coord in ('world', 'wcs'):
return 'RA (deg)'
else:
return 'x0'
def get_x1label(self):
"""
Return label for second dimension in 2-D view of independent axis/axes
"""
if self.coord in ('logical', 'image'):
return 'x1'
elif self.coord in ('physical',):
return 'x1 (pixels)'
elif self.coord in ('world', 'wcs'):
return 'DEC (deg)'
else:
return 'x1'
def to_contour(self, yfunc=None):
y = self.filter_region(self.get_dep(False))
if yfunc is not None:
m = self.eval_model_to_fit(yfunc)
if numpy.iterable(self.mask):
# if filtered, the calculated model must be padded up
# to the data size to preserve img shape and WCS coord
m = self.filter_region(pad_bounding_box(m, self.mask))
y = (y, m)
return (self.get_x0(),
self.get_x1(),
y,
self.get_x0label(),
self.get_x1label())
def filter_region(self, data):
if data is not None and numpy.iterable(self.mask):
filter = numpy.ones(len(self.mask), dtype=SherpaFloat)
filter[~self.mask] = numpy.nan
return data * filter
return data
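    # Small illustrative note for filter_region (not original code): pixels
    # outside the mask are returned as NaN rather than removed, so the image
    # shape is preserved. With hypothetical values:
    #
    #     img.mask = numpy.array([True, False, True])
    #     img.filter_region(numpy.array([1.0, 2.0, 3.0]))  # -> [1.0, nan, 3.0]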
class DataIMGInt(DataIMG):
_fields = Data2DInt._fields + ("sky", "eqpos", "coord")
def __init__(self, name, x0lo, x1lo, x0hi, x1hi, y, shape=None,
staterror=None, syserror=None, sky=None, eqpos=None,
coord='logical', header=None):
self._region = None
self.sky = sky
self.eqpos = eqpos
self.coord = coord
self.header = header
self.shape = shape
Data.__init__(self, name, (x0lo, x1lo, x0hi, x1hi), y, staterror, syserror)
def _init_data_space(self, filter, *data):
return IntegratedDataSpace2D(filter, *data)
def get_logical(self):
coord = self.coord
x0lo, x1lo, x0hi, x1hi = self.get_indep()
if coord != 'logical':
x0lo = x0lo.copy()
x1lo = x1lo.copy()
convert = getattr(self, '_' + coord + '_to_logical')
x0lo, x1lo = convert(x0lo, x1lo)
x0hi = x0hi.copy()
x1hi = x1hi.copy()
x0hi, x1hi = convert(x0hi, x1hi)
return (x0lo, x1lo, x0hi, x1hi)
def get_physical(self):
coord = self.coord
x0lo, x1lo, x0hi, x1hi = self.get_indep()
if coord != 'physical':
x0lo = x0lo.copy()
x1lo = x1lo.copy()
convert = getattr(self, '_' + coord + '_to_physical')
x0lo, x1lo = convert(x0lo, x1lo)
x0hi = x0hi.copy()
x1hi = x1hi.copy()
x0hi, x1hi = convert(x0hi, x1hi)
return (x0lo, x1lo, x0hi, x1hi)
def get_world(self):
coord = self.coord
x0lo, x1lo, x0hi, x1hi = self.get_indep()
if coord != 'world':
x0lo = x0lo.copy()
x1lo = x1lo.copy()
convert = getattr(self, '_' + coord + '_to_world')
x0lo, x1lo = convert(x0lo, x1lo)
x0hi = x0hi.copy()
x1hi = x1hi.copy()
x0hi, x1hi = convert(x0hi, x1hi)
return (x0lo, x1lo, x0hi, x1hi)
def get_axes(self):
# FIXME: how to filter an axis when self.mask is size of self.y?
self._check_shape()
# dummy placeholders needed b/c img shape may not be square!
axis0lo = numpy.arange(self.shape[1], dtype=float) - 0.5
axis1lo = numpy.arange(self.shape[0], dtype=float) - 0.5
axis0hi = numpy.arange(self.shape[1], dtype=float) + 0.5
axis1hi = numpy.arange(self.shape[0], dtype=float) + 0.5
dummy0 = numpy.ones(axis0lo.size, dtype=float)
dummy1 = numpy.ones(axis1lo.size, dtype=float)
if self.coord == 'physical':
axis0lo, dummy = self._logical_to_physical(axis0lo, dummy0)
axis0hi, dummy = self._logical_to_physical(axis0hi, dummy0)
dummy, axis1lo = self._logical_to_physical(dummy1, axis1lo)
dummy, axis1hi = self._logical_to_physical(dummy1, axis1hi)
elif self.coord == 'world':
axis0lo, dummy = self._logical_to_world(axis0lo, dummy0)
axis0hi, dummy = self._logical_to_world(axis0hi, dummy0)
dummy, axis1lo = self._logical_to_world(dummy1, axis1lo)
dummy, axis1hi = self._logical_to_world(dummy1, axis1hi)
return (axis0lo, axis1lo, axis0hi, axis1hi)
|
gpl-3.0
|
thirdwing/mxnet
|
example/kaggle-ndsb1/submission_dsb.py
|
52
|
5048
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import pandas as pd
import os
import time as time
## Receives an array of per-class probabilities (columns), one row per image in the test set (as listed in test.lst), formats it in the Kaggle submission format, then saves and gzip-compresses it at submission_path
def gen_sub(predictions,test_lst_path="test.lst",submission_path="submission.csv"):
## append time to avoid overwriting previous submissions
## submission_path=time.strftime("%Y%m%d%H%M%S_")+submission_path
### Make submission
## check sampleSubmission.csv from kaggle website to view submission format
header = "acantharia_protist_big_center,acantharia_protist_halo,acantharia_protist,amphipods,appendicularian_fritillaridae,appendicularian_s_shape,appendicularian_slight_curve,appendicularian_straight,artifacts_edge,artifacts,chaetognath_non_sagitta,chaetognath_other,chaetognath_sagitta,chordate_type1,copepod_calanoid_eggs,copepod_calanoid_eucalanus,copepod_calanoid_flatheads,copepod_calanoid_frillyAntennae,copepod_calanoid_large_side_antennatucked,copepod_calanoid_large,copepod_calanoid_octomoms,copepod_calanoid_small_longantennae,copepod_calanoid,copepod_cyclopoid_copilia,copepod_cyclopoid_oithona_eggs,copepod_cyclopoid_oithona,copepod_other,crustacean_other,ctenophore_cestid,ctenophore_cydippid_no_tentacles,ctenophore_cydippid_tentacles,ctenophore_lobate,decapods,detritus_blob,detritus_filamentous,detritus_other,diatom_chain_string,diatom_chain_tube,echinoderm_larva_pluteus_brittlestar,echinoderm_larva_pluteus_early,echinoderm_larva_pluteus_typeC,echinoderm_larva_pluteus_urchin,echinoderm_larva_seastar_bipinnaria,echinoderm_larva_seastar_brachiolaria,echinoderm_seacucumber_auricularia_larva,echinopluteus,ephyra,euphausiids_young,euphausiids,fecal_pellet,fish_larvae_deep_body,fish_larvae_leptocephali,fish_larvae_medium_body,fish_larvae_myctophids,fish_larvae_thin_body,fish_larvae_very_thin_body,heteropod,hydromedusae_aglaura,hydromedusae_bell_and_tentacles,hydromedusae_h15,hydromedusae_haliscera_small_sideview,hydromedusae_haliscera,hydromedusae_liriope,hydromedusae_narco_dark,hydromedusae_narco_young,hydromedusae_narcomedusae,hydromedusae_other,hydromedusae_partial_dark,hydromedusae_shapeA_sideview_small,hydromedusae_shapeA,hydromedusae_shapeB,hydromedusae_sideview_big,hydromedusae_solmaris,hydromedusae_solmundella,hydromedusae_typeD_bell_and_tentacles,hydromedusae_typeD,hydromedusae_typeE,hydromedusae_typeF,invertebrate_larvae_other_A,invertebrate_larvae_other_B,jellies_tentacles,polychaete,protist_dark_center,protist_fuzzy_olive,protist_noctiluca,protist_other,protist_star,pteropod_butterfly,pteropod_theco_dev_seq,pteropod_triangle,radiolarian_chain,radiolarian_colony,shrimp_caridean,shrimp_sergestidae,shrimp_zoea,shrimp-like_other,siphonophore_calycophoran_abylidae,siphonophore_calycophoran_rocketship_adult,siphonophore_calycophoran_rocketship_young,siphonophore_calycophoran_sphaeronectes_stem,siphonophore_calycophoran_sphaeronectes_young,siphonophore_calycophoran_sphaeronectes,siphonophore_other_parts,siphonophore_partial,siphonophore_physonect_young,siphonophore_physonect,stomatopod,tornaria_acorn_worm_larvae,trichodesmium_bowtie,trichodesmium_multiple,trichodesmium_puff,trichodesmium_tuft,trochophore_larvae,tunicate_doliolid_nurse,tunicate_doliolid,tunicate_partial,tunicate_salp_chains,tunicate_salp,unknown_blobs_and_smudges,unknown_sticks,unknown_unclassified".split(',')
# read first line to know the number of columns and column to use
img_lst = pd.read_csv(test_lst_path,sep="/",header=None, nrows=1)
columns = img_lst.columns.tolist() # get the columns
    cols_to_use = columns[len(columns)-1] # keep only the last column (the filenames)
    cols_to_use = map(int, str(cols_to_use)) ## convert scalar to list
    img_lst = pd.read_csv(test_lst_path,sep="/",header=None, usecols=cols_to_use) ## reads lst, use / as sep to get last column with filenames
img_lst=img_lst.values.T.tolist()
df = pd.DataFrame(predictions,columns = header, index=img_lst)
df.index.name = 'image'
print("Saving csv to %s" % submission_path)
df.to_csv(submission_path)
print("Compress with gzip")
os.system("gzip -f %s" % submission_path)
print(" stored in %s.gz" % submission_path)
|
apache-2.0
|
jmargeta/scikit-learn
|
examples/decomposition/plot_faces_decomposition.py
|
4
|
4459
|
"""
============================
Faces dataset decompositions
============================
This example applies to :ref:`olivetti_faces` different unsupervised
matrix decomposition (dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` (see the documentation chapter
:ref:`decompositions`) .
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD
import logging
from time import time
from numpy.random import RandomState
import pylab as pl
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
pl.figure(figsize=(2. * n_col, 2.26 * n_row))
pl.suptitle(title, size=16)
for i, comp in enumerate(images):
pl.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
pl.imshow(comp.reshape(image_shape), cmap=pl.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
pl.xticks(())
pl.yticks(())
pl.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', beta=5.0,
tol=5e-3, sparseness='components'),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True,
max_iter=10),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
pl.show()
|
bsd-3-clause
|
chaluemwut/fbserver
|
venv/lib/python2.7/site-packages/sklearn/mixture/tests/test_dpgmm.py
|
34
|
2573
|
import unittest
import nose
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less
from .test_gmm import GMMTester
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
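# Editorial note on the check above: log_normalize is expected to behave,
# up to numerical-stability tricks, like a softmax over the log values,
# i.e. roughly exp(a) / exp(a).sum(). Here exp(a) = 2 * v and v already
# sums to one, so normalising recovers v, which is what the assertion tests.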
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
if __name__ == '__main__':
nose.runmodule()
|
apache-2.0
|
dennisobrien/bokeh
|
bokeh/models/sources.py
|
2
|
27041
|
from __future__ import absolute_import
import warnings
from ..core.has_props import abstract
from ..core.properties import Any, Bool, ColumnData, Dict, Enum, Instance, Int, JSON, List, Seq, String
from ..model import Model
from ..util.dependencies import import_optional
from ..util.serialization import convert_datetime_array
from ..util.warnings import BokehUserWarning
from .callbacks import Callback, CustomJS
from .filters import Filter
from .selections import Selection, SelectionPolicy, UnionRenderers
pd = import_optional('pandas')
@abstract
class DataSource(Model):
''' A base class for data source types.
'''
selected = Instance(Selection, default=lambda: Selection(), help="""
A Selection that indicates selected indices on this DataSource.
""")
callback = Instance(Callback, help="""
A callback to run in the browser whenever the selection is changed.
.. note:
This property is left for backwards compatibility, but may be deprecated
in the future. Prefer ``source.selected.js_on_change(...)`` for new code.
""")
@abstract
class ColumnarDataSource(DataSource):
''' A base class for data source types, which can be mapped onto
a columnar format.
'''
selection_policy = Instance(SelectionPolicy, default=lambda: UnionRenderers(), help="""
An instance of a SelectionPolicy that determines how selections are set.
""")
class ColumnDataSource(ColumnarDataSource):
''' Maps names of columns to sequences or arrays.
The ``ColumnDataSource`` is a fundamental data structure of Bokeh. Most
plots, data tables, etc. will be driven by a ``ColumnDataSource``.
If the ColumnDataSource initializer is called with a single argument that
can be any of the following:
* A Python ``dict`` that maps string names to sequences of values, e.g.
lists, arrays, etc.
.. code-block:: python
data = {'x': [1,2,3,4], 'y': np.ndarray([10.0, 20.0, 30.0, 40.0])}
source = ColumnDataSource(data)
.. note::
``ColumnDataSource`` only creates a shallow copy of ``data``. Use e.g.
``ColumnDataSource(copy.deepcopy(data))`` if initializing from another
``ColumnDataSource.data`` object that you want to keep independent.
* A Pandas ``DataFrame`` object
.. code-block:: python
source = ColumnDataSource(df)
In this case the CDS will have columns corresponding to the columns of
the ``DataFrame``. If the ``DataFrame`` columns have multiple levels,
    they will be flattened using an underscore (e.g. level_0_col_level_1_col).
The index of the DataFrame will be flattened to an ``Index`` of tuples
if it's a ``MultiIndex``, and then reset using ``reset_index``. The result
will be a column with the same name if the index was named, or
level_0_name_level_1_name if it was a named ``MultiIndex``. If the
``Index`` did not have a name or the ``MultiIndex`` name could not be
flattened/determined, the ``reset_index`` function will name the index column
``index``, or ``level_0`` if the name ``index`` is not available.
* A Pandas ``GroupBy`` object
.. code-block:: python
group = df.groupby(('colA', 'ColB'))
In this case the CDS will have columns corresponding to the result of
calling ``group.describe()``. The ``describe`` method generates columns
for statistical measures such as ``mean`` and ``count`` for all the
    non-grouped original columns. The CDS columns are formed by joining
original column names with the computed measure. For example, if a
``DataFrame`` has columns ``'year'`` and ``'mpg'``. Then passing
``df.groupby('year')`` to a CDS will result in columns such as
``'mpg_mean'``
If the ``GroupBy.describe`` result has a named index column, then
CDS will also have a column with this name. However, if the index name
(or any subname of a ``MultiIndex``) is ``None``, then the CDS will have
a column generically named ``index`` for the index.
Note this capability to adapt ``GroupBy`` objects may only work with
Pandas ``>=0.20.0``.
.. note::
There is an implicit assumption that all the columns in a given
``ColumnDataSource`` all have the same length at all times. For this
reason, it is usually preferable to update the ``.data`` property
of a data source "all at once".
'''
data = ColumnData(String, Seq(Any), help="""
Mapping of column names to sequences of data. The data can be, e.g,
Python lists or tuples, NumPy arrays, etc.
""").asserts(lambda _, data: len(set(len(x) for x in data.values())) <= 1,
lambda obj, name, data: warnings.warn(
"ColumnDataSource's columns must be of the same length. " +
"Current lengths: %s" % ", ".join(sorted(str((k, len(v))) for k, v in data.items())), BokehUserWarning))
def __init__(self, *args, **kw):
''' If called with a single argument that is a dict or
pandas.DataFrame, treat that implicitly as the "data" attribute.
'''
if len(args) == 1 and "data" not in kw:
kw["data"] = args[0]
# TODO (bev) invalid to pass args and "data", check and raise exception
raw_data = kw.pop("data", {})
if not isinstance(raw_data, dict):
if pd and isinstance(raw_data, pd.DataFrame):
raw_data = self._data_from_df(raw_data)
elif pd and isinstance(raw_data, pd.core.groupby.GroupBy):
raw_data = self._data_from_groupby(raw_data)
else:
raise ValueError("expected a dict or pandas.DataFrame, got %s" % raw_data)
super(ColumnDataSource, self).__init__(**kw)
self.data.update(raw_data)
@property
def column_names(self):
''' A list of the column names in this data source.
'''
return list(self.data)
@staticmethod
def _data_from_df(df):
''' Create a ``dict`` of columns from a Pandas DataFrame,
suitable for creating a ColumnDataSource.
Args:
df (DataFrame) : data to convert
Returns:
dict[str, np.array]
'''
_df = df.copy()
# Flatten columns
if isinstance(df.columns, pd.MultiIndex):
try:
_df.columns = ['_'.join(col) for col in _df.columns.values]
except TypeError:
raise TypeError('Could not flatten MultiIndex columns. '
'use string column names or flatten manually')
# Flatten index
index_name = ColumnDataSource._df_index_name(df)
if index_name == 'index':
_df.index = pd.Index(_df.index.values)
else:
_df.index = pd.Index(_df.index.values, name=index_name)
_df.reset_index(inplace=True)
tmp_data = {c: v.values for c, v in _df.iteritems()}
new_data = {}
for k, v in tmp_data.items():
new_data[k] = v
return new_data
@staticmethod
def _data_from_groupby(group):
''' Create a ``dict`` of columns from a Pandas GroupBy,
suitable for creating a ColumnDataSource.
The data generated is the result of running ``describe``
on the group.
Args:
group (GroupBy) : data to convert
Returns:
dict[str, np.array]
'''
return ColumnDataSource._data_from_df(group.describe())
@staticmethod
def _df_index_name(df):
''' Return the Bokeh-appropriate column name for a DataFrame index
        If there is no named index, then "index" is returned.
If there is a single named index, then ``df.index.name`` is returned.
If there is a multi-index, and the index names are all strings, then
the names are joined with '_' and the result is returned, e.g. for a
multi-index ``['ind1', 'ind2']`` the result will be "ind1_ind2".
Otherwise if any index name is not a string, the fallback name "index"
is returned.
Args:
df (DataFrame) : the DataFrame to find an index name for
Returns:
str
'''
if df.index.name:
return df.index.name
elif df.index.names:
try:
return "_".join(df.index.names)
except TypeError:
return "index"
else:
return "index"
@classmethod
def from_df(cls, data):
''' Create a ``dict`` of columns from a Pandas DataFrame,
suitable for creating a ColumnDataSource.
Args:
data (DataFrame) : data to convert
Returns:
dict[str, np.array]
'''
return cls._data_from_df(data)
@classmethod
def from_groupby(cls, data):
''' Create a ``dict`` of columns from a Pandas GroupBy,
suitable for creating a ColumnDataSource.
The data generated is the result of running ``describe``
on the group.
Args:
data (Groupby) : data to convert
Returns:
dict[str, np.array]
'''
return cls._data_from_df(data.describe())
def to_df(self):
''' Convert this data source to pandas dataframe.
Returns:
DataFrame
'''
if not pd:
raise RuntimeError('Pandas must be installed to convert to a Pandas Dataframe')
return pd.DataFrame(self.data)
def add(self, data, name=None):
''' Appends a new column of data to the data source.
Args:
data (seq) : new data to add
name (str, optional) : column name to use.
If not supplied, generate a name of the form "Series ####"
Returns:
str: the column name used
'''
if name is None:
n = len(self.data)
while "Series %d"%n in self.data:
n += 1
name = "Series %d"%n
self.data[name] = data
return name
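    # Hedged usage sketch for add() (illustrative, values made up):
    #
    #     source = ColumnDataSource(data=dict(x=[1, 2, 3]))
    #     source.add([10, 20, 30], name='y')   # -> 'y'
    #     source.add([4, 5, 6])                # -> auto-named 'Series 2'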
def remove(self, name):
''' Remove a column of data.
Args:
name (str) : name of the column to remove
Returns:
None
.. note::
If the column name does not exist, a warning is issued.
'''
try:
del self.data[name]
except (ValueError, KeyError):
import warnings
warnings.warn("Unable to find column '%s' in data source" % name)
def stream(self, new_data, rollover=None):
''' Efficiently update data source columns with new append-only data.
        In cases where it is only necessary to append new data to existing
        columns, this method can efficiently send just the new data, instead
        of requiring the entire data set to be re-sent.
Args:
new_data (dict[str, seq]) : a mapping of column names to sequences of
new data to append to each column.
All columns of the data source must be present in ``new_data``,
with identical-length append data.
rollover (int, optional) : A maximum column size, above which data
from the start of the column begins to be discarded. If None,
then columns will continue to grow unbounded (default: None)
Returns:
None
Raises:
ValueError
Example:
.. code-block:: python
source = ColumnDataSource(data=dict(foo=[], bar=[]))
# has new, identical-length updates for all columns in source
new_data = {
'foo' : [10, 20],
'bar' : [100, 200],
}
source.stream(new_data)
'''
# calls internal implementation
self._stream(new_data, rollover)
def _stream(self, new_data, rollover=None, setter=None):
''' Internal implementation to efficiently update data source columns
        with new append-only data. The internal implementation adds the setter
        argument. [https://github.com/bokeh/bokeh/issues/6577]
        In cases where it is only necessary to append new data to existing
        columns, this method can efficiently send just the new data, instead
        of requiring the entire data set to be re-sent.
Args:
new_data (dict[str, seq] or DataFrame or Series) : a mapping of
column names to sequences of new data to append to each column,
a pandas DataFrame, or a pandas Series in case of a single row -
in this case the Series index is used as column names
All columns of the data source must be present in ``new_data``,
with identical-length append data.
rollover (int, optional) : A maximum column size, above which data
from the start of the column begins to be discarded. If None,
then columns will continue to grow unbounded (default: None)
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
Returns:
None
Raises:
ValueError
Example:
.. code-block:: python
source = ColumnDataSource(data=dict(foo=[], bar=[]))
# has new, identical-length updates for all columns in source
new_data = {
'foo' : [10, 20],
'bar' : [100, 200],
}
source.stream(new_data)
'''
needs_length_check = True
if pd and isinstance(new_data, pd.Series):
new_data = new_data.to_frame().T
if pd and isinstance(new_data, pd.DataFrame):
needs_length_check = False # DataFrame lengths equal by definition
_df = new_data
newkeys = set(_df.columns)
index_name = ColumnDataSource._df_index_name(_df)
newkeys.add(index_name)
new_data = dict(_df.iteritems())
new_data[index_name] = _df.index.values
else:
newkeys = set(new_data.keys())
oldkeys = set(self.data.keys())
if newkeys != oldkeys:
missing = oldkeys - newkeys
extra = newkeys - oldkeys
if missing and extra:
raise ValueError(
"Must stream updates to all existing columns (missing: %s, extra: %s)" % (", ".join(sorted(missing)), ", ".join(sorted(extra)))
)
elif missing:
raise ValueError("Must stream updates to all existing columns (missing: %s)" % ", ".join(sorted(missing)))
else:
raise ValueError("Must stream updates to all existing columns (extra: %s)" % ", ".join(sorted(extra)))
import numpy as np
if needs_length_check:
lengths = set()
arr_types = (np.ndarray, pd.Series) if pd else np.ndarray
for k, x in new_data.items():
if isinstance(x, arr_types):
if len(x.shape) != 1:
raise ValueError("stream(...) only supports 1d sequences, got ndarray with size %r" % (x.shape,))
lengths.add(x.shape[0])
else:
lengths.add(len(x))
if len(lengths) > 1:
raise ValueError("All streaming column updates must be the same length")
# slightly awkward that we have to call convert_datetime_array here ourselves
# but the downstream code expects things to already be ms-since-epoch
for key, values in new_data.items():
if pd and isinstance(values, (pd.Series, pd.Index)):
values = values.values
old_values = self.data[key]
# Apply the transformation if the new data contains datetimes
# but the current data has already been transformed
if (isinstance(values, np.ndarray) and values.dtype.kind.lower() == 'm' and
isinstance(old_values, np.ndarray) and old_values.dtype.kind.lower() != 'm'):
new_data[key] = convert_datetime_array(values)
else:
new_data[key] = values
self.data._stream(self.document, self, new_data, rollover, setter)
def patch(self, patches, setter=None):
''' Efficiently update data source columns at specific locations
If it is only necessary to update a small subset of data in a
ColumnDataSource, this method can be used to efficiently update only
the subset, instead of requiring the entire data set to be sent.
This method should be passed a dictionary that maps column names to
lists of tuples that describe a patch change to apply. To replace
individual items in columns entirely, the tuples should be of the
form:
.. code-block:: python
(index, new_value) # replace a single column value
# or
(slice, new_values) # replace several column values
Values at an index or slice will be replaced with the corresponding
new values.
In the case of columns whose values are other arrays or lists, (e.g.
image or patches glyphs), it is also possible to patch "subregions".
        In this case the first item of the tuple should be a list or tuple
        whose first element is the index of the array item in the CDS patch,
        and whose subsequent elements are integer indices or slices into the
        array item:
.. code-block:: python
# replace the entire 10th column of the 2nd array:
+----------------- index of item in column data source
|
| +--------- row subindex into array item
| |
| | +- column subindex into array item
V V V
([2, slice(None), 10], new_values)
Imagining a list of 2d NumPy arrays, the patch above is roughly
equivalent to:
.. code-block:: python
data = [arr1, arr2, ...] # list of 2d arrays
data[2][:, 10] = new_data
There are some limitations to the kinds of slices and data that can
be accepted.
* Negative ``start``, ``stop``, or ``step`` values for slices will
result in a ``ValueError``.
* In a slice, ``start > stop`` will result in a ``ValueError``
* When patching 1d or 2d subitems, the subitems must be NumPy arrays.
* New values must be supplied as a **flattened one-dimensional array**
of the appropriate size.
Args:
patches (dict[str, list[tuple]]) : lists of patches for each column
Returns:
None
Raises:
ValueError
Example:
            The following example shows how to patch entire column elements:
.. code-block:: python
source = ColumnDataSource(data=dict(foo=[10, 20, 30], bar=[100, 200, 300]))
patches = {
'foo' : [ (slice(2), [11, 12]) ],
'bar' : [ (0, 101), (2, 301) ],
}
source.patch(patches)
After this operation, the value of the ``source.data`` will be:
.. code-block:: python
dict(foo=[11, 22, 30], bar=[101, 200, 301])
For a more comprehensive complete example, see :bokeh-tree:`examples/howto/patch_app.py`.
'''
import numpy as np
extra = set(patches.keys()) - set(self.data.keys())
if extra:
raise ValueError("Can only patch existing columns (extra: %s)" % ", ".join(sorted(extra)))
for name, patch in patches.items():
col_len = len(self.data[name])
for ind, value in patch:
# integer index, patch single value of 1d column
if isinstance(ind, int):
if ind > col_len or ind < 0:
raise ValueError("Out-of bounds index (%d) in patch for column: %s" % (ind, name))
# slice index, patch multiple values of 1d column
elif isinstance(ind, slice):
_check_slice(ind)
if ind.stop is not None and ind.stop > col_len:
raise ValueError("Out-of bounds slice index stop (%d) in patch for column: %s" % (ind.stop, name))
# multi-index, patch sub-regions of "n-d" column
elif isinstance(ind, (list, tuple)):
if len(ind) == 0:
raise ValueError("Empty (length zero) patch multi-index")
if len(ind) == 1:
raise ValueError("Patch multi-index must contain more than one subindex")
if not isinstance(ind[0], int):
raise ValueError("Initial patch sub-index may only be integer, got: %s" % ind[0])
if ind[0] > col_len or ind[0] < 0:
raise ValueError("Out-of bounds initial sub-index (%d) in patch for column: %s" % (ind, name))
if not isinstance(self.data[name][ind[0]], np.ndarray):
raise ValueError("Can only sub-patch into columns with NumPy array items")
if len(self.data[name][ind[0]].shape) != (len(ind)-1):
raise ValueError("Shape mismatch between patch slice and sliced data")
elif isinstance(ind[0], slice):
_check_slice(ind[0])
if ind[0].stop is not None and ind[0].stop > col_len:
raise ValueError("Out-of bounds initial slice sub-index stop (%d) in patch for column: %s" % (ind.stop, name))
# Note: bounds of sub-indices after the first are not checked!
for subind in ind[1:]:
if not isinstance(subind, (int, slice)):
raise ValueError("Invalid patch sub-index: %s" % subind)
if isinstance(subind, slice):
_check_slice(subind)
else:
raise ValueError("Invalid patch index: %s" % ind)
self.data._patch(self.document, self, patches, setter)
def _check_slice(s):
if (s.start is not None and s.stop is not None and s.start > s.stop):
raise ValueError("Patch slices must have start < end, got %s" % s)
if (s.start is not None and s.start < 0) or \
(s.stop is not None and s.stop < 0) or \
(s.step is not None and s.step < 0):
raise ValueError("Patch slices must have non-negative (start, stop, step) values, got %s" % s)
class CDSView(Model):
''' A view into a ColumnDataSource that represents a row-wise subset.
'''
filters = List(Instance(Filter), default=[], help="""
List of filters that the view comprises.
""")
source = Instance(ColumnarDataSource, help="""
The ColumnDataSource associated with this view. Used to determine
the length of the columns.
""")
class GeoJSONDataSource(ColumnarDataSource):
'''
'''
geojson = JSON(help="""
GeoJSON that contains features for plotting. Currently GeoJSONDataSource can
only process a FeatureCollection or GeometryCollection.
""")
@abstract
class RemoteSource(ColumnDataSource):
''' Base class for remote column data sources that can update from data
URLs at prescribed time intervals.
.. note::
This base class is typically not useful to instantiate on its own.
'''
data_url = String(help="""
A URL to fetch data from.
""")
polling_interval = Int(help="""
A polling interval (in milliseconds) for updating data source.
""")
class AjaxDataSource(RemoteSource):
''' A data source that can populate columns by making Ajax calls to REST
endpoints.
The ``AjaxDataSource`` can be especially useful if you want to make a
standalone document (i.e. not backed by the Bokeh server) that can still
dynamically update using an existing REST API.
The response from the REST API should match the ``.data`` property of a
standard ``ColumnDataSource``, i.e. a JSON dict that maps names to arrays
of values:
.. code-block:: python
{
'x' : [1, 2, 3, ...],
'y' : [9, 3, 2, ...]
}
Alternatively, if the REST API returns a different format, a CustomJS
callback can be provided to convert the REST response into Bokeh format,
via the ``adapter`` property of this data source.
A full example can be seen at :bokeh-tree:`examples/howto/ajax_source.py`
'''
method = Enum('POST', 'GET', help="""
Specify the HTTP method to use for the Ajax request (GET or POST).
""")
mode = Enum("replace", "append", help="""
Whether to append new data to existing data (up to ``max_size``), or to
replace existing data entirely.
""")
adapter = Instance(CustomJS, help="""
A JavaScript callback to adapt raw JSON responses to Bokeh ColumnDataSource
format.
If provided, this callback is executed immediately after the JSON data is
received, but before appending or replacing data in the data source. The
CustomJS callback will receive the AjaxDataSource as ``cb_obj`` and will
receive the raw JSON response as ``cb_data.response``. The callback code
should return a ``data`` object suitable for a Bokeh ColumnDataSource (i.e.
a mapping of string column names to arrays of data).
""")
max_size = Int(help="""
Maximum size of the data columns. If a new fetch would result in columns
larger than ``max_size``, then earlier data is dropped to make room.
""")
if_modified = Bool(False, help="""
Whether to include an ``If-Modified-Since`` header in Ajax requests
to the server. If this header is supported by the server, then only
new data since the last request will be returned.
""")
content_type = String(default='application/json', help="""
Set the "contentType" parameter for the Ajax request.
""")
http_headers = Dict(String, String, help="""
Specify HTTP headers to set for the Ajax request.
Example:
.. code-block:: python
ajax_source.http_headers = { 'x-my-custom-header': 'some value' }
""")
|
bsd-3-clause
|
ifuding/Kaggle
|
TalkingDataFraudDetect/Code/nfold_train.py
|
1
|
6156
|
from sklearn.model_selection import KFold
from lgb import lgbm_train
import xgboost as xgb
# from functools import reduce
import numpy as np
from keras_train import DNN_Model
# import gensim
# from RCNN_Keras import get_word2vec, RCNN_Model
# from RNN_Keras import RNN_Model
from tensorflow.python.keras.models import Model
from xgb import xgb_train
# RNN_PARAMS
RCNN_HIDDEN_UNIT = [128, 64]
def nfold_train(train_data, train_label, model_types = None,
stacking = False, valide_data = None, valide_label = None,
test_data = None, train_weight = None, valide_weight = None,
flags = None, tokenizer = None, scores = None, emb_weight = None):
"""
nfold Training
"""
print("Over all training size:")
print(train_data.shape)
print("Over all label size:")
print(train_label.shape)
fold = flags.nfold
kf = KFold(n_splits=fold, shuffle=False)
# wv_model = gensim.models.Word2Vec.load("wv_model_norm.gensim")
stacking = flags.stacking
stacking_data = None
stacking_label = None
test_preds = None
num_fold = 0
models = []
for train_index, test_index in kf.split(train_data):
# print(test_index[:100])
# exit(0)
if valide_label is None:
train_part = train_data[train_index]
train_part_label = train_label[train_index]
valide_part = train_data[test_index]
valide_part_label = train_label[test_index]
if train_weight is not None:
train_part_weight = train_weight[train_index]
valide_part_weight = train_weight[test_index]
else:
train_part = train_data
train_part_label = train_label
valide_part = valide_data
valide_part_label = valide_label
if train_weight is not None:
train_part_weight, valide_part_weight = train_weight, valide_weight
print('fold: %d th train :-)' % (num_fold))
print('Train size: {} Valide size: {}'.format(train_part_label.shape[0], valide_part_label.shape[0]))
onefold_models = []
for model_type in model_types:
if model_type == 'k':
# with tf.device('/cpu:0'):
if flags.load_only_singleCnt:
input_len = train_part.shape[1]
model = DNN_Model(hidden_dim = [int(hn.strip()) for hn in flags.full_connect_hn.strip().split(',')], \
batch_size = flags.batch_size, epochs = flags.epochs, \
batch_interval = flags.batch_interval, emb_dropout = flags.emb_dropout, \
full_connect_dropout = flags.full_connect_dropout, scores = scores, \
emb_dim = [int(e.strip()) for e in flags.emb_dim.strip().split(',')], \
load_only_singleCnt = flags.load_only_singleCnt, input_len = input_len)
if num_fold == 0:
print(model.model.summary())
# if flags.load_only_singleCnt:
# model.train(train_part, train_part_label, valide_part, valide_part_label)
# else:
model.train(train_part, train_part_label, valide_part, valide_part_label)
# if stacking:
# model = Model(inputs = model.model.inputs, outputs = model.model.get_layer(name = 'merge_sparse_emb').output)
onefold_models.append((model, 'k'))
elif model_type == 'x':
model = xgb_train(train_part, train_part_label, valide_part, valide_part_label, num_fold)
onefold_models.append((model, 'x'))
elif model_type == 'l':
model = lgbm_train(train_part, train_part_label, valide_part, valide_part_label, num_fold,
fold, flags = flags)
onefold_models.append((model, 'l'))
# if stacking:
# valide_pred = [model_eval(model[0], model[1], valide_part) for model in onefold_models]
# valide_pred = reduce((lambda x, y: np.c_[x, y]), valide_pred)
# test_pred = [model_eval(model[0], model[1], test_data) for model in onefold_models]
# test_pred = reduce((lambda x, y: np.c_[x, y]), test_pred)
# if stacking_data is None:
# stacking_data = valide_pred #np.c_[valide_part, valide_pred]
# stacking_label = valide_part_label
# test_preds = test_pred
# else:
# stacking_data = np.append(stacking_data, valide_pred, axis = 0) #np.append(stacking_data, np.c_[valide_part, valide_pred], axis = 0)
# stacking_label = np.append(stacking_label, valide_part_label, axis = 0)
# test_preds += test_pred
# print('stacking_data shape: {0}'.format(stacking_data.shape))
# print('stacking_label shape: {0}'.format(stacking_label.shape))
# print('stacking test data shape: {0}'.format(test_preds.shape))
models.append(onefold_models[0])
num_fold += 1
if num_fold == flags.ensemble_nfold:
break
# if stacking:
# test_preds /= flags.ensemble_nfold
# test_data = np.c_[test_data, test_preds]
return models, stacking_data, stacking_label, test_preds
def model_eval(model, model_type, data_frame):
"""
"""
if model_type == 'l':
preds = model.predict(data_frame)
elif model_type == 'k' or model_type == 'LR' or model_type == 'DNN' or model_type == 'rcnn' \
or model_type == 'rnn' or model_type == 'cnn':
preds = model.predict(data_frame, verbose = 2)
elif model_type == 't':
print("ToDO")
elif model_type == 'x':
preds = model.predict(xgb.DMatrix(data_frame), ntree_limit=model.best_ntree_limit)
return preds #.reshape((data_frame.shape[0], -1))
def models_eval(models, data):
preds = None
for (model, model_type) in models:
pred = model_eval(model, model_type, data)
if preds is None:
preds = pred.copy()
else:
preds += pred
preds /= len(models)
return preds
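# --- Hedged illustration (not part of this repo's pipeline) ---
# The loop above trains one model per fold and models_eval() averages their test
# predictions. The sketch below shows the same out-of-fold/averaging pattern with
# scikit-learn only, so it stays self-contained; it is an analogy, not a call into
# this repo's lgb/xgb/keras wrappers. All names here are hypothetical.
def _demo_kfold_average():
    import numpy as np
    from sklearn.model_selection import KFold
    from sklearn.linear_model import LogisticRegression

    rng = np.random.RandomState(0)
    X, y = rng.rand(100, 5), rng.randint(0, 2, 100)
    X_test = rng.rand(20, 5)

    kf = KFold(n_splits=5, shuffle=False)
    fold_models = []
    for train_idx, valid_idx in kf.split(X):
        clf = LogisticRegression(solver='liblinear').fit(X[train_idx], y[train_idx])
        fold_models.append(clf)

    # Average the per-fold test predictions, mirroring models_eval above.
    preds = np.mean([m.predict_proba(X_test)[:, 1] for m in fold_models], axis=0)
    return preds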
|
apache-2.0
|
ben-hopps/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_gdk.py
|
69
|
15968
|
from __future__ import division
import math
import os
import sys
import warnings
def fn_name(): return sys._getframe(1).f_code.co_name
import gobject
import gtk; gdk = gtk.gdk
import pango
pygtk_version_required = (2,2,0)
if gtk.pygtk_version < pygtk_version_required:
raise ImportError ("PyGTK %d.%d.%d is installed\n"
"PyGTK %d.%d.%d or later is required"
% (gtk.pygtk_version + pygtk_version_required))
del pygtk_version_required
import numpy as npy
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.transforms import Affine2D
from matplotlib.backends._backend_gdk import pixbuf_get_pixels_array
backend_version = "%d.%d.%d" % gtk.pygtk_version
_debug = False
# Image formats that this backend supports - for FileChooser and print_figure()
IMAGE_FORMAT = ['eps', 'jpg', 'png', 'ps', 'svg'] + ['bmp'] # , 'raw', 'rgb']
IMAGE_FORMAT.sort()
IMAGE_FORMAT_DEFAULT = 'png'
class RendererGDK(RendererBase):
fontweights = {
100 : pango.WEIGHT_ULTRALIGHT,
200 : pango.WEIGHT_LIGHT,
300 : pango.WEIGHT_LIGHT,
400 : pango.WEIGHT_NORMAL,
500 : pango.WEIGHT_NORMAL,
600 : pango.WEIGHT_BOLD,
700 : pango.WEIGHT_BOLD,
800 : pango.WEIGHT_HEAVY,
900 : pango.WEIGHT_ULTRABOLD,
'ultralight' : pango.WEIGHT_ULTRALIGHT,
'light' : pango.WEIGHT_LIGHT,
'normal' : pango.WEIGHT_NORMAL,
'medium' : pango.WEIGHT_NORMAL,
'semibold' : pango.WEIGHT_BOLD,
'bold' : pango.WEIGHT_BOLD,
'heavy' : pango.WEIGHT_HEAVY,
'ultrabold' : pango.WEIGHT_ULTRABOLD,
'black' : pango.WEIGHT_ULTRABOLD,
}
# cache for efficiency, these must be at class, not instance level
layoutd = {} # a map from text prop tups to pango layouts
rotated = {} # a map from text prop tups to rotated text pixbufs
def __init__(self, gtkDA, dpi):
# widget gtkDA is used for:
#   '<widget>.create_pango_layout(s)'
#   and for the colormap (see self._cmap below)
self.gtkDA = gtkDA
self.dpi = dpi
self._cmap = gtkDA.get_colormap()
self.mathtext_parser = MathTextParser("Agg")
def set_pixmap (self, pixmap):
self.gdkDrawable = pixmap
def set_width_height (self, width, height):
"""w,h is the figure w,h not the pixmap w,h
"""
self.width, self.height = width, height
def draw_path(self, gc, path, transform, rgbFace=None):
transform = transform + Affine2D(). \
scale(1.0, -1.0).translate(0, self.height)
polygons = path.to_polygons(transform, self.width, self.height)
for polygon in polygons:
# draw_polygon won't take an arbitrary sequence -- it must be a list
# of tuples
polygon = [(int(round(x)), int(round(y))) for x, y in polygon]
if rgbFace is not None:
saveColor = gc.gdkGC.foreground
gc.gdkGC.foreground = gc.rgb_to_gdk_color(rgbFace)
self.gdkDrawable.draw_polygon(gc.gdkGC, True, polygon)
gc.gdkGC.foreground = saveColor
if gc.gdkGC.line_width > 0:
self.gdkDrawable.draw_lines(gc.gdkGC, polygon)
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
if bbox != None:
l,b,w,h = bbox.bounds
#rectangle = (int(l), self.height-int(b+h),
# int(w), int(h))
# set clip rect?
im.flipud_out()
rows, cols, image_str = im.as_rgba_str()
image_array = npy.fromstring(image_str, npy.uint8)
image_array.shape = rows, cols, 4
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,
has_alpha=True, bits_per_sample=8,
width=cols, height=rows)
array = pixbuf_get_pixels_array(pixbuf)
array[:,:,:] = image_array
gc = self.new_gc()
y = self.height-y-rows
try: # new in 2.2
# can use None instead of gc.gdkGC, if don't need clipping
self.gdkDrawable.draw_pixbuf (gc.gdkGC, pixbuf, 0, 0,
int(x), int(y), cols, rows,
gdk.RGB_DITHER_NONE, 0, 0)
except AttributeError:
# deprecated in 2.2
pixbuf.render_to_drawable(self.gdkDrawable, gc.gdkGC, 0, 0,
int(x), int(y), cols, rows,
gdk.RGB_DITHER_NONE, 0, 0)
# unflip
im.flipud_out()
def draw_text(self, gc, x, y, s, prop, angle, ismath):
x, y = int(x), int(y)
if x <0 or y <0: # window has shrunk and text is off the edge
return
if angle not in (0,90):
warnings.warn('backend_gdk: unable to draw text at angles ' +
'other than 0 or 90')
elif ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
elif angle==90:
self._draw_rotated_text(gc, x, y, s, prop, angle)
else:
layout, inkRect, logicalRect = self._get_pango_layout(s, prop)
l, b, w, h = inkRect
self.gdkDrawable.draw_layout(gc.gdkGC, x, y-h-b, layout)
def _draw_mathtext(self, gc, x, y, s, prop, angle):
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
if angle==90:
width, height = height, width
x -= width
y -= height
imw = font_image.get_width()
imh = font_image.get_height()
N = imw * imh
# a numpixels by num fonts array
Xall = npy.zeros((N,1), npy.uint8)
image_str = font_image.as_str()
Xall[:,0] = npy.fromstring(image_str, npy.uint8)
# get the max alpha at each pixel
Xs = npy.amax(Xall,axis=1)
# convert it to its proper shape
Xs.shape = imh, imw
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, has_alpha=True,
bits_per_sample=8, width=imw, height=imh)
array = pixbuf_get_pixels_array(pixbuf)
rgb = gc.get_rgb()
array[:,:,0]=int(rgb[0]*255)
array[:,:,1]=int(rgb[1]*255)
array[:,:,2]=int(rgb[2]*255)
array[:,:,3]=Xs
try: # new in 2.2
# can use None instead of gc.gdkGC, if don't need clipping
self.gdkDrawable.draw_pixbuf (gc.gdkGC, pixbuf, 0, 0,
int(x), int(y), imw, imh,
gdk.RGB_DITHER_NONE, 0, 0)
except AttributeError:
# deprecated in 2.2
pixbuf.render_to_drawable(self.gdkDrawable, gc.gdkGC, 0, 0,
int(x), int(y), imw, imh,
gdk.RGB_DITHER_NONE, 0, 0)
def _draw_rotated_text(self, gc, x, y, s, prop, angle):
"""
Draw the text rotated 90 degrees, other angles are not supported
"""
# this function (and its called functions) is a bottleneck
# Pango 1.6 supports rotated text, but pygtk 2.4.0 does not yet have
# wrapper functions
# GTK+ 2.6 pixbufs support rotation
gdrawable = self.gdkDrawable
ggc = gc.gdkGC
layout, inkRect, logicalRect = self._get_pango_layout(s, prop)
l, b, w, h = inkRect
x = int(x-h)
y = int(y-w)
if x < 0 or y < 0: # window has shrunk and text is off the edge
return
key = (x,y,s,angle,hash(prop))
imageVert = self.rotated.get(key)
if imageVert != None:
gdrawable.draw_image(ggc, imageVert, 0, 0, x, y, h, w)
return
imageBack = gdrawable.get_image(x, y, w, h)
imageVert = gdrawable.get_image(x, y, h, w)
imageFlip = gtk.gdk.Image(type=gdk.IMAGE_FASTEST,
visual=gdrawable.get_visual(),
width=w, height=h)
if imageFlip == None or imageBack == None or imageVert == None:
warnings.warn("Could not renderer vertical text")
return
imageFlip.set_colormap(self._cmap)
for i in range(w):
for j in range(h):
imageFlip.put_pixel(i, j, imageVert.get_pixel(j,w-i-1) )
gdrawable.draw_image(ggc, imageFlip, 0, 0, x, y, w, h)
gdrawable.draw_layout(ggc, x, y-b, layout)
imageIn = gdrawable.get_image(x, y, w, h)
for i in range(w):
for j in range(h):
imageVert.put_pixel(j, i, imageIn.get_pixel(w-i-1,j) )
gdrawable.draw_image(ggc, imageBack, 0, 0, x, y, w, h)
gdrawable.draw_image(ggc, imageVert, 0, 0, x, y, h, w)
self.rotated[key] = imageVert
def _get_pango_layout(self, s, prop):
"""
Create a pango layout instance for Text 's' with properties 'prop'.
Return - pango layout (from cache if already exists)
Note that pango assumes a logical DPI of 96
Ref: pango/fonts.c/pango_font_description_set_size() manual page
"""
# problem? - cache gets bigger and bigger, is never cleared out
# two (not one) layouts are created for every text item s (then they
# are cached) - why?
key = self.dpi, s, hash(prop)
value = self.layoutd.get(key)
if value != None:
return value
size = prop.get_size_in_points() * self.dpi / 96.0
size = round(size)
font_str = '%s, %s %i' % (prop.get_name(), prop.get_style(), size,)
font = pango.FontDescription(font_str)
# later - add fontweight to font_str
font.set_weight(self.fontweights[prop.get_weight()])
layout = self.gtkDA.create_pango_layout(s)
layout.set_font_description(font)
inkRect, logicalRect = layout.get_pixel_extents()
self.layoutd[key] = layout, inkRect, logicalRect
return layout, inkRect, logicalRect
def flipy(self):
return True
def get_canvas_width_height(self):
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
if ismath:
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
layout, inkRect, logicalRect = self._get_pango_layout(s, prop)
l, b, w, h = inkRect
return w, h+1, h + 1
def new_gc(self):
return GraphicsContextGDK(renderer=self)
def points_to_pixels(self, points):
return points/72.0 * self.dpi
class GraphicsContextGDK(GraphicsContextBase):
# a cache shared by all class instances
_cached = {} # map: rgb color -> gdk.Color
_joind = {
'bevel' : gdk.JOIN_BEVEL,
'miter' : gdk.JOIN_MITER,
'round' : gdk.JOIN_ROUND,
}
_capd = {
'butt' : gdk.CAP_BUTT,
'projecting' : gdk.CAP_PROJECTING,
'round' : gdk.CAP_ROUND,
}
def __init__(self, renderer):
GraphicsContextBase.__init__(self)
self.renderer = renderer
self.gdkGC = gtk.gdk.GC(renderer.gdkDrawable)
self._cmap = renderer._cmap
def rgb_to_gdk_color(self, rgb):
"""
rgb - an RGB tuple (three 0.0-1.0 values)
return an allocated gtk.gdk.Color
"""
try:
return self._cached[tuple(rgb)]
except KeyError:
color = self._cached[tuple(rgb)] = \
self._cmap.alloc_color(
int(rgb[0]*65535),int(rgb[1]*65535),int(rgb[2]*65535))
return color
#def set_antialiased(self, b):
# anti-aliasing is not supported by GDK
def set_capstyle(self, cs):
GraphicsContextBase.set_capstyle(self, cs)
self.gdkGC.cap_style = self._capd[self._capstyle]
def set_clip_rectangle(self, rectangle):
GraphicsContextBase.set_clip_rectangle(self, rectangle)
if rectangle is None:
return
l,b,w,h = rectangle.bounds
rectangle = (int(l), self.renderer.height-int(b+h)+1,
int(w), int(h))
#rectangle = (int(l), self.renderer.height-int(b+h),
# int(w+1), int(h+2))
self.gdkGC.set_clip_rectangle(rectangle)
def set_dashes(self, dash_offset, dash_list):
GraphicsContextBase.set_dashes(self, dash_offset, dash_list)
if dash_list == None:
self.gdkGC.line_style = gdk.LINE_SOLID
else:
pixels = self.renderer.points_to_pixels(npy.asarray(dash_list))
dl = [max(1, int(round(val))) for val in pixels]
self.gdkGC.set_dashes(dash_offset, dl)
self.gdkGC.line_style = gdk.LINE_ON_OFF_DASH
def set_foreground(self, fg, isRGB=False):
GraphicsContextBase.set_foreground(self, fg, isRGB)
self.gdkGC.foreground = self.rgb_to_gdk_color(self.get_rgb())
def set_graylevel(self, frac):
GraphicsContextBase.set_graylevel(self, frac)
self.gdkGC.foreground = self.rgb_to_gdk_color(self.get_rgb())
def set_joinstyle(self, js):
GraphicsContextBase.set_joinstyle(self, js)
self.gdkGC.join_style = self._joind[self._joinstyle]
def set_linewidth(self, w):
GraphicsContextBase.set_linewidth(self, w)
if w == 0:
self.gdkGC.line_width = 0
else:
pixels = self.renderer.points_to_pixels(w)
self.gdkGC.line_width = max(1, int(round(pixels)))
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasGDK(thisFig)
manager = FigureManagerBase(canvas, num)
# equals:
#manager = FigureManagerBase (FigureCanvasGDK (Figure(*args, **kwargs),
# num)
return manager
class FigureCanvasGDK (FigureCanvasBase):
def __init__(self, figure):
FigureCanvasBase.__init__(self, figure)
self._renderer_init()
def _renderer_init(self):
self._renderer = RendererGDK (gtk.DrawingArea(), self.figure.dpi)
def _render_figure(self, pixmap, width, height):
self._renderer.set_pixmap (pixmap)
self._renderer.set_width_height (width, height)
self.figure.draw (self._renderer)
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['jpg'] = 'JPEG'
filetypes['jpeg'] = 'JPEG'
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, 'jpeg')
print_jpg = print_jpeg
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, 'png')
def _print_image(self, filename, format, *args, **kwargs):
width, height = self.get_width_height()
pixmap = gtk.gdk.Pixmap (None, width, height, depth=24)
self._render_figure(pixmap, width, height)
# jpg colors don't match the display very well, png colors match
# better
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, 0, 8,
width, height)
pixbuf.get_from_drawable(pixmap, pixmap.get_colormap(),
0, 0, 0, 0, width, height)
pixbuf.save(filename, format)
def get_default_filetype(self):
return 'png'
|
agpl-3.0
|
douglaswei/stock
|
fxcm/prepare/process/process.py
|
1
|
3475
|
# -*- coding: utf-8 -*-
__author__ = 'wgz'
import sys
import pandas as pd
import numpy as np
def ma(series, N):
shape = series.shape
kernel = np.ones(N) / N
result = np.convolve(series, kernel)[:shape[0]]
return result
def ema(series, N):
shape = series.shape
N = float(N)
p = 2 / (N + 1)
p_m = 1 - p
result = np.empty(shape[0])
result[0] = series[0]
for idx in range(1, shape[0]):
result[idx] = result[idx - 1] * p_m + series[idx] * p
return result
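# --- Worked example (illustration only, not used by the rest of this module) ---
# For N = 3 the smoothing factor is p = 2 / (N + 1) = 0.5, so each EMA value mixes
# the previous EMA and the new price equally. `_ema_example` is a hypothetical
# helper that just checks the recursion above on a tiny series.
def _ema_example():
    import numpy as np
    out = ema(np.array([1.0, 2.0, 3.0]), 3)
    # result[0] = 1.0, result[1] = 1.0*0.5 + 2.0*0.5 = 1.5, result[2] = 1.5*0.5 + 3.0*0.5 = 2.25
    assert np.allclose(out, [1.0, 1.5, 2.25])
    return out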
def ma_wrapper(df, N, name_pre="ma"):
"""
ma wrapper
:param df: dataframe
:param N: n for ma, ema
:param name_pre: string 'ma', 'ema'
:return:
"""
fn_obj = getattr(sys.modules[__name__], name_pre)
name = name_pre + "_%d"
name = name % N
ma_series = fn_obj(df['close'], N)
df.insert(len(df.columns), name, ma_series)
return df
def macd(df):
if 'ema_12' not in df.columns:
ma_wrapper(df, 12, 'ema')
if 'ema_26' not in df.columns:
ma_wrapper(df, 26, 'ema')
if 'diff' not in df.columns:
diff = df['ema_12'] - df['ema_26']
df.insert(len(df.columns), 'diff', diff)
diff = df['diff']
if 'dea' not in df.columns:
dea = ma(diff, 9)
df.insert(len(df.columns), 'dea', dea)
dea = df['dea']
if 'macd_bar' not in df.columns:
macd_bar = diff - dea
df.insert(len(df.columns), 'macd_bar', macd_bar)
return df
def kdj(df, N=9):
shape = df.shape[0]
h = np.empty(shape)
l = np.empty(shape)
for idx in range(shape):
beg = idx - N + 1
beg = None if beg < 0 else beg
end = idx + 1
h[idx] = np.max(df['high'][beg:end])
l[idx] = np.min(df['low'][beg:end])
rsv = (df['close'] - l) * 100.0 / (h - l)
k = np.empty(shape)
d = np.empty(shape)
k[0], d[0] = 50, 50
for idx in range(1, shape):
k[idx] = k[idx - 1] * 2 / 3 + rsv[idx] / 3
d[idx] = d[idx - 1] * 2 / 3 + k[idx] / 3
j = 3 * k - 2 * d
df.insert(len(df.columns), 'k', k)
df.insert(len(df.columns), 'd', d)
df.insert(len(df.columns), 'j', j)
return df
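# --- Hedged usage sketch (illustration only) ---
# kdj() expects a dataframe with 'high', 'low' and 'close' columns and appends
# 'k', 'd' and 'j' columns in place. The toy bars below are hypothetical.
def _kdj_example():
    import pandas as pd
    bars = pd.DataFrame({
        'high':  [10.0, 10.5, 10.2, 10.8],
        'low':   [ 9.5,  9.8,  9.9, 10.1],
        'close': [ 9.8, 10.2, 10.0, 10.6],
    })
    return kdj(bars, N=3)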
def rsi(df, N):
name = "rsi_%d" % N
pos = df['open'] < df['close']
series = ma(pos.astype(float), N)
df.insert(len(df.columns), name, series)
return df
def adx(df):
length = df.shape[0]
ref_c = df['close'].reset_index(drop=True)
ref_c = pd.concat([pd.Series([ref_c[0], ]), ref_c], ignore_index=True)[:length]
hd = df['high'].reset_index(drop=True) - ref_c
ld = ref_c - df['low'].reset_index(drop=True)
mp = np.empty(length)
dmp = np.empty(length)
mm = np.empty(length)
dmm = np.empty(length)
tr_i = np.max([np.abs(hd), np.abs(ld), df['high'].reset_index(drop=True) - df['low'].reset_index(drop=True)], axis=0)
tr = np.empty(length)
for i in range(length):
mp[i] = hd[i] if hd[i] > 0 and hd[i] > ld[i] else 0
mm[i] = ld[i] if ld[i] > 0 and ld[i] > hd[i] else 0
beg = i - 13
beg = None if beg < 0 else beg
end = i + 1
dmp[i] = np.sum(mp[beg:end])
dmm[i] = np.sum(mm[beg:end])
tr[i] = np.sum(tr_i[beg:end])
pdi = dmp / tr
dmi = dmm / tr
ad = np.abs(pdi - dmi) / (pdi + dmi)
adx_series = ma(ad, 14)
df.insert(len(df.columns), 'pdi', pdi)
df.insert(len(df.columns), 'dmi', dmi)
df.insert(len(df.columns), 'adx', adx_series)
return df
# test
#
# df = pd.read_csv("/Users/wgz/proj/stock/fxcm/data/EURUSD30.csv")
# ma_wrapper(df, 5)
# ma_wrapper(df, 10)
# ma_wrapper(df, 15)
# # macd(df)
# kdj(df)
# # rsi(df, 5)
# # rsi(df, 10)
# # rsi(df, 14)
# adx(df)
# xx = df[:100]
# print xx
|
gpl-2.0
|
hmelberg/motionChart
|
motionchart/motionchart.py
|
1
|
16043
|
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 28 15:33:33 2014
@author: Hans Olav Melberg
"""
# This is a wrapper that makes it possible to create motion charts from a pandas dataframe
#
# Acknowledgements and more information
# See https://github.com/RamyElkest/SocrMotionChartsHTML5 for more information about the javascript
# See also https://github.com/psychemedia/dataviz4development/tree/master/SocrMotionCharts
# For more background, and the Java version, see http://www.amstat.org/publications/jse/v18n3/dinov.pdf
import os
import webbrowser
import pandas as pd
import pyperclip
from IPython.display import display, HTML, IFrame
class MotionChart(object):
''' To create a Motion Chart object from a pandas dataframe:
mc = MotionChart(df = dataframe)
To send the object to the IPython Notebook, to a browser, to the clipboard or to a file, write:
mc.to_notebook()
mc.to_browser()
mc.to_clipboard()
mc.to_file()
Options and defaults (specifying which variable you want to be x, y, etc):
mc = MotionChart(
df = df,
title = "Motion Chart",
url = "http://socr.ucla.edu/htmls/HTML5/MotionChart",
key = 1,
x = 2,
y = 3,
size = 4,
color = 5,
category = 5,
xscale='linear',
yscale='linear',
play = 'true',
loop = 'false',
width = 800,
height = 600,
varLabels=None)
Explained:
df # specifies the name of the pandas dataframe used to create the motion chart, default is df
title # string. The title of the chart
url # string. url to folder with js and css files;
can be local, default is external which requires wireless connection
key # string or integer. the column number of the "motion" variable (does not have to be time)
x # string or integer. number (integer) or name (text, string) of the x-variable in the chart.
Can later be changed by clicking on the variable in the chart.
Number starts from 0 which is the outer index of the dataframe
y # string or integer. number (integer) or name (text, string) of the y-variable in the chart.
size # name (text, string) or column number (integer)
The variable used to determine the size of the circles
color # name (text, string) or column number (integer)
The variable used to determine the color of the circles
category # name (text, string) or column number (integer)
The variable used to describe the category the observation belongs to.
Example Mid-West, South. Often the same variable as color.
xscale # string. Scale for the x-variable, default 'linear'.
Possible values: 'linear', 'log', 'sqrt', 'quadnomial', 'ordinal'
yscale # string. Scale for the y-variable, default 'linear'.
Possible values: 'linear', 'log', 'sqrt', 'quadnomial', 'ordinal'
play # string. 'true' or 'false' (default, false).
Determines whether the motion starts right away or if you have to click play first.
loop # string. 'true' or 'false' (default, false).
Determines whether the motion keeps repeating after one loop over the series, or stops.
width # integer. width of chart in pixels, default 800
height # integer. height of chart in pixels, default 600
varLabels # list. list of labels for columns (default is column headers of dataframe)
Must be of same length as the number of columns in the dataframe, including the index
'''
# This defines the motion chart object.
# Basically just holds the parameters used to create the chart: name of data source, which variables to use
def __init__(self,
df = 'df',
title = "Motion Chart",
url = "http://socr.ucla.edu/htmls/HTML5/MotionChart",
key = 1,
x = 2,
y = 3,
size = 4,
color = 5,
category = 5,
xscale='linear',
yscale='linear',
play = 'true',
loop = 'false',
width = 800,
height = 600,
varLabels=None):
self.df = df
self.title = title
self.url = url
self.key = key
self.x = x
self.y = y
self.size = size
self.color = color
self.category = category
self.xscale= xscale
self.yscale= yscale
self.play = play
self.loop = loop # string: 'true' or 'false' (default, false).
self.width = width # width of chart in pixels, default 800
self.height = height # height of chart in pixels, default 400
self.varLabels = varLabels # list of labels for columns (default is column headers of dataframe
# The informaton from the object is used to generate the HTML string generating the chart
# (inserting the specific information in the object into the template string)
# Note 1: The string is generated in two steps, not one, because future version might want to revise some properties
# without redoing the reformatting and creation of the dataset from the dataframe
# Note 2: Initially the string itself was saved in the object, although useful sometimes it seems memory greedy
# Note 3: The template string used here is just a revised version of a template somebody else has created
# See Tony Hirst: https://github.com/psychemedia/dataviz4development/tree/master/SocrMotionCharts
def htmlStringStart(self):
socrTemplateStart='''<!DOCTYPE html>
<html>
<head>
<!-- Meta Needed to force IE out of Quirks mode -->
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<!--StyleSheets-->
<link href="{url}/css/bootstrap/bootstrap.min.css" rel="stylesheet">
<link href="{url}/css/jquery-ui-1.8.20.custom.css" rel="stylesheet">
<link href="{url}/css/jquery.handsontable.css" rel="stylesheet">
<link href="{url}/css/jquery.motionchart.css" rel="stylesheet">
<link href="{url}/css/jquery.contextMenu.css" rel="stylesheet">
<!--Scripts-->
<script src="{url}/js/jquery-1.7.2.min.js"></script>
<script src="{url}/js/dependencies.min.js"></script>
<script src="{url}/js/custom-bootstrap.js"></script>
<script src="{url}/js/jquery.handsontable.js"></script>
<script src="{url}/js/jquery.motionchart.js"></script>
</head>
<body>
<script>
var data = {data};
</script>
'''
# In order to make it easy to use information in the index of the dataframe, the index in the passed dataframe is reset
# For instance: If the time variable is in the index of the dataframe, say the outer index, then one would write
# mc = MotionChart(df, key = 0) when specifying the motion chart
# Note that although the key often is time, it does not have to be so (unlike Google Motion Chart)
# In MotionCharts it is basically whatever variable you want to use to define the change
df = self.df.reset_index()
# If variable labels are not specified, the column names of the dataframe are used
# Note: if variable labels are specified, the list of labels has to have the same number of elements
# as the columns in the reset dataframe (i.e. the original number of columns plus the number of index levels)
if self.varLabels == None:
self.varLabels = df.columns.tolist()
# Here the data is converted from a pandas dataframe to the format which is accepted by the SocrMotion Chart (javascript)
# The starting point is a json string of all the values in the dataframe, which is then modified to fit SocrMotionChart
dataValuesString = df.to_json(orient = 'values')
varNamesString = ",".join(['"' + str(var) + '"' for var in self.varLabels])
varNamesString = "[[" + varNamesString + "], ["
dataValuesString = dataValuesString.lstrip("[")
socrDataString = varNamesString + dataValuesString
# The generated string containing the data in the right format, is inserted into the template string
htmlString1 = socrTemplateStart.format(
data = socrDataString,
url = self.url
)
# Change reference to bootstrap.js file if the url is changed to "custom-bootstrap.js"
# The js available on SOCR's webpage lists it as bootstrap.js, but in the GitHub version, which many use
# to install a local copy, the same file is referred to as custom-bootstrap.js
# The default is to keep it as 'custom-bootstrap.js', but if the url points to socr
# (which is default since we want the chart to work on the web), then the filename is changed to 'bootstrap.js'
if self.url == "http://socr.ucla.edu/htmls/HTML5/MotionChart":
htmlString1 = htmlString1.replace("custom-bootstrap.js", "bootstrap.js")
return htmlString1
# Generating the last half of the html string which produces the motion chart
# The reason the string is generated in two halves is to open up for revisions in which some options are changed
# without having to transform and generate the data from the dataframe again.
def htmlStringEnd(self):
socrTemplateEnd = '''<div id="content" align="center">
<div class="motionchart" style="width:{width}px; height:{height}px;"></div>
<script>
$('.motionchart').motionchart({{
title: "{title}",
'data': data,
mappings: {{key: {key}, x: {x}, y: {y},
size: {size}, color: {color}, category: {category} }},
scalings: {{ x: '{xscale}', y: '{yscale}' }},
colorPalette: {{"Blue-Red": {{from: "rgb(0,0,255)", to: "rgb(255,0,0)"}}}},
color: "Red-Blue",
play: {play},
loop: {loop}
}});
</script>
</div>
</body>
</html>
'''
# Rename variables to avoid changing the properties of the object when changing strings to numbers
# (Numbers are required in the js script)
kkey = self.key
xx = self.x
yy = self.y
ssize = self.size
ccolor = self.color
ccategory = self.category
# The user is free to specify many variables either by location (an integer representing the column number)
# or by name (the column name in the dataframe)
# This means we have to find and replace with column number if the variable is specified as a string since
# the javascript wants integers (note: variable labels must be unique)
# The code below finds and replaces the specified column name (text) with the column number (numeric)
if type(kkey) is str:
kkey=self.varLabels.index(kkey)
if type(xx) is str:
xx=self.varLabels.index(xx)
if type(yy) is str:
yy=self.varLabels.index(yy)
if type(ssize) is str:
ssize=self.varLabels.index(ssize)
if type(ccolor) is str:
ccolor=self.varLabels.index(ccolor)
if type(ccategory) is str:
ccategory=self.varLabels.index(ccategory)
# The properties are inserted into the last half of the template string
htmlString2 = socrTemplateEnd.format(
title = self.title,
key = kkey, x = xx, y = yy, size = ssize, color = ccolor, category = ccategory,
xscale= self.xscale , yscale= self.yscale,
play = self.play, loop = self.loop,
width = self.width, height = self.height)
return htmlString2
# Display the motion chart in the browser (start the default browser)
def to_browser(self):
htmlString = self.htmlStringStart() + self.htmlStringEnd()
path = os.path.abspath('temp.html')
url = 'file://' + path
with open(path, 'w') as f:
f.write(htmlString)
webbrowser.open(url)
# Display the motion chart in the Ipython notebook
# This is saved to a file because in Python 3 it was difficult to encode the string that could be used in HTML directly
# TODO: Eliminate file (security risk to leave the file on disk, and overwrite danger?) and avoid name conflicts.
# Also: What if multiple figures?
def to_notebook(self, width = 900, height = 700):
htmlString = self.htmlStringStart() + self.htmlStringEnd()
path = os.path.abspath('mc_temp.html')
with open(path, 'w') as f:
f.write(htmlString)
display(IFrame(src="mc_temp.html", width = width, height = height))
# Copy the HTML string to the clipboard
def to_clipboard(self):
htmlString = self.htmlStringStart() + self.htmlStringEnd()
pyperclip.copy(htmlString)
# Save the motion chart as a file (include .html manually if desired)
def to_file(self, path_and_name):
htmlString = self.htmlStringStart() + self.htmlStringEnd()
fileName = path_and_name
try: # encode will not (need not!) work in Python 3 since it is unicode already
fileName = fileName.encode('string-escape')
with open(fileName, 'w') as f:
f.write(htmlString)
except:
with open(fileName, 'w') as f:
f.write(htmlString)
# Include a demo option
def MotionChartDemo():
fruitdf = pd.DataFrame([
['Apples', '1988-0-1', 1000, 300, 44,'East'],
['Oranges', '1988-0-1', 1150, 200, 42, 'West'],
['Bananas', '1988-0-1', 300, 250, 35, 'West'],
['Apples', '1989-6-1', 1200, 400, 48, 'East'],
['Oranges', '1989-6-1', 750, 150, 47, 'West'],
['Bananas', '1989-6-1', 788, 617, 45, 'West']])
fruitdf.columns = ['fruit', 'time', 'sales', 'price', 'temperature', 'location']
fruitdf['time'] = pd.to_datetime(fruitdf['time'])
mChart = MotionChart(
df = fruitdf,
url = "http://socr.ucla.edu/htmls/HTML5/MotionChart",
key = 'time',
x = 'price',
y = 'sales',
size = 'temperature',
color = 'fruit',
category = 'location')
mChart.to_browser()
|
gpl-2.0
|
fdiehl/apsis
|
code/setup.py
|
2
|
2733
|
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
setup(
name='apsis',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.1.1',
description='Toolkit for hyperparameter optimization for machine learning algorithms.',
long_description='Our goal is to provide a flexible, simple and scaleable approach - parallel, on clusters and/or on your own machine. Check out our usage tutorials to get started or the design pages to understand how apsis works.',
# The project's main homepage.
url='https://github.com/FrederikDiehl/apsis',
# Author details
author='Frederik Diehl, Andreas Jauch',
author_email= '[email protected]',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
],
# What does your project relate to?
keywords='bayesian optimization machine learning',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*', 'documentation', 'diagrams']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['scipy', 'numpy', 'gpy>=0.6.0', 'matplotlib'],
package_data={
'apsis': ['config/*'],
},
# List additional groups of dependencies here (e.g. development dependencies).
# You can install these using the following syntax, for example:
# $ pip install -e .[dev,test]
extras_require = {
'test': ['coverage', 'nosetests'],
'demos': ['sklearn']
},
)
|
mit
|
ryfeus/lambda-packs
|
Tensorflow_LightGBM_Scipy_nightly/source/scipy/integrate/odepack.py
|
62
|
9420
|
# Author: Travis Oliphant
from __future__ import division, print_function, absolute_import
__all__ = ['odeint']
from . import _odepack
from copy import copy
import warnings
class ODEintWarning(Warning):
pass
_msgs = {2: "Integration successful.",
1: "Nothing was done; the integration time was 0.",
-1: "Excess work done on this call (perhaps wrong Dfun type).",
-2: "Excess accuracy requested (tolerances too small).",
-3: "Illegal input detected (internal error).",
-4: "Repeated error test failures (internal error).",
-5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
-6: "Error weight became zero during problem.",
-7: "Internal workspace insufficient to finish (internal error)."
}
def odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0,
ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0,
hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12,
mxords=5, printmessg=0):
"""
Integrate a system of ordinary differential equations.
Solve a system of ordinary differential equations using lsoda from the
FORTRAN library odepack.
Solves the initial value problem for stiff or non-stiff systems
of first order ode-s::
dy/dt = func(y, t0, ...)
where y can be a vector.
*Note*: The first two arguments of ``func(y, t0, ...)`` are in the
opposite order of the arguments in the system definition function used
by the `scipy.integrate.ode` class.
Parameters
----------
func : callable(y, t0, ...)
Computes the derivative of y at t0.
y0 : array
Initial condition on y (can be a vector).
t : array
A sequence of time points for which to solve for y. The initial
value point should be the first element of this sequence.
args : tuple, optional
Extra arguments to pass to function.
Dfun : callable(y, t0, ...)
Gradient (Jacobian) of `func`.
col_deriv : bool, optional
True if `Dfun` defines derivatives down columns (faster),
otherwise `Dfun` should define derivatives across rows.
full_output : bool, optional
True if to return a dictionary of optional outputs as the second output
printmessg : bool, optional
Whether to print the convergence message
Returns
-------
y : array, shape (len(t), len(y0))
Array containing the value of y for each desired time in t,
with the initial value `y0` in the first row.
infodict : dict, only returned if full_output == True
Dictionary containing additional output information
======= ============================================================
key meaning
======= ============================================================
'hu' vector of step sizes successfully used for each time step.
'tcur' vector with the value of t reached for each time step.
(will always be at least as large as the input times).
'tolsf' vector of tolerance scale factors, greater than 1.0,
computed when a request for too much accuracy was detected.
'tsw' value of t at the time of the last method switch
(given for each time step)
'nst' cumulative number of time steps
'nfe' cumulative number of function evaluations for each time step
'nje' cumulative number of jacobian evaluations for each time step
'nqu' a vector of method orders for each successful step.
'imxer' index of the component of largest magnitude in the
weighted local error vector (e / ewt) on an error return, -1
otherwise.
'lenrw' the length of the double work array required.
'leniw' the length of integer work array required.
'mused' a vector of method indicators for each successful time step:
1: adams (nonstiff), 2: bdf (stiff)
======= ============================================================
Other Parameters
----------------
ml, mu : int, optional
If either of these are not None or non-negative, then the
Jacobian is assumed to be banded. These give the number of
lower and upper non-zero diagonals in this banded matrix.
For the banded case, `Dfun` should return a matrix whose
rows contain the non-zero bands (starting with the lowest diagonal).
Thus, the return matrix `jac` from `Dfun` should have shape
``(ml + mu + 1, len(y0))`` when ``ml >=0`` or ``mu >=0``.
The data in `jac` must be stored such that ``jac[i - j + mu, j]``
holds the derivative of the `i`th equation with respect to the `j`th
state variable. If `col_deriv` is True, the transpose of this
`jac` must be returned.
rtol, atol : float, optional
The input parameters `rtol` and `atol` determine the error
control performed by the solver. The solver will control the
vector, e, of estimated local errors in y, according to an
inequality of the form ``max-norm of (e / ewt) <= 1``,
where ewt is a vector of positive error weights computed as
``ewt = rtol * abs(y) + atol``.
rtol and atol can be either vectors the same length as y or scalars.
Defaults to 1.49012e-8.
tcrit : ndarray, optional
Vector of critical points (e.g. singularities) where integration
care should be taken.
h0 : float, (0: solver-determined), optional
The step size to be attempted on the first step.
hmax : float, (0: solver-determined), optional
The maximum absolute step size allowed.
hmin : float, (0: solver-determined), optional
The minimum absolute step size allowed.
ixpr : bool, optional
Whether to generate extra printing at method switches.
mxstep : int, (0: solver-determined), optional
Maximum number of (internally defined) steps allowed for each
integration point in t.
mxhnil : int, (0: solver-determined), optional
Maximum number of messages printed.
mxordn : int, (0: solver-determined), optional
Maximum order to be allowed for the non-stiff (Adams) method.
mxords : int, (0: solver-determined), optional
Maximum order to be allowed for the stiff (BDF) method.
See Also
--------
ode : a more object-oriented integrator based on VODE.
quad : for finding the area under a curve.
Examples
--------
The second order differential equation for the angle `theta` of a
pendulum acted on by gravity with friction can be written::
theta''(t) + b*theta'(t) + c*sin(theta(t)) = 0
where `b` and `c` are positive constants, and a prime (') denotes a
derivative. To solve this equation with `odeint`, we must first convert
it to a system of first order equations. By defining the angular
velocity ``omega(t) = theta'(t)``, we obtain the system::
theta'(t) = omega(t)
omega'(t) = -b*omega(t) - c*sin(theta(t))
Let `y` be the vector [`theta`, `omega`]. We implement this system
in python as:
>>> def pend(y, t, b, c):
... theta, omega = y
... dydt = [omega, -b*omega - c*np.sin(theta)]
... return dydt
...
We assume the constants are `b` = 0.25 and `c` = 5.0:
>>> b = 0.25
>>> c = 5.0
For initial conditions, we assume the pendulum is nearly vertical
with `theta(0)` = `pi` - 0.1, and it initially at rest, so
`omega(0)` = 0. Then the vector of initial conditions is
>>> y0 = [np.pi - 0.1, 0.0]
We generate a solution 101 evenly spaced samples in the interval
0 <= `t` <= 10. So our array of times is:
>>> t = np.linspace(0, 10, 101)
Call `odeint` to generate the solution. To pass the parameters
`b` and `c` to `pend`, we give them to `odeint` using the `args`
argument.
>>> from scipy.integrate import odeint
>>> sol = odeint(pend, y0, t, args=(b, c))
The solution is an array with shape (101, 2). The first column
is `theta(t)`, and the second is `omega(t)`. The following code
plots both components.
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, sol[:, 0], 'b', label='theta(t)')
>>> plt.plot(t, sol[:, 1], 'g', label='omega(t)')
>>> plt.legend(loc='best')
>>> plt.xlabel('t')
>>> plt.grid()
>>> plt.show()
"""
if ml is None:
ml = -1 # changed to zero inside function call
if mu is None:
mu = -1 # changed to zero inside function call
t = copy(t)
y0 = copy(y0)
output = _odepack.odeint(func, y0, t, args, Dfun, col_deriv, ml, mu,
full_output, rtol, atol, tcrit, h0, hmax, hmin,
ixpr, mxstep, mxhnil, mxordn, mxords)
if output[-1] < 0:
warning_msg = _msgs[output[-1]] + " Run with full_output = 1 to get quantitative information."
warnings.warn(warning_msg, ODEintWarning)
elif printmessg:
warning_msg = _msgs[output[-1]]
warnings.warn(warning_msg, ODEintWarning)
if full_output:
output[1]['message'] = _msgs[output[-1]]
output = output[:-1]
if len(output) == 1:
return output[0]
else:
return output
|
mit
|
minixalpha/spark
|
python/setup.py
|
11
|
9765
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import glob
import os
import sys
from setuptools import setup, find_packages
from shutil import copyfile, copytree, rmtree
if sys.version_info < (2, 7):
print("Python versions prior to 2.7 are not supported for pip installed PySpark.",
file=sys.stderr)
exit(-1)
try:
exec(open('pyspark/version.py').read())
except IOError:
print("Failed to load PySpark version file for packaging. You must be in Spark's python dir.",
file=sys.stderr)
sys.exit(-1)
VERSION = __version__
# A temporary path so we can access above the Python project root and fetch scripts and jars we need
TEMP_PATH = "deps"
SPARK_HOME = os.path.abspath("../")
# Provide guidance about how to use setup.py
incorrect_invocation_message = """
If you are installing pyspark from spark source, you must first build Spark and
run sdist.
To build Spark with maven you can run:
./build/mvn -DskipTests clean package
Building the source dist is done in the Python directory:
cd python
python setup.py sdist
pip install dist/*.tar.gz"""
# Figure out where the jars are we need to package with PySpark.
JARS_PATH = glob.glob(os.path.join(SPARK_HOME, "assembly/target/scala-*/jars/"))
if len(JARS_PATH) == 1:
JARS_PATH = JARS_PATH[0]
elif (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1):
# Release mode puts the jars in a jars directory
JARS_PATH = os.path.join(SPARK_HOME, "jars")
elif len(JARS_PATH) > 1:
print("Assembly jars exist for multiple scalas ({0}), please cleanup assembly/target".format(
JARS_PATH), file=sys.stderr)
sys.exit(-1)
elif len(JARS_PATH) == 0 and not os.path.exists(TEMP_PATH):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
EXAMPLES_PATH = os.path.join(SPARK_HOME, "examples/src/main/python")
SCRIPTS_PATH = os.path.join(SPARK_HOME, "bin")
DATA_PATH = os.path.join(SPARK_HOME, "data")
LICENSES_PATH = os.path.join(SPARK_HOME, "licenses")
SCRIPTS_TARGET = os.path.join(TEMP_PATH, "bin")
JARS_TARGET = os.path.join(TEMP_PATH, "jars")
EXAMPLES_TARGET = os.path.join(TEMP_PATH, "examples")
DATA_TARGET = os.path.join(TEMP_PATH, "data")
LICENSES_TARGET = os.path.join(TEMP_PATH, "licenses")
# Check and see if we are under the spark path in which case we need to build the symlink farm.
# This is important because we only want to build the symlink farm while under Spark otherwise we
# want to use the symlink farm. And if the symlink farm already exists while we are under Spark (e.g. a
# partially built sdist) we should error and have the user sort it out.
in_spark = (os.path.isfile("../core/src/main/scala/org/apache/spark/SparkContext.scala") or
(os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1))
def _supports_symlinks():
"""Check if the system supports symlinks (e.g. *nix) or not."""
return getattr(os, "symlink", None) is not None
if (in_spark):
# Construct links for setup
try:
os.mkdir(TEMP_PATH)
except:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
exit(-1)
try:
# We copy the shell script to be under pyspark/python/pyspark so that the launcher scripts
# find it where expected. The rest of the files aren't copied because they are accessed
# using Python imports instead which will be resolved correctly.
try:
os.makedirs("pyspark/python/pyspark")
except OSError:
# Don't worry if the directory already exists.
pass
copyfile("pyspark/shell.py", "pyspark/python/pyspark/shell.py")
if (in_spark):
# Construct the symlink farm - this is necessary since we can't refer to the path above the
# package root and we need to copy the jars and scripts which are up above the python root.
if _supports_symlinks():
os.symlink(JARS_PATH, JARS_TARGET)
os.symlink(SCRIPTS_PATH, SCRIPTS_TARGET)
os.symlink(EXAMPLES_PATH, EXAMPLES_TARGET)
os.symlink(DATA_PATH, DATA_TARGET)
os.symlink(LICENSES_PATH, LICENSES_TARGET)
else:
# For windows fall back to the slower copytree
copytree(JARS_PATH, JARS_TARGET)
copytree(SCRIPTS_PATH, SCRIPTS_TARGET)
copytree(EXAMPLES_PATH, EXAMPLES_TARGET)
copytree(DATA_PATH, DATA_TARGET)
copytree(LICENSES_PATH, LICENSES_TARGET)
else:
# If we are not inside of SPARK_HOME verify we have the required symlink farm
if not os.path.exists(JARS_TARGET):
print("To build packaging must be in the python directory under the SPARK_HOME.",
file=sys.stderr)
if not os.path.isdir(SCRIPTS_TARGET):
print(incorrect_invocation_message, file=sys.stderr)
exit(-1)
# Scripts directive requires a list of each script path and does not take wild cards.
script_names = os.listdir(SCRIPTS_TARGET)
scripts = list(map(lambda script: os.path.join(SCRIPTS_TARGET, script), script_names))
# We add find_spark_home.py to the bin directory we install so that pip installed PySpark
# will search for SPARK_HOME with Python.
scripts.append("pyspark/find_spark_home.py")
# Parse the README markdown file into rst for PyPI
long_description = "!!!!! missing pandoc do not upload to PyPI !!!!"
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except ImportError:
print("Could not import pypandoc - required to package PySpark", file=sys.stderr)
except OSError:
print("Could not convert - pandoc is not installed", file=sys.stderr)
setup(
name='pyspark',
version=VERSION,
description='Apache Spark Python API',
long_description=long_description,
author='Spark Developers',
author_email='[email protected]',
url='https://github.com/apache/spark/tree/master/python',
packages=['pyspark',
'pyspark.mllib',
'pyspark.mllib.linalg',
'pyspark.mllib.stat',
'pyspark.ml',
'pyspark.ml.linalg',
'pyspark.ml.param',
'pyspark.sql',
'pyspark.streaming',
'pyspark.bin',
'pyspark.jars',
'pyspark.python.pyspark',
'pyspark.python.lib',
'pyspark.data',
'pyspark.licenses',
'pyspark.examples.src.main.python'],
include_package_data=True,
package_dir={
'pyspark.jars': 'deps/jars',
'pyspark.bin': 'deps/bin',
'pyspark.python.lib': 'lib',
'pyspark.data': 'deps/data',
'pyspark.licenses': 'deps/licenses',
'pyspark.examples.src.main.python': 'deps/examples',
},
package_data={
'pyspark.jars': ['*.jar'],
'pyspark.bin': ['*'],
'pyspark.python.lib': ['*.zip'],
'pyspark.data': ['*.txt', '*.data'],
'pyspark.licenses': ['*.txt'],
'pyspark.examples.src.main.python': ['*.py', '*/*.py']},
scripts=scripts,
license='http://www.apache.org/licenses/LICENSE-2.0',
install_requires=['py4j==0.10.6'],
setup_requires=['pypandoc'],
extras_require={
'ml': ['numpy>=1.7'],
'mllib': ['numpy>=1.7'],
'sql': ['pandas>=0.13.0']
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy']
)
finally:
# We only cleanup the symlink farm if we were in Spark, otherwise we are installing rather than
# packaging.
if (in_spark):
# Depending on cleaning up the symlink farm or copied version
if _supports_symlinks():
os.remove(os.path.join(TEMP_PATH, "jars"))
os.remove(os.path.join(TEMP_PATH, "bin"))
os.remove(os.path.join(TEMP_PATH, "examples"))
os.remove(os.path.join(TEMP_PATH, "data"))
os.remove(os.path.join(TEMP_PATH, "licenses"))
else:
rmtree(os.path.join(TEMP_PATH, "jars"))
rmtree(os.path.join(TEMP_PATH, "bin"))
rmtree(os.path.join(TEMP_PATH, "examples"))
rmtree(os.path.join(TEMP_PATH, "data"))
rmtree(os.path.join(TEMP_PATH, "licenses"))
os.rmdir(TEMP_PATH)
|
apache-2.0
|
hvanhovell/spark
|
examples/src/main/python/sql/arrow.py
|
8
|
8426
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A simple example demonstrating Arrow in Spark.
Run with:
./bin/spark-submit examples/src/main/python/sql/arrow.py
"""
from __future__ import print_function
from pyspark.sql import SparkSession
from pyspark.sql.utils import require_minimum_pandas_version, require_minimum_pyarrow_version
require_minimum_pandas_version()
require_minimum_pyarrow_version()
def dataframe_with_arrow_example(spark):
# $example on:dataframe_with_arrow$
import numpy as np
import pandas as pd
# Enable Arrow-based columnar data transfers
spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
# Generate a Pandas DataFrame
pdf = pd.DataFrame(np.random.rand(100, 3))
# Create a Spark DataFrame from a Pandas DataFrame using Arrow
df = spark.createDataFrame(pdf)
# Convert the Spark DataFrame back to a Pandas DataFrame using Arrow
result_pdf = df.select("*").toPandas()
# $example off:dataframe_with_arrow$
print("Pandas DataFrame result statistics:\n%s\n" % str(result_pdf.describe()))
def scalar_pandas_udf_example(spark):
# $example on:scalar_pandas_udf$
import pandas as pd
from pyspark.sql.functions import col, pandas_udf
from pyspark.sql.types import LongType
# Declare the function and create the UDF
def multiply_func(a, b):
return a * b
multiply = pandas_udf(multiply_func, returnType=LongType())
# The function for a pandas_udf should be able to execute with local Pandas data
x = pd.Series([1, 2, 3])
print(multiply_func(x, x))
# 0 1
# 1 4
# 2 9
# dtype: int64
# Create a Spark DataFrame, 'spark' is an existing SparkSession
df = spark.createDataFrame(pd.DataFrame(x, columns=["x"]))
# Execute function as a Spark vectorized UDF
df.select(multiply(col("x"), col("x"))).show()
# +-------------------+
# |multiply_func(x, x)|
# +-------------------+
# | 1|
# | 4|
# | 9|
# +-------------------+
# $example off:scalar_pandas_udf$
def scalar_iter_pandas_udf_example(spark):
# $example on:scalar_iter_pandas_udf$
import pandas as pd
from pyspark.sql.functions import col, pandas_udf, struct, PandasUDFType
pdf = pd.DataFrame([1, 2, 3], columns=["x"])
df = spark.createDataFrame(pdf)
# When the UDF is called with a single column that is not StructType,
# the input to the underlying function is an iterator of pd.Series.
@pandas_udf("long", PandasUDFType.SCALAR_ITER)
def plus_one(batch_iter):
for x in batch_iter:
yield x + 1
df.select(plus_one(col("x"))).show()
# +-----------+
# |plus_one(x)|
# +-----------+
# | 2|
# | 3|
# | 4|
# +-----------+
    # When the UDF is called with more than one column,
    # the input to the underlying function is an iterator of tuples of pd.Series.
@pandas_udf("long", PandasUDFType.SCALAR_ITER)
def multiply_two_cols(batch_iter):
for a, b in batch_iter:
yield a * b
df.select(multiply_two_cols(col("x"), col("x"))).show()
# +-----------------------+
# |multiply_two_cols(x, x)|
# +-----------------------+
# | 1|
# | 4|
# | 9|
# +-----------------------+
# When the UDF is called with a single column that is StructType,
# the input to the underlying function is an iterator of pd.DataFrame.
@pandas_udf("long", PandasUDFType.SCALAR_ITER)
def multiply_two_nested_cols(pdf_iter):
for pdf in pdf_iter:
yield pdf["a"] * pdf["b"]
df.select(
multiply_two_nested_cols(
struct(col("x").alias("a"), col("x").alias("b"))
).alias("y")
).show()
# +---+
# | y|
# +---+
# | 1|
# | 4|
# | 9|
# +---+
# In the UDF, you can initialize some states before processing batches.
# Wrap your code with try/finally or use context managers to ensure
# the release of resources at the end.
y_bc = spark.sparkContext.broadcast(1)
@pandas_udf("long", PandasUDFType.SCALAR_ITER)
def plus_y(batch_iter):
y = y_bc.value # initialize states
try:
for x in batch_iter:
yield x + y
finally:
pass # release resources here, if any
df.select(plus_y(col("x"))).show()
# +---------+
# |plus_y(x)|
# +---------+
# | 2|
# | 3|
# | 4|
# +---------+
# $example off:scalar_iter_pandas_udf$
def grouped_map_pandas_udf_example(spark):
# $example on:grouped_map_pandas_udf$
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = spark.createDataFrame(
[(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
("id", "v"))
@pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP)
def subtract_mean(pdf):
# pdf is a pandas.DataFrame
v = pdf.v
return pdf.assign(v=v - v.mean())
df.groupby("id").apply(subtract_mean).show()
# +---+----+
# | id| v|
# +---+----+
# | 1|-0.5|
# | 1| 0.5|
# | 2|-3.0|
# | 2|-1.0|
# | 2| 4.0|
# +---+----+
# $example off:grouped_map_pandas_udf$
def grouped_agg_pandas_udf_example(spark):
# $example on:grouped_agg_pandas_udf$
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql import Window
df = spark.createDataFrame(
[(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
("id", "v"))
@pandas_udf("double", PandasUDFType.GROUPED_AGG)
def mean_udf(v):
return v.mean()
df.groupby("id").agg(mean_udf(df['v'])).show()
# +---+-----------+
# | id|mean_udf(v)|
# +---+-----------+
# | 1| 1.5|
# | 2| 6.0|
# +---+-----------+
w = Window \
.partitionBy('id') \
.rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
df.withColumn('mean_v', mean_udf(df['v']).over(w)).show()
# +---+----+------+
# | id| v|mean_v|
# +---+----+------+
# | 1| 1.0| 1.5|
# | 1| 2.0| 1.5|
# | 2| 3.0| 6.0|
# | 2| 5.0| 6.0|
# | 2|10.0| 6.0|
# +---+----+------+
# $example off:grouped_agg_pandas_udf$
def map_iter_pandas_udf_example(spark):
# $example on:map_iter_pandas_udf$
import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = spark.createDataFrame([(1, 21), (2, 30)], ("id", "age"))
@pandas_udf(df.schema, PandasUDFType.MAP_ITER)
def filter_func(batch_iter):
for pdf in batch_iter:
yield pdf[pdf.id == 1]
df.mapInPandas(filter_func).show()
# +---+---+
# | id|age|
# +---+---+
# | 1| 21|
# +---+---+
# $example off:map_iter_pandas_udf$
if __name__ == "__main__":
spark = SparkSession \
.builder \
.appName("Python Arrow-in-Spark example") \
.getOrCreate()
print("Running Pandas to/from conversion example")
dataframe_with_arrow_example(spark)
print("Running pandas_udf scalar example")
scalar_pandas_udf_example(spark)
print("Running pandas_udf scalar iterator example")
scalar_iter_pandas_udf_example(spark)
print("Running pandas_udf grouped map example")
grouped_map_pandas_udf_example(spark)
print("Running pandas_udf grouped agg example")
grouped_agg_pandas_udf_example(spark)
print("Running pandas_udf map iterator example")
map_iter_pandas_udf_example(spark)
spark.stop()
|
apache-2.0
|
mathdd/numpy
|
numpy/linalg/linalg.py
|
31
|
75612
|
"""Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast, atleast_2d, intp, asanyarray, isscalar
)
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % len(a.shape))
def _assertRankAtLeast2(*arrays):
for a in arrays:
if len(a.shape) < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % len(a.shape))
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _assertNoEmpty2d(*arrays):
for a in arrays:
if a.size == 0 and product(a.shape[-2:]) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv, einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
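    A minimal sketch of the broadcasting behaviour mentioned in the Notes,
    solving two 2x2 systems at once:
    >>> a = np.array([[[3., 1.], [1., 2.]], [[2., 0.], [0., 2.]]])
    >>> b = np.array([[9., 8.], [2., 4.]])
    >>> np.allclose(np.linalg.solve(a, b), [[2., 3.], [1., 2.]])
    True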
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
if a.shape[-1] == 0 and b.shape[-1] == 0:
# Legal, but the ufunc cannot handle the 0-sized inner dims
# let the ufunc handle all wrong cases.
a = a.reshape(a.shape[:-1])
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve1
else:
if b.size == 0:
if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
a = a[:,:1].reshape(a.shape[:-1] + (1,))
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if a.shape[-1] == 0:
# The inner array is 0x0, the ufunc cannot handle this case
return wrap(empty_like(a, dtype=result_t))
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Upper or lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
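    A minimal sketch of the two-step solve described in the Notes, reusing the
    factor ``L`` computed above (a general solver is used here for brevity):
    >>> b = np.array([1., 3.])
    >>> y = np.linalg.solve(L, b)
    >>> x = np.linalg.solve(L.conj().T, y)
    >>> np.allclose(np.dot(A, x), b)
    True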
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
# QR decompostion
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
        The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
Numpy 1.8 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated.",
warnings.warn(msg, DeprecationWarning)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t :
a = a.astype(result_t, copy=False)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Same as `lower`, with 'L' for lower and 'U' for upper triangular.
Deprecated.
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _syevd, _heevd
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712])
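    A minimal cross-check against `eigvals`: the values agree once sorted.
    >>> np.allclose(LA.eigvalsh(a), np.sort(LA.eigvals(a).real))
    True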
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
        array will always be of complex type. When `a` is real
the resulting eigenvalues will be real (0 imaginary part) or
occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
(conjugate symmetric) array.
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
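    A minimal check of the defining relation ``dot(a, v[:, i]) = w[i] * v[:, i]``
    stated in the Notes, applied to the last example:
    >>> np.allclose(np.dot(a, v), v * w)
    True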
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
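    A minimal check of the relation stated in the Notes: the squared singular
    values are the eigenvalues of ``a.H a``.
    >>> evals = np.linalg.eigvalsh(np.dot(a.conj().T, a))
    >>> np.allclose(np.sort(s**2), evals)
    True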
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
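    A minimal check of the definition given in the Notes, using the Frobenius
    norm:
    >>> np.allclose(LA.cond(a, 'fro'), LA.norm(a, 'fro') * LA.norm(LA.inv(a), 'fro'))
    True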
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[0]/s[-1]
else:
return norm(x, p)*norm(inv(x), p)
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
    .. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
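    A minimal sketch of the default tolerance described in the Notes, applied to
    the rank-deficient matrix ``I`` above:
    >>> S = np.linalg.svd(I, compute_uv=False)
    >>> tol = S.max() * max(I.shape) * np.finfo(S.dtype).eps
    >>> int((S > tol).sum())
    3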
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * finfo(S.dtype).eps
return sum(S > tol)
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
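    A minimal sketch of the SVD construction described in the Notes (no singular
    values are cut off here because `a` has full column rank):
    >>> u, s, vt = np.linalg.svd(a, full_matrices=False)
    >>> np.allclose(B, np.dot(vt.T, np.dot(np.diag(1. / s), u.T)))
    True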
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
            s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
if isscalar(sign):
sign = sign.astype(result_t)
else:
sign = sign.astype(result_t, copy=False)
if isscalar(logdet):
logdet = logdet.astype(real_t)
else:
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
    slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
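    A minimal cross-check: the determinant equals the product of the eigenvalues.
    >>> np.allclose(np.linalg.det(a), np.prod(np.linalg.eigvals(a), axis=-1))
    True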
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
if isscalar(r):
r = r.astype(result_t)
else:
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
    >>> print(m, c)
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
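    A minimal cross-check against the normal equations, valid here because `A`
    has full column rank:
    >>> np.allclose([m, c], np.linalg.solve(np.dot(A.T, A), np.dot(A.T, y)))
    True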
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
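    # Added clarifying comment: ?gelsd overwrites bstar in place; the first n
    # entries of each right-hand side hold the least-squares solution, and for
    # full-rank overdetermined systems (rank == n, m > n) the squared norm of
    # the remaining entries gives the residual sums computed below.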
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
st = s[:min(n, m)].astype(result_real_t, copy=True)
return wrap(x), wrap(resids), results['rank'], st
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return values is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4
>>> LA.norm(b, np.inf)
9
>>> LA.norm(a, -np.inf)
0
>>> LA.norm(b, -np.inf)
2
>>> LA.norm(a, 1)
20
>>> LA.norm(b, 1)
7
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([6, 6])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
        except Exception:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
if row_axis < 0:
row_axis += nd
if col_axis < 0:
col_axis += nd
if not (0 <= row_axis < nd and 0 <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
    >>> A = np.random.random((10000, 100))
    >>> B = np.random.random((100, 1000))
    >>> C = np.random.random((1000, 5))
    >>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> multi_dot([A, B, C, D])
instead of::
>>> np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> A.dot(B).dot(C).dot(D)
Example: multiplication costs of different parenthesizations
------------------------------------------------------------
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
    :math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1])
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assertRank2(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`
"""
# cost1 = cost((AB)C)
cost1 = (A.shape[0] * A.shape[1] * B.shape[1] + # (AB)
A.shape[0] * B.shape[1] * C.shape[1]) # (--)C
    # cost2 = cost(A(BC))
cost2 = (B.shape[0] * B.shape[1] * C.shape[1] + # (BC)
A.shape[0] * A.shape[1] * C.shape[1]) # A(--)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
    Return a np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
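# Worked example (comment added for illustration) for the docstring shapes
# A (10x100), B (100x5), C (5x50): the dimension list is p = [10, 100, 5, 50]
# and the recurrence fills
#     m[0, 1] = 10*100*5  = 5000                      # cost of (AB)
#     m[1, 2] = 100*5*50  = 25000                     # cost of (BC)
#     m[0, 2] = min(0 + 25000 + 10*100*50,            # A(BC)
#                   5000 + 0 + 10*5*50) = 7500        # (AB)C
# so s[0, 2] == 1, i.e. split after B and compute (AB) first, matching the
# 7500-vs-75000 comparison in the multi_dot docstring.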
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
|
bsd-3-clause
|
hugobuddel/orange3
|
Orange/widgets/classify/owclassificationtreegraph.py
|
2
|
18846
|
import sys
import numpy
from sklearn.tree._tree import TREE_LEAF
from Orange.widgets.classify.owtreeviewer2d import *
from Orange.data import Table
from Orange.classification.tree import TreeClassifier
from Orange.preprocess.transformation import Indicator
from Orange.widgets.utils.colorpalette import ColorPaletteDlg
from Orange.widgets.settings import \
Setting, ContextSetting, ClassValuesContextHandler
from Orange.widgets import gui
class OWClassificationTreeGraph(OWTreeViewer2D):
name = "Classification Tree Viewer"
description = "Graphical visualization of a classification tree."
icon = "icons/ClassificationTree.svg"
settingsHandler = ClassValuesContextHandler()
target_class_index = ContextSetting(0)
color_settings = Setting(None)
selected_color_settings_index = Setting(0)
inputs = [("Classification Tree", TreeClassifier, "ctree")]
outputs = [("Data", Table)]
def __init__(self):
super().__init__()
self.domain = None
self.classifier = None
self.dataset = None
self.clf_dataset = None
self.scene = TreeGraphicsScene(self)
self.scene_view = TreeGraphicsView(self.scene)
self.scene_view.setViewportUpdateMode(QGraphicsView.FullViewportUpdate)
self.mainArea.layout().addWidget(self.scene_view)
self.toggle_zoom_slider()
self.scene.selectionChanged.connect(self.update_selection)
box = gui.widgetBox(self.controlArea, "Nodes", addSpace=True)
self.target_combo = gui.comboBox(
box, self, "target_class_index", orientation=0, items=[],
label="Target class", callback=self.toggle_target_class,
contentsLength=8)
gui.separator(box)
gui.button(box, self, "Set Colors", callback=self.set_colors)
dlg = self.create_color_dialog()
self.scene.colorPalette = dlg.getDiscretePalette("colorPalette")
gui.rubber(self.controlArea)
def sendReport(self):
if self.tree:
tclass = str(self.targetCombo.currentText())
tsize = "%i nodes, %i leaves" % (orngTree.countNodes(self.tree),
orngTree.countLeaves(self.tree))
else:
tclass = tsize = "N/A"
self.reportSettings(
"Information",
[("Target class", tclass),
("Line widths",
["Constant", "Proportion of all instances",
"Proportion of parent's instances"][self.line_width_method]),
("Tree size", tsize)])
super().sendReport()
def set_colors(self):
dlg = self.create_color_dialog()
if dlg.exec_():
self.color_settings = dlg.getColorSchemas()
self.selected_color_settings_index = dlg.selectedSchemaIndex
self.scene.colorPalette = dlg.getDiscretePalette("colorPalette")
self.scene.update()
self.toggle_node_color()
def create_color_dialog(self):
c = ColorPaletteDlg(self, "Color Palette")
c.createDiscretePalette("colorPalette", "Discrete Palette")
c.setColorSchemas(self.color_settings,
self.selected_color_settings_index)
return c
def set_node_info(self):
for node in self.scene.nodes():
node.set_rect(QRectF())
self.update_node_info(node)
w = max([n.rect().width() for n in self.scene.nodes()] + [0])
if w > self.max_node_width:
w = self.max_node_width
for node in self.scene.nodes():
node.set_rect(QRectF(node.rect().x(), node.rect().y(),
w, node.rect().height()))
self.scene.fix_pos(self.root_node, 10, 10)
def update_node_info(self, node):
distr = node.get_distribution()
total = int(node.num_instances())
if self.target_class_index:
tabs = distr[self.target_class_index - 1]
text = ""
else:
modus = node.majority()
tabs = distr[modus]
text = self.domain.class_vars[0].values[modus] + "<br/>"
if tabs > 0.999:
text += "100%, {}/{}".format(total, total)
else:
text += "{:2.1f}%, {}/{}".format(100 * tabs,
int(total * tabs), total)
if not node.is_leaf():
attribute = self.domain.attributes[node.attribute()]
if isinstance(attribute.compute_value, Indicator):
attribute = attribute.compute_value.variable
text += "<hr/>{}".format(attribute.name)
node.setHtml('<p style="line-height: 120%; margin-bottom: 0">'
'{}</p>'.
format(text))
def activate_loaded_settings(self):
if not self.tree:
return
super().activate_loaded_settings()
self.set_node_info()
self.toggle_node_color()
def toggle_node_size(self):
self.set_node_info()
self.scene.update()
self.scene_view.repaint()
def toggle_node_color(self):
palette = self.scene.colorPalette
for node in self.scene.nodes():
distr = node.get_distribution()
total = numpy.sum(distr)
if self.target_class_index:
p = distr[self.target_class_index - 1] / total
color = palette[self.target_class_index].light(200 - 100 * p)
else:
modus = node.majority()
p = distr[modus] / (total or 1)
color = palette[int(modus)].light(400 - 300 * p)
node.backgroundBrush = QBrush(color)
self.scene.update()
def toggle_target_class(self):
self.toggle_node_color()
self.set_node_info()
self.scene.update()
def ctree(self, clf=None):
self.clear()
self.closeContext()
self.classifier = clf
if clf is None:
self.info.setText('No tree.')
self.tree = None
self.root_node = None
self.dataset = None
else:
self.tree = clf.skl_model.tree_
self.domain = clf.domain
self.dataset = getattr(clf, "instances", None)
if self.dataset is not None and self.dataset.domain != self.domain:
self.clf_dataset = \
Table.from_table(self.classifier.domain, self.dataset)
else:
self.clf_dataset = self.dataset
self.target_combo.clear()
self.target_combo.addItem("None")
self.target_combo.addItems(self.domain.class_vars[0].values)
self.target_class_index = 0
self.openContext(self.domain.class_var)
self.root_node = self.walkcreate(self.tree, 0, None)
self.info.setText(
'{} nodes, {} leaves'.
format(self.tree.node_count,
numpy.count_nonzero(
self.tree.children_left == TREE_LEAF)))
self.scene.fix_pos(self.root_node, self._HSPACING, self._VSPACING)
self.activate_loaded_settings()
self.scene_view.centerOn(self.root_node.x(), self.root_node.y())
self.update_node_tooltips()
self.scene.update()
self.send("Data", None)
def walkcreate(self, tree, node_id, parent=None):
node = ClassificationTreeNode(tree, self.domain, parent, None,
self.scene, i=node_id)
if parent:
parent.graph_add_edge(
GraphicsEdge(None, self.scene, node1=parent, node2=node))
left_child_index = tree.children_left[node_id]
right_child_index = tree.children_right[node_id]
if left_child_index != TREE_LEAF:
self.walkcreate(tree, node_id=left_child_index, parent=node)
if right_child_index != TREE_LEAF:
self.walkcreate(tree, node_id=right_child_index, parent=node)
return node
def node_tooltip(self, node):
if node.i > 0:
text = " AND<br/>".join(
"%s %s %s" % (n, s, v) for n, s, v in node.rule())
else:
text = "Root"
return text
def update_selection(self):
if self.dataset is None or self.classifier is None or self.tree is None:
return
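        # Added clarifying comment: map the selected tree nodes to their leaf
        # indices, then to the rows of the training data routed to those
        # leaves, and send that subset downstream.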
items = [item for item in self.scene.selectedItems()
if isinstance(item, ClassificationTreeNode)]
selected_leaves = [_leaf_indices(self.tree, item.node_id)
for item in items]
if selected_leaves:
selected_leaves = numpy.unique(numpy.hstack(selected_leaves))
all_leaves = _leaf_indices(self.tree, 0)
if len(selected_leaves) > 0:
ind = numpy.searchsorted(all_leaves, selected_leaves, side="left")
leaf_samples = _assign_samples(self.tree, self.clf_dataset.X)
leaf_samples = [leaf_samples[i] for i in ind]
indices = numpy.hstack(leaf_samples)
else:
indices = []
if len(indices):
data = self.dataset[indices]
else:
data = None
self.send("Data", data)
class PieChart(QGraphicsRectItem):
def __init__(self, dist, r, parent, scene):
super().__init__(parent, scene)
self.dist = dist
self.r = r
# noinspection PyPep8Naming
def setR(self, r):
self.prepareGeometryChange()
self.r = r
def boundingRect(self):
return QRectF(-self.r, -self.r, 2*self.r, 2*self.r)
def paint(self, painter, option, widget=None):
dist_sum = sum(self.dist)
start_angle = 0
colors = self.scene().colorPalette
for i in range(len(self.dist)):
angle = self.dist[i] * 16 * 360. / dist_sum
if angle == 0:
continue
painter.setBrush(QBrush(colors[i]))
painter.setPen(QPen(colors[i]))
painter.drawPie(-self.r, -self.r, 2 * self.r, 2 * self.r,
int(start_angle), int(angle))
start_angle += angle
painter.setPen(QPen(Qt.white))
painter.setBrush(QBrush())
painter.drawEllipse(-self.r, -self.r, 2 * self.r, 2 * self.r)
def _subnode_range(tree, node_id):
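    # Added clarifying comment (assumes sklearn's default depth-first tree
    # builder): nodes are stored in preorder, so all descendants of node_id
    # occupy a contiguous index block, returned here as a half-open range.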
right = left = node_id
if tree.children_left[left] == TREE_LEAF:
assert tree.children_right[node_id] == TREE_LEAF
return node_id, node_id
else:
left = tree.children_left[left]
# run down to the right most node
while tree.children_right[right] != TREE_LEAF:
right = tree.children_right[right]
return left, right + 1
def _leaf_indices(tree, node_id):
start, stop = _subnode_range(tree, node_id)
if start == stop:
# leaf
return numpy.array([node_id], dtype=int)
else:
isleaf = tree.children_left[start: stop] == TREE_LEAF
assert numpy.flatnonzero(isleaf).size > 0
return start + numpy.flatnonzero(isleaf)
def _assign_samples(tree, X):
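    # Added clarifying comment: route every row of X down the tree with the
    # same `feature <= threshold` test sklearn uses, collecting one array of
    # row indices per leaf, in depth-first leaf order.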
def assign(node_id, indices):
if tree.children_left[node_id] == TREE_LEAF:
return [indices]
else:
feature_idx = tree.feature[node_id]
thresh = tree.threshold[node_id]
column = X[indices, feature_idx]
leftmask = column <= thresh
leftind = assign(tree.children_left[node_id], indices[leftmask])
rightind = assign(tree.children_right[node_id], indices[~leftmask])
return list.__iadd__(leftind, rightind)
N, _ = X.shape
items = numpy.arange(N, dtype=int)
leaf_indices = assign(0, items)
return leaf_indices
class ClassificationTreeNode(GraphicsNode):
def __init__(self, tree, domain, parent=None, parent_item=None,
scene=None, i=0, distr=None):
super().__init__(tree, parent, parent_item, scene)
self.distribution = distr
self.tree = tree
self.domain = domain
self.i = i
self.node_id = i
self.parent = parent
self.pie = PieChart(self.get_distribution(), 8, self, scene)
fm = QFontMetrics(self.document().defaultFont())
self.attr_text_w = fm.width(str(self.attribute() if self.attribute()
else ""))
self.attr_text_h = fm.lineSpacing()
self.line_descent = fm.descent()
self._rect = None
def get_distribution(self):
"""
:return: Distribution of class values.
"""
if self.is_leaf():
counts = self.tree.value[self.node_id]
else:
leaf_ind = _leaf_indices(self.tree, self.node_id)
values = self.tree.value[leaf_ind]
counts = numpy.sum(values, axis=0)
assert counts.shape[0] == 1, "n_outputs > 1 "
counts = counts[0]
counts_sum = numpy.sum(counts)
if counts_sum > 0:
counts /= counts_sum
return counts
def num_instances(self):
"""
:return: Number of instances in a particular node.
"""
return self.tree.n_node_samples[self.i]
def split_condition(self):
"""
:return: split condition to reach a particular node.
"""
if self.i > 0:
attribute = self.domain.attributes[self.attribute()]
parent_attr = self.domain.attributes[self.parent.attribute()]
parent_attr_cv = parent_attr.compute_value
is_left_child = self.tree.children_left[self.parent.i] == self.i
if isinstance(parent_attr_cv, Indicator) and \
hasattr(parent_attr_cv.variable, "values"):
values = parent_attr_cv.variable.values
return values[abs(parent_attr_cv.value - is_left_child)] \
if len(values) == 2 \
else "≠ " * is_left_child + values[parent_attr_cv.value]
else:
thresh = self.tree.threshold[self.parent.i]
return "%s %s" % ([">", "<="][is_left_child],
attribute.str_val(thresh))
else:
return ""
def rule(self):
"""
:return:
Rule to reach node as list of tuples (attr index, sign, threshold)
"""
# TODO: this is easily extended to Classification Rules-compatible form
return self.rulew(i=self.i)
def rulew(self, i=0):
"""
:param i:
Index of current node.
:return:
Rule to reach node i, represented as list of tuples (attr name,
sign, threshold)
"""
if i > 0:
parent_attr = self.domain.attributes[self.parent.attribute()]
parent_attr_cv = parent_attr.compute_value
is_left_child = self.tree.children_left[self.parent.i] == i
pr = self.parent.rule()
if isinstance(parent_attr_cv, Indicator) and \
hasattr(parent_attr_cv.variable, "values"):
values = parent_attr_cv.variable.values
attr_name = parent_attr_cv.variable.name
sign = ["=", "≠"][is_left_child * (len(values) != 2)]
value = values[abs(parent_attr_cv.value -
is_left_child * (len(values) == 2))]
else:
attr_name = parent_attr.name
sign = [">", "<="][is_left_child]
value = "%.3f" % self.tree.threshold[self.parent.i]
pr.append((attr_name, sign, value))
return pr
else:
return []
def is_leaf(self):
"""
:return: Node is leaf
"""
return self.tree.children_left[self.node_id] < 0 and \
self.tree.children_right[self.node_id] < 0
def attribute(self):
"""
:return: Node attribute index.
"""
return self.tree.feature[self.node_id]
def majority(self):
"""
:return:
Majority class at node.
"""
return numpy.argmax(self.get_distribution())
def update_contents(self):
self.prepareGeometryChange()
self.setTextWidth(-1)
self.setTextWidth(self.document().idealWidth())
self.droplet.setPos(self.rect().center().x(), self.rect().height())
self.droplet.setVisible(bool(self.branches))
self.pie.setPos(self.rect().right(), self.rect().center().y())
fm = QFontMetrics(self.document().defaultFont())
self.attr_text_w = fm.width(str(self.attribute() if self.attribute()
else ""))
self.attr_text_h = fm.lineSpacing()
self.line_descent = fm.descent()
def rect(self):
if self._rect and self._rect.isValid():
return self._rect
else:
return QRectF(QPointF(0, 0), self.document().size()).\
adjusted(0, 0, 8, 0) | \
(getattr(self, "_rect") or QRectF(0, 0, 1, 1))
def set_rect(self, rect):
self.prepareGeometryChange()
rect = QRectF() if rect is None else rect
self._rect = rect
self.setTextWidth(-1)
self.update_contents()
self.update()
def boundingRect(self):
if hasattr(self, "attr"):
attr_rect = QRectF(QPointF(0, -self.attr_text_h),
QSizeF(self.attr_text_w, self.attr_text_h))
else:
attr_rect = QRectF(0, 0, 1, 1)
rect = self.rect().adjusted(-5, -5, 5, 5)
return rect | attr_rect
def paint(self, painter, option, widget=None):
rect = self.rect()
if self.isSelected():
option.state ^= QStyle.State_Selected
painter.setFont(self.document().defaultFont())
draw_text = str(self.split_condition())
painter.drawText(QPointF(4, -self.line_descent - 1), draw_text)
painter.save()
painter.setBrush(self.backgroundBrush)
if self.isSelected():
painter.setPen(QPen(QBrush(Qt.black), 2))
else:
painter.setPen(QPen(Qt.gray))
if self.is_leaf():
painter.drawRect(rect.adjusted(-3, 0, 0, 0))
else:
painter.drawRoundedRect(rect.adjusted(-3, 0, 0, 0), 4, 4)
painter.restore()
painter.setClipRect(rect)
return QGraphicsTextItem.paint(self, painter, option, widget)
if __name__ == "__main__":
from Orange.classification.tree import TreeLearner
a = QApplication(sys.argv)
ow = OWClassificationTreeGraph()
data = Table("iris")
clf = TreeLearner(max_depth=3)(data)
clf.instances = data
ow.ctree(clf)
ow.show()
ow.raise_()
a.exec_()
ow.saveSettings()
|
gpl-3.0
|
f3r/scikit-learn
|
examples/linear_model/plot_iris_logistic.py
|
283
|
1678
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# Create an instance of the Logistic Regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
|
bsd-3-clause
|
DrSkippy/Data-Science-45min-Intros
|
support-vector-machines-101/svm-example.py
|
26
|
2219
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__="Josh Montague"
__license__="MIT License"
import sys
import pandas as pd
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.svm import SVC
import matplotlib.pyplot as plt
try:
import seaborn as sns
except ImportError as e:
sys.stderr.write("seaborn not installed. Using default matplotlib templates.")
# cobbled together from refs:
# http://scikit-learn.org/stable/auto_examples/svm/plot_iris.html
# http://scikit-learn.org/stable/auto_examples/svm/plot_separating_hyperplane.html
if len(sys.argv) > 1:
samples = int( sys.argv[1] )
c_std=2.0
else:
samples = 10
c_std=1.0
X, y = make_blobs(n_samples=samples, cluster_std=c_std, centers=2)
# make a plotting grid
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# svm
clf = SVC(kernel='linear').fit(X, y)
# predict all points in grid
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# separating plane and margins
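# Added note: the decision boundary satisfies w[0]*x + w[1]*y + b = 0, i.e.
# y = -(w[0]/w[1])*x - b/w[1]; the dashed margins drawn below are parallel
# lines through the first and last support vectors.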
w = clf.coef_[0]
a = -w[0] / w[1]
xxx = np.linspace(x_min, x_max)
yyy = a * xxx - (clf.intercept_[0]) / w[1]
# calculate the large margin boundaries defined by the support vectors
b = clf.support_vectors_[0]
yyy_down = a * xxx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yyy_up = a * xxx + (b[1] - a * b[0])
# plot margins
plt.figure(figsize=(8,6))
plt.plot(xxx, yyy, 'k-', linewidth=1)
plt.plot(xxx, yyy_down, 'k--', linewidth=1)
plt.plot(xxx, yyy_up, 'k--', linewidth=1)
# plot decision contours
Z = Z.reshape(xx.shape)
#plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
plt.contourf(xx, yy, Z, alpha=0.25)
# plot data
plt.scatter(X[:, 0], X[:, 1],
s=100,
c=y,
alpha=0.8,
cmap=plt.cm.Paired
)
# plot support vectors
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=300,
facecolors='none'
)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xlabel('x')
plt.ylabel('y')
# SHOW ALL THE THINGS
plt.show()
|
unlicense
|
hdmetor/scikit-learn
|
sklearn/cluster/tests/test_spectral.py
|
262
|
7954
|
"""Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01)
S = rbf_kernel(X, gamma=1)
S = np.maximum(S - 1e-4, 0)
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
# Note: in the following, random_state has been selected to have
# a dataset that yields a stable eigen decomposition both when built
# on OSX and Linux
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
# nearest neighbors affinity
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
assert_equal(adjusted_rand_score(y, sp.labels_), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
X = check_random_state(10).rand(10, 5) * 10
kernels_available = kernel_metrics()
for kern in kernels_available:
# Additive chi^2 gives a negative similarity matrix which
# doesn't make sense for spectral clustering
if kern != 'additive_chi2':
sp = SpectralClustering(n_clusters=2, affinity=kern,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
# Test the discretize using a noise assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.random_integers(0, n_class, n_samples)
y_true = np.array(y_true, np.float)
# noise class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.toarray()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
|
bsd-3-clause
|
ericmjl/bokeh
|
examples/reference/models/radio_group_server.py
|
1
|
1256
|
## Bokeh server for Radio Group
import pandas as pd
from bokeh.io import curdoc
from bokeh.layouts import row
from bokeh.models import ColumnDataSource, RadioGroup
from bokeh.plotting import figure
x=[3,4,6,12,10,1,5,6,3,8]
y=[7,1,3,4,1,6,10,4,10,3]
label=['Red', 'Orange', 'Red', 'Orange','Red', 'Orange','Red', 'Orange','Red', 'Orange',]
df=pd.DataFrame({'x':x,'y':y,'label':label})
source = ColumnDataSource(data=dict(x=df.x, y=df.y,label=df.label))
plot_figure = figure(title='Radio Group',plot_height=450, plot_width=600,
tools="save,reset", toolbar_location="below")
plot_figure.scatter('x', 'y',color='label', source=source, size=10)
radio_group = RadioGroup(labels=["Red", "Orange"])
def radiogroup_click(attr,old,new):
active_radio=radio_group.active ##Getting radio button value
# filter the dataframe with value in radio-button
if active_radio==0:
selected_df = df[df['label'] == 'Red']
elif active_radio==1:
selected_df = df[df['label'] == "Orange"]
source.data=dict(x=selected_df.x, y=selected_df.y,label=selected_df.label)
radio_group.on_change('active',radiogroup_click)
layout=row(radio_group, plot_figure)
curdoc().add_root(layout)
curdoc().title = "Radio Group Bokeh Server"
|
bsd-3-clause
|
scholz/toyclassifiers
|
toygaussiannb.py
|
1
|
5702
|
#!/usr/bin/python2.7
import numpy as np
from abstractclassifier import AbstractClassifier
class ToyGaussianNB(AbstractClassifier):
"""
Toy Gaussian Naive Bayes (GaussianNB)
Algorithm
---------
- Training
- Compute priors based on prevalence of classes in train_data
- Compute mean and variance per class per feature
- Classification
- Compute the probability of an instance belonging to a specific class by:
- Iterating over all features and multiplying each iteration together,
where each iteration is a product of:
- the prior for the investigated class
- the probability that the instance value comes from a normal distribution (gaussian)
created using the mean and variance derived during training for this feature
      - To yield a valid probability (in [0, 1]) that the instance belongs to a class,
        each class's product must be divided by the sum of the products over all classes
- The class with the highest probability is chosen as result for this instance
    Note: In the code, multiplications are replaced by summations since we are working with
    logarithms to avoid numerical problems with very small numbers.
Used Documentation
------------------
- http://www.cs.cmu.edu/~epxing/Class/10701-10s/Lecture/lecture5.pdf (using gaussian for continuous variables)
- http://scikit-learn.org/stable/modules/naive_bayes.html
- http://github.com/scikit-learn/scikit-learn/blob/a95203b/sklearn/naive_bayes.py (prevent std==0 by using std+=eps)
Note: lots of optimization potential, this code is approx. 60x slower than sklearn gaussian NB
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
class_names_ : array, shape (n_classes,)
name of each class
class_priors_ : array, shape (n_classes,)
prior of each class
class_feature_means_ : array, shape (n_classes, n_features)
mean of each feature per class
class_feature_vars_ : array, shape (n_classes, n_features)
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from toygaussiannb import ToyGaussianNB
>>> clf = ToyGaussianNB()
>>> clf.fit(X, Y)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
eps=1e-9
def __init__(self):
self.class_names_=[]
self.class_priors_=[]
self.class_feature_means_=[]
self.class_feature_vars_=[]
def gaussian(self, x,mean,var):
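        # Added clarifying comment: log-density of a normal distribution,
        #   log N(x; mean, var) = -0.5*log(2*pi*var) - (x - mean)**2 / (2*var);
        # working in log space avoids underflow when many per-feature factors
        # are combined.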
prob_x= np.log( 1./np.sqrt(2*np.pi * var) ) - 0.5* ((x-mean)**2/var)
return prob_x
def fit(self, training_features, training_labels):
classes=np.array(training_labels)
# ----------------------------------------------- #
# compute prior probabilities #
# ----------------------------------------------- #
for c in classes:
if c not in self.class_names_:
self.class_names_.append(c)
self.class_priors_.append(1.)
else:
self.class_priors_[self.class_names_.index(c)]+=1.
self.class_priors_=np.array(self.class_priors_, dtype=float)
self.class_priors_/=len(classes)
# ----------------------------------------------- #
# compute mean and variance per class per feature #
# ----------------------------------------------- #
m,n=training_features.shape
self.class_feature_means_=np.zeros((len(self.class_names_),n), dtype=float)
self.class_feature_vars_=np.zeros((len(self.class_names_),n), dtype=float)
for f in range(0,n):
f_vect=training_features[:,f]
for c in range(len(self.class_names_)):
self.class_feature_means_[c, f]=np.mean(f_vect[classes==self.class_names_[c]])
self.class_feature_vars_[c, f]=np.var(f_vect[classes==self.class_names_[c]])+self.eps
def predict(self, predict):
# ----------------------------------------------- #
# predict classes on predict DS #
# ----------------------------------------------- #
m,n=predict.shape
res=[]
res_proba=[]
# for every row
for r in range(0,m):
# result vector for this row will have a log likelihood for each class
posteriori=np.log(self.class_priors_)
# for every feature
for f in range(0,n):
# for each class
for c in range(len(self.class_names_)):
posteriori[c]+=self.gaussian(predict[r,f], self.class_feature_means_[c,f], self.class_feature_vars_[c,f])
# argmax c (extract name of class with maximal log likelihood)
res.append(self.class_names_[np.where(posteriori==max(posteriori))[0]])
res_proba.append(posteriori)
# iterate over result to build result array
return(res, res_proba)
def __repr__(self):
return "class names: %s\nclass priors: %s\nmeans: %s\nvars: %s"%(self.class_names_, self.class_priors_, self.class_feature_means_, self_class_feature_vars_)
|
mit
|
702nADOS/sumo
|
docs/tutorial/san_pablo_dam/data/analyzeData.py
|
1
|
4210
|
"""
@file analyzeData.py
@author Daniel Krajzewicz
@author Laura Bieker
@date 2011-09-30
@version $Id: analyzeData.py 22608 2017-01-17 06:28:54Z behrisch $
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2008-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
import math
import numpy as np
def getAttr(line, which):
beg = line.find(which)
beg = line.find('"', beg)
end = line.find('"', beg + 1)
return line[beg + 1:end]
# this is from here: http://code.activestate.com/recipes/389639
class Ddict(dict):
def __init__(self, default=None):
self.default = default
def __getitem__(self, key):
if key not in self:
self[key] = self.default()
return dict.__getitem__(self, key)
# os.system('run-an-external-command')
# os.getcwd()
# os.chdir()
f = open(sys.argv[1], 'r')
data = f.readlines()
f.close()
dd = Ddict(lambda: Ddict(lambda: 0))
# f1 = open('raw-results.txt','w')
f1 = open('tmp.txt', 'w')
for i in range(1, len(data)):
if data[i].find('<interval') != -1:
ll = data[i].split('"')
nn = int(getAttr(data[i], "nVehContrib")) # int(ll[7])
lane = int(getAttr(data[i], "id")[-1:]) # int(ll[5])
tt = float(getAttr(data[i], "begin")) # float(ll[1])
itt = int(tt)
if nn > 0:
print(tt, lane, nn, ll[9], ll[11], ll[13], ll[15], file=f1)
dd[itt][lane] = nn
f1.close()
maxLanes = 0
dt2OneHour = 6.0
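# Added clarifying comment (assumption): dt2OneHour scales per-interval counts
# to vehicles per hour, so 6.0 presumably corresponds to 600 s (10 min)
# aggregation intervals in the detector output.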
for t in dd.iterkeys():
if len(dd[t]) > maxLanes:
maxLanes = len(dd[t])
tVec = np.zeros(len(dd), dtype=int)
QVec = np.zeros(len(dd), dtype=int)
xVec = np.zeros((len(dd), maxLanes), dtype=float)
qVec = np.zeros((len(dd), maxLanes), dtype=float)
vecIndx = 0
f = open('lane-shares.txt', 'w')
# for t,v in dd.items():
for t in sorted(dd.iterkeys()):
# qTot = math.fsum(dd[t])
qTot = sum(dd[t].values())
nrm = 0.0
if qTot:
nrm = 1.0 / qTot
s = repr(t) + ' ' + repr(qTot) + ' '
tVec[vecIndx] = t
QVec[vecIndx] = dt2OneHour * qTot
for lane in range(maxLanes):
share = 0.0
if lane in dd[t]:
share = nrm * dd[t][lane]
s = s + repr(share) + ' '
xVec[vecIndx, lane] = share
qVec[vecIndx, lane] = dt2OneHour * dd[t][lane]
# print >> f,t,qTot,lane,share
vecIndx += 1
print(s, file=f)
f.close()
try:
import matplotlib.pyplot as plt
plt.rcParams['xtick.direction'] = 'out'
plt.rcParams['ytick.direction'] = 'out'
# y =
n = len(qVec)
for lane in range(maxLanes):
desc = 'lane: ' + repr(lane)
plt.plot(tVec, qVec[range(n), lane], label=desc)
# plt.plot(tVec, qVec[range(n),0], 'r-',tVec, qVec[range(n),1], 'g-',tVec, qVec[range(n),2], 'b-')
# plt.plot(tVec, QVec, 'r-')
plt.ylabel('lane flows')
plt.xlabel('time [s]')
plt.legend()
bname = 'flows-over-time-' + repr(maxLanes)
plt.savefig(bname + '.eps')
plt.savefig(bname + '.pdf')
plt.savefig(bname + '.png')
plt.savefig(bname + '.svg')
# try:
# import pyemf
# plt.savefig('shares-over-time.emf')
# except :
# print '# no emf support'
# plt.show()
plt.close()
# next plot:
for lane in range(maxLanes):
desc = 'lane: ' + repr(lane)
plt.plot(QVec, xVec[range(n), lane], 'o', markersize=10, label=desc)
# plt.plot(tVec, qVec[range(n),0], 'r-',tVec, qVec[range(n),1], 'g-',tVec, qVec[range(n),2], 'b-')
# plt.plot(tVec, QVec, 'r-')
plt.ylabel('lane shares')
plt.xlabel('total flow [veh/h]')
plt.legend()
bname = 'shares-vs-flow-' + repr(maxLanes)
plt.savefig(bname + '.eps')
plt.savefig(bname + '.pdf')
plt.savefig(bname + '.png')
plt.savefig(bname + '.svg')
# plt.show()
plt.close()
except ImportError:
print('no matplotlib, falling back to gnuplot')
os.system('gnuplot do-some-plots.gnu')
|
gpl-3.0
|
elenbert/allsky
|
src/webdatagen/system-sensors.py
|
1
|
2514
|
#!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import MySQLdb
import sys
import config
def plot_cpu_temperature(sensor_data, output_file):
xdata = []
ydata = []
print 'Plotting cpu temperature graph using ' + str(len(sensor_data)) + ' db records'
for row in sensor_data:
xdata.append(row[0])
ydata.append(row[1])
temper = np.array(ydata)
plt.title('CPU temperature: ' + str(ydata[-1]) + ' C\n')
plt.plot(xdata, temper, label = "Temperature", color="red")
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
plt.legend()
plt.ylabel('Temperature C')
plt.grid(True)
plt.tight_layout()
plt.savefig(output_file, dpi=120)
print 'Graph saved as ' + output_file
plt.gcf().clear()
def plot_internal_climate(sensor_data, output_file):
xdata = []
ydata_temper = []
ydata_humidity = []
print 'Plotting internal temperature/humidity graph using ' + str(len(sensor_data)) + ' db records'
for row in sensor_data:
xdata.append(row[0])
ydata_temper.append(row[1])
ydata_humidity.append(row[2])
temper = np.array(ydata_temper)
humid = np.array(ydata_humidity)
plt.subplot(211)
plt.title('Box air temperature and humidity\nCurrent temperature: '
+ str(ydata_temper[-1]) + ' C\nCurrent humidity: ' + str(ydata_humidity[-1]) + ' %\n')
plt.plot(xdata, temper, label = "Temperature")
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
plt.legend()
plt.ylabel('Temperature C')
plt.grid(True)
plt.tight_layout()
plt.subplot(212)
plt.plot(xdata, humid, label = "Humidity", color='green')
plt.xlabel('Time period: ' + str(xdata[0].date()) \
+ ' - ' + str((xdata[len(xdata)-1]).date()))
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
plt.grid(True)
plt.legend()
plt.ylabel('Humidity %')
plt.tight_layout()
plt.savefig(output_file, dpi=120)
print 'Graph saved as ' + output_file
plt.gcf().clear()
db = MySQLdb.connect(host=config.MYSQL_HOST, user=config.MYSQL_USER, \
passwd=config.MYSQL_PASSWORD, db=config.MYSQL_DB, connect_timeout=90)
cur = db.cursor()
print 'Selecting data from db'
cur.execute("SELECT * from cpu_sensor WHERE time >= NOW() - INTERVAL 1 DAY")
plot_cpu_temperature(cur.fetchall(), output_file=config.PLOT_CPU_TEMPERATURE_DAY)
cur.execute("SELECT * from internal_dh22 WHERE time >= NOW() - INTERVAL 1 DAY")
plot_internal_climate(cur.fetchall(), output_file=config.PLOT_INTERNAL_DH22_DAY)
db.close()
print 'Done\n'
|
gpl-2.0
|
Tjorriemorrie/trading
|
09_scalping/learn-lr.py
|
1
|
1254
|
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
from pprint import pprint
X = np.array([
[1, 3, 5],
[1, 2, 3],
[2, 5, 7],
[2, 3, 6],
])
#pprint(sample)
print '\nX Shape'
print X.shape
# Split the data into training/testing sets
X_train = X[:2]
X_test = X[-2:]
print '\nX_train'
pprint(X_train)
print '\nX_test'
pprint(X_test)
# Split the targets into training/testing sets
Y = np.array([
[3, 5, 8],
[3, 6, 9],
[1, 2, 5],
[2, 4, 8],
])
print '\nY Shape'
print Y.shape
Y_train = Y[:-2]
Y_test = Y[-2:]
print '\nY_train'
pprint(Y_train)
print '\nY_test'
pprint(Y_test)
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(X_train, Y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((regr.predict(X_test) - Y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(X_test, Y_test))
# Plot outputs
plt.scatter(X_test, Y_test, color='black')
plt.plot(X_test, regr.predict(X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
|
mit
|
gaapt/opencog
|
opencog/python/utility/functions.py
|
34
|
11056
|
from math import fabs, isnan
from datetime import datetime
from spatiotemporal.unix_time import UnixTime
from utility.generic import convert_dict_to_sorted_lists
from utility.numeric.globals import EPSILON
from numpy import NINF as NEGATIVE_INFINITY, PINF as POSITIVE_INFINITY
from scipy.integrate import quad
__author__ = 'keyvan'
def integral(function, start, end):
if hasattr(function, 'integral'):
return function.integral(start, end)
area, error = quad(function, start, end)
return area
def almost_equals(a, b, epsilon=EPSILON):
if fabs(a - b) < epsilon:
return True
return False
def invoke_method_on(method, sequence_or_point):
if method is None:
return None
if not callable(method):
raise TypeError("'method' is not callable")
result = []
try:
for point in sequence_or_point:
if type(point) is datetime:
point = UnixTime(point)
result.append(method(point))
except TypeError:
if type(sequence_or_point) is datetime:
sequence_or_point = UnixTime(sequence_or_point)
return method(sequence_or_point)
return result
def index_of_first_local_maximum(sequence):
first_time = True
index = 0
for element in sequence:
if first_time:
previous = element
first_time = False
continue
if element <= previous:
return index
previous = element
index += 1
return None
class Function(object):
_domain = None
_range = None
_function_undefined = None
def __init__(self, function_undefined=None, domain=None):
if function_undefined is not None:
self.function_undefined = function_undefined
if domain is not None:
if not hasattr(domain, '__iter__') or not hasattr(domain, '__getitem__'):
raise TypeError("'domain' should be iterable and support indexing")
self._domain = domain
def call_on_single_point(self, x):
"""
to override, __call__ invokes this to handle both points and sequences
"""
return 0
def derivative(self, point):
return None
def _check_domain_for(self, feature_name):
if self.domain is None:
raise TypeError("'{0}' object does not support {1}, 'domain' should be specified".format(
self.__class__.__name__, feature_name))
def plot(self, plt=None):
self._check_domain_for('plotting')
if plt is None:
import matplotlib.pyplot as plt
plt.plot(self.domain, self.range)
return plt
@property
def function_undefined(self):
return self._function_undefined
@function_undefined.setter
def function_undefined(self, value):
if value is not None and not isinstance(value, Function):
raise TypeError("'function_undefined' should be of type 'Function'")
self._function_undefined = value
@property
def domain(self):
return self._domain
@property
def range(self):
return self()
def __call__(self, x=None):
if x is None:
self._check_domain_for("call with 'None'")
x = self.domain
return invoke_method_on(self.call_on_single_point, x)
def __getitem__(self, index):
self._check_domain_for('indexing')
return self.range[index]
def __len__(self):
self._check_domain_for('len()')
return len(self.range)
def __iter__(self):
self._check_domain_for('iter()')
return iter(self.range)
def __reversed__(self):
self._check_domain_for('reversed()')
return reversed(self.range)
class FunctionLinear(Function):
def __init__(self, a=None, b=None, x_0=None, y_0=None, x_1=None, y_1=None):
#(x_0, y_0), (x_1, y_1) = sorted([(x_0, y_0), (x_1, y_1)])
if (a, b) == (None, None):
a = (float(y_1) - y_0) / (x_1 - x_0)
b = y_0 - a * x_0
if isnan(a) or isnan(b):
pass
self.a = a
self.b = b
def call_on_single_point(self, x):
return float(self.a * x + self.b)
def intersect(self, other):
if almost_equals(self.a, other.a):
return None
x = (float(other.b) - self.b) / (self.a - other.a)
return x, self(x)
def integral(self, start, end):
if start >= end:
return 0
if self.a == 0:
return self.b * (end - start)
x_intercept = self.x_intercept
if start > x_intercept or end < x_intercept or almost_equals(end, x_intercept) or almost_equals(start, x_intercept):
return (self(start) + self(end)) * (end - start) / 2.0
        # Signed areas of the two triangles on either side of the zero crossing.
        minus_triangle = 0.5 * (x_intercept - start) * self(start)
        plus_triangle = 0.5 * (end - x_intercept) * self(end)
        return minus_triangle + plus_triangle
def derivative(self, point):
return self.a
@property
def x_intercept(self):
return - float(self.b) / self.a
@property
def y_intercept(self):
return self(0)
class FunctionHorizontalLinear(FunctionLinear):
def __init__(self, y_intercept):
FunctionLinear.__init__(self, a=0, b=y_intercept)
def call_on_single_point(self, x):
return self.b
def integral(self, start, end):
if start >= end:
return 0
if almost_equals(self.b, 0):
return 0
return float(self.b) * (end - start)
def derivative(self, point):
return 0
FUNCTION_ZERO = FunctionHorizontalLinear(0)
FUNCTION_ONE = FunctionHorizontalLinear(1)
class FunctionComposite(Function):
is_normalised = False
def __init__(self, dictionary_bounds_function, function_undefined=None, domain=None, is_normalised=False):
if is_normalised is not False:
self.is_normalised = True
Function.__init__(self, function_undefined=function_undefined, domain=domain)
if not isinstance(dictionary_bounds_function, dict):
raise TypeError("'dictionary_bounds_function' should be a dictionary with (lower_bound, higher_bound) "
"tuple keys and values of type 'Function'")
self._dictionary_bounds_function = dictionary_bounds_function
def call_on_single_point(self, x):
for function_bounds in self.dictionary_bounds_function:
(a, b) = function_bounds
if a <= x:
if b >= x:
if self.dictionary_bounds_function[function_bounds] is None:
return None
return self.dictionary_bounds_function[function_bounds](x)
return self.function_undefined(x)
def integral(self, start, end):
if self.is_normalised and self.domain is not None:
if (start < self.domain[0] or almost_equals(start, self.domain[0])) and (
end > self.domain[-1] or almost_equals(end, self.domain[-1])):
return 1.0
if start >= end:
return 0
result = 0
for function_bounds in self.dictionary_bounds_function:
(a, b) = function_bounds
if a <= start:
if b >= end:
return self.dictionary_bounds_function[function_bounds].integral(start, end)
not_ordered = {
(start, 0): 's', (end, 0): 'e',
(a, 1): 'a', (b, 1): 'b'
}
order = ''.join([not_ordered[i] for i in sorted(not_ordered)])
if (a == start or a == end) and order == 'saeb' or (b == start or b == end) and order == 'asbe':
continue
if order in 'seab abse':
continue
if order == 'saeb':
b = end
elif order == 'asbe':
a = start
result += self.dictionary_bounds_function[function_bounds].integral(a, b)
return result
def find_bounds_for(self, point):
for bounds in self.dictionary_bounds_function:
(a, b) = bounds
if a <= point and b >= point:
return bounds
def derivative(self, point):
return self.dictionary_bounds_function[self.find_bounds_for(point)].derivative(point)
def function_in_point(self, point):
for bounds in self.dictionary_bounds_function:
a, b = bounds
if a <= point <= b:
return self.dictionary_bounds_function[bounds]
return None
# def functions_in_interval(self, interval_start, interval_end):
# dictionary_bounds_function = {}
# for bounds in self.dictionary_bounds_function:
# a, b = bounds
# if (interval_start < a or almost_equals(interval_start, a)) and (
#
# ):
@property
def dictionary_bounds_function(self):
return self._dictionary_bounds_function
class FunctionPiecewiseLinear(FunctionComposite):
def __init__(self, dictionary_input_output, function_undefined=None, is_normalised=False):
self.input_list, self.output_list = convert_dict_to_sorted_lists(dictionary_input_output)
dictionary_bounds_function = {}
for i in xrange(1, len(self.input_list)):
x_0, x_1 = self.input_list[i - 1], self.input_list[i]
y_0, y_1 = self.output_list[i - 1], self.output_list[i]
dictionary_bounds_function[(x_0, x_1)] = FunctionLinear(x_0=x_0, x_1=x_1, y_0=y_0, y_1=y_1)
if NEGATIVE_INFINITY not in self.input_list:
dictionary_bounds_function[(NEGATIVE_INFINITY, self.input_list[0])] = function_undefined
if POSITIVE_INFINITY not in self.input_list:
dictionary_bounds_function[(self.input_list[-1], POSITIVE_INFINITY)] = function_undefined
FunctionComposite.__init__(self, dictionary_bounds_function,
function_undefined=function_undefined,
domain=self.input_list,
is_normalised=is_normalised)
def normalised(self):
area = self.integral(NEGATIVE_INFINITY, POSITIVE_INFINITY)
if almost_equals(area, 0):
area = self.integral(NEGATIVE_INFINITY, POSITIVE_INFINITY)
dictionary_input_output = {}
output_list = [y / area for y in self.output_list]
for i in xrange(len(self.input_list)):
dictionary_input_output[self.input_list[i]] = output_list[i]
result = FunctionPiecewiseLinear(dictionary_input_output, function_undefined=self.function_undefined)
result.is_normalised = True
return result
def __and__(self, other):
for bounds in self.dictionary_bounds_function:
a, b = bounds
linear_function = self.dictionary_bounds_function[bounds]
if __name__ == '__main__':
a = FunctionLinear(1, 0)
b = FunctionLinear(-1, 1)
print a.intersect(b)
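    # Illustrative sketch (not in the original file): a triangular piecewise-linear
    # function built from the classes above. Assumes the package helpers referenced
    # by these classes (invoke_method_on, convert_dict_to_sorted_lists, almost_equals,
    # NEGATIVE_INFINITY, POSITIVE_INFINITY) are available in this module.
    tri = FunctionPiecewiseLinear({0.0: 0.0, 0.5: 1.0, 1.0: 0.0},
                                  function_undefined=FUNCTION_ZERO)
    print tri(0.25)            # rising edge: expected 0.5
    print tri.integral(0, 1)   # area under the triangle: expected 0.5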
|
agpl-3.0
|
raulf2012/pourbaix_pymatgen
|
pd_screen_tools.py
|
1
|
5815
|
def ORR_line(pH):
"""
"""
#start_fold - ORR_line
intercept = 1.23
slope = -0.0591
V = slope*pH + intercept
return V
#end_fold
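# Illustrative check (not in the original file): at pH 7 the ORR line sits at
# V = -0.0591 * 7 + 1.23 ~= 0.816 V vs. SHE, i.e. ORR_line(7.0) returns ~0.816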
def stable_mat_one_elem(pH,V,mat):
"""
    Returns the pymatgen Pourbaix entry object corresponding to the most stable
    species at the given pH and applied potential
    Args:
        pH: pH of the system
        V: Potential with reference to the Standard Hydrogen Electrode (SHE)
        mat: Element for which the Pourbaix entries are generated (e.g. 'Pt')
"""
#start_fold - stable_mat_one_elem
from pourdiag_single import pourdiag_single as pd_sgle
from pymatgen.analysis.pourbaix.maker import PourbaixDiagram
# Access entry Gibbs(pH, V)
from pymatgen.analysis.pourbaix.analyzer import PourbaixAnalyzer
all_entries = pd_sgle(mat)
pourbaix = PourbaixDiagram(all_entries)
PA=PourbaixAnalyzer(pourbaix)
templist=[]
for i in all_entries:
templist.append(PA.g(i,pH,V))
minE=min(templist)
for i in all_entries:
if PA.g(i,pH,V)==minE:
StableSpec=i
return StableSpec # Returns the entries object of the stable species
#end_fold
def plot_reg(coord_data):
"""
Plots the region boundaries for the given region
Args:
coord_data: Coordinate data of region of interest, must be in the form
of [[[x-points],[y-points]],[], ...]
"""
#start_fold - plot_reg
import numpy as np
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
for line in coord_data:
ax.plot(line[0],line[1])
plt.show()
#end_fold
def phase_coord(entries, atom_comp, prim_elem=False):
"""
Produces a list of line segments corresponding to each phase area in a PD
along with the PD entries corresponding to each area.
The produced list is of the following form:
list = [[[coordinate data], [pourbaix entry]], [], [] .... ]
Args:
entries: List of entries in a PD
atom_comp: Composition of atom if system is binary, given as a fraction
between 0 and 1. Corresponds to the element with lowest atomic number if
prim_elem is left to its default
prim_elem: Primary element to which the atom_comp is assigned
"""
#start_fold - phase_coord
from pymatgen.analysis.pourbaix.maker import PourbaixDiagram
from pymatgen.analysis.pourbaix.plotter import PourbaixPlotter
from entry_methods import base_atom
base_atoms = base_atom(entries)
mat0 = base_atoms[0]
if len(base_atoms)==2: mat1 = base_atoms[1]
else: mat1 = mat0
# If the primary element is declared then set the given composition to it
    if prim_elem is not False:
for atom in base_atoms:
if atom==prim_elem:
mat0 = atom
else: mat1 = atom
pd = PourbaixDiagram(entries,{mat0: atom_comp,mat1: 1-atom_comp})
pl = PourbaixPlotter(pd)
ppd = pl.pourbaix_plot_data([[-2, 16],[-3, 3]]) #ppd == Pourbaix_Plot Data
pd_lst = []
cnt = 0
for stable_entry in ppd[0]:
pd_lst.append([])
pd_lst[cnt].append(ppd[0][stable_entry])
pd_lst[cnt].append(stable_entry.entrylist)
cnt = cnt+1
return pd_lst
#end_fold
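# Illustrative usage sketch (not in the original file; the 'pourdiag' import and the
# exact shape of the returned coordinate data are assumptions based on the code above):
#
#     from pourdiag import pourdiag
#     entries = pourdiag('Ni', 'Pt')                  # binary Ni-Pt Pourbaix entries
#     regions = phase_coord(entries, atom_comp=0.5)   # [[coords, [entries]], ...]
#     coords, species = regions[0]
#     plot_reg(coords)                                # plot the first region's boundaries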
def phase_filter(phase_coord,phase_type):
"""
Returns a list of Pourbaix diagrams regions and corresponding species
that match the specified phase_type
Args:
phase_coord: PD phase coordinate data produced from phase_coord
        phase_type: Type of phase that will be filtered for. Options include the following:
            metallic, oxide, metallic_metallic, metallic_oxide, oxide_oxide,
            metallic_aqueous, oxide_aqueous, aqueous_aqueous
"""
#start_fold - phase_filter
## METALLIC: 1 METALLIC PHASE
# For a binary system this is necessarily an alloy
#TMP
if phase_type == 'metallic':
met_phase_lst = []
for region in phase_coord:
is_oxide_phase = False
if len(region[1]) == 1:
if region[1][0].phase_type == 'Solid':
for elem in region[1][0].composition.elements:
if elem.symbol == 'O': is_oxide_phase = True
if is_oxide_phase == False:
met_phase_lst.append(region)
return met_phase_lst
## METALLIC_METALLIC: 2 METALLIC SPECIES
# May be 2 single atomic species (ex. Ni(s) + Pt(s)) or two alloys (ex. NiPt2(s) + Ni2Pt(s))
if phase_type == 'metallic_metallic':
met_met_phase_lst = []
for region in phase_coord:
c1 = len(region[1]) == 2
if len(region[1]) == 2:
c2 = region[1][0].phase_type == 'Solid'
c3 = region[1][1].phase_type == 'Solid'
if c2 and c3:
is_oxide_phase = False
for elem in region[1][0].composition.elements:
if elem.symbol == 'O': is_oxide_phase = True
for elem in region[1][1].composition.elements:
if elem.symbol == 'O': is_oxide_phase = True
if is_oxide_phase == False:
met_met_phase_lst.append(region)
return met_met_phase_lst
#end_fold
def is_solid_phase(mat1, mat2, mat1_co=0.5):
"""
    Returns True if there exists an all-solid phase in the binary Pourbaix diagram,
    i.e. a phase region that doesn't contain any aqueous species
    Args:
        mat1: First element of the binary system (e.g. 'Pt')
        mat2: Second element of the binary system
        mat1_co: Composition fraction of mat1, between 0 and 1
"""
#start_fold - is_solid_phase
from pourdiag import pourdiag # Returns Pourbaix entries for binary system
from pymatgen.analysis.pourbaix.maker import PourbaixDiagram
from pymatgen.analysis.pourbaix.plotter import PourbaixPlotter
mat2_co = 1-mat1_co
pd_b = pourdiag(mat1,mat2)
pd = PourbaixDiagram(pd_b,{mat1: mat1_co,mat2: mat2_co})
pl = PourbaixPlotter(pd)
ppd = pl.pourbaix_plot_data([[-2, 16],[-3, 3]]) #ppd == Pourbaix_Plot Data
pd_lst = []
cnt = 0
for stable_entry in ppd[0]:
pd_lst.append([])
pd_lst[cnt].append(ppd[0][stable_entry])
pd_lst[cnt].append(stable_entry.entrylist)
cnt = cnt+1
solidphase=False
for i in pd_lst:
if len(i[1])==1:
if i[1][0].phase_type == 'Solid':
solidphase=True
if len(i[1])==2:
            if i[1][0].phase_type == 'Solid' and i[1][1].phase_type == 'Solid':
solidphase=True
return solidphase
#end_fold
|
mit
|
ostrokach/elaspic
|
tests/test_elaspic_predictor.py
|
1
|
4634
|
import os.path as op
import pickle
import pandas as pd
import elaspic
import elaspic.elaspic_predictor
_foldx_core_features = [
# FoldX
# (wildtype)
'dg_wt',
'backbone_hbond_wt', 'sidechain_hbond_wt', 'van_der_waals_wt',
'electrostatics_wt', 'solvation_polar_wt', 'solvation_hydrophobic_wt',
'van_der_waals_clashes_wt', 'entropy_sidechain_wt', 'entropy_mainchain_wt',
'sloop_entropy_wt', 'mloop_entropy_wt', 'cis_bond_wt', 'torsional_clash_wt',
'backbone_clash_wt', 'helix_dipole_wt', 'water_bridge_wt', 'disulfide_wt',
'electrostatic_kon_wt', 'partial_covalent_bonds_wt', 'energy_ionisation_wt',
'entropy_complex_wt',
'number_of_residues',
# (change)
'dg_change',
'backbone_hbond_change', 'sidechain_hbond_change', 'van_der_waals_change',
'electrostatics_change', 'solvation_polar_change', 'solvation_hydrophobic_change',
'van_der_waals_clashes_change', 'entropy_sidechain_change', 'entropy_mainchain_change',
'sloop_entropy_change', 'mloop_entropy_change', 'cis_bond_change', 'torsional_clash_change',
'backbone_clash_change', 'helix_dipole_change', 'water_bridge_change', 'disulfide_change',
'electrostatic_kon_change', 'partial_covalent_bonds_change', 'energy_ionisation_change',
'entropy_complex_change',
# 'number_of_residues_change' <-- does not make sense
]
_foldx_interface_features = (
['intraclashes_energy_1_wt', 'intraclashes_energy_2_wt',
'intraclashes_energy_1_change', 'intraclashes_energy_2_change'] +
_foldx_core_features
)
_physicochem_features = [
# PhysicoChemical properties
'pcv_salt_equal_wt', 'pcv_salt_equal_self_wt',
'pcv_salt_equal_change', 'pcv_salt_equal_self_change',
'pcv_salt_opposite_wt', 'pcv_salt_opposite_self_wt',
'pcv_salt_opposite_change', 'pcv_salt_opposite_self_change',
'pcv_hbond_wt', 'pcv_hbond_self_wt',
'pcv_hbond_change', 'pcv_hbond_self_change',
'pcv_vdw_wt', 'pcv_vdw_self_wt',
'pcv_vdw_change', 'pcv_vdw_self_change',
]
_remaining_features = [
# Alignment
'alignment_identity', 'alignment_coverage', 'alignment_score', 'matrix_score',
# Model
'norm_dope',
# Sequence
'provean_score', 'secondary_structure_wt', 'secondary_structure_change',
# Structure
'solvent_accessibility_wt', 'solvent_accessibility_change',
]
def test__get_foldx_features_core():
expected = _foldx_core_features
actual = elaspic.elaspic_predictor._get_foldx_features('core')
xor = set(expected) ^ set(actual)
assert not xor, xor
def test__get_foldx_features_interface():
expected = _foldx_interface_features
actual = elaspic.elaspic_predictor._get_foldx_features('interface')
xor = set(expected) ^ set(actual)
assert not xor, xor
def test__get_physicochem_features():
expected = _physicochem_features
actual = elaspic.elaspic_predictor._get_physicochem_features()
xor = set(expected) ^ set(actual)
assert not xor, xor
def test_feature_columns_core():
expected = _foldx_core_features + _physicochem_features + _remaining_features
actual = elaspic.elaspic_predictor.FEATURE_COLUMNS_CORE
xor = set(expected) ^ set(actual)
assert not xor, xor
def test_feature_columns_interface():
expected = _foldx_interface_features + _physicochem_features + _remaining_features
actual = elaspic.elaspic_predictor.FEATURE_COLUMNS_INTERFACE
xor = set(expected) ^ set(actual)
assert not xor, xor
def test_format_mutation_features():
df = pd.read_csv(op.join(op.splitext(__file__)[0], 'df2.tsv'), sep='\t')
assert df['stability_energy_wt'].notnull().all()
assert df['stability_energy_mut'].notnull().all()
df2 = elaspic.elaspic_predictor.format_mutation_features(df)
assert df2['dg_wt'].notnull().all()
assert df2['dg_wt'].dtype != object
assert df2['dg_mut'].notnull().all()
assert df2['dg_mut'].dtype != object
df3 = elaspic.elaspic_predictor.convert_features_to_differences(df2)
assert df3['dg_change'].notnull().all()
def test_core_predictor():
# Load predictor
with open(op.join(elaspic.CACHE_DIR, 'core_clf.pickle'), 'rb') as fh:
clf = pickle.load(fh)
# Load data
dfs = [
# pd.read_csv(op.join(op.splitext(__file__)[0], 'df1.tsv'), sep='\t'),
pd.read_csv(op.join(op.splitext(__file__)[0], 'df2.tsv'), sep='\t')
]
# Predict
for df in dfs:
df = elaspic.elaspic_predictor.format_mutation_features(df)
df = elaspic.elaspic_predictor.convert_features_to_differences(df)
df['ddg'] = clf.predict(df[clf.features])
assert df['ddg'].notnull().all()
|
mit
|
CI-WATER/gsshapy
|
gsshapy/modeling/model.py
|
1
|
14479
|
# -*- coding: utf-8 -*-
#
# model.py
# GSSHApy
#
# Created by Alan D Snow, 2016.
# BSD 3-Clause
from datetime import timedelta
import logging
import uuid
import os
from gazar.grid import GDALGrid
import geopandas as gpd
from .event import EventMode, LongTermMode
from ..orm import WatershedMaskFile, ElevationGridFile, MapTableFile
from ..lib import db_tools as dbt
from ..util.context import tmp_chdir
log = logging.getLogger(__name__)
class GSSHAModel(object):
"""
This class manages the generation and modification of
models for GSSHA.
Parameters:
project_directory(str): Directory to write GSSHA project files to.
project_name(Optional[str]): Name of GSSHA project. Required for new model.
mask_shapefile(Optional[str]): Path to watershed boundary shapefile. Required for new model.
auto_clean_mask_shapefile(Optional[bool]): Chooses the largest region if the input is a multipolygon. Default is False.
grid_cell_size(Optional[str]): Cell size of model (meters). Required for new model.
elevation_grid_path(Optional[str]): Path to elevation raster used for GSSHA grid. Required for new model.
simulation_timestep(Optional[float]): Overall model timestep (seconds). Sets TIMESTEP card. Required for new model.
out_hydrograph_write_frequency(Optional[str]): Frequency of writing to hydrograph (minutes). Sets HYD_FREQ card. Required for new model.
        roughness(Optional[float]): Value of uniform Manning's n roughness for grid. Mutually exclusive with land use roughness. Required for new model.
        land_use_grid(Optional[str]): Path to land use grid to use for roughness. Mutually exclusive with roughness. Required for new model.
        land_use_grid_id(Optional[str]): ID of default grid supported in GSSHApy. Mutually exclusive with roughness. Required for new model.
        land_use_to_roughness_table(Optional[str]): Path to land use to roughness table. Use if not using land_use_grid_id. Mutually exclusive with roughness. Required for new model.
        load_rasters_to_db(Optional[bool]): If True, it will load the created rasters into the database. If you are generating a large model, it is recommended to set this to False. Default is True.
db_session(Optional[database session]): Active database session object. Required for existing model.
project_manager(Optional[ProjectFile]): Initialized ProjectFile object. Required for existing model.
Model Generation Example:
.. code:: python
from datetime import datetime, timedelta
from gsshapy.modeling import GSSHAModel
model = GSSHAModel(project_name="gssha_project",
project_directory="/path/to/gssha_project",
mask_shapefile="/path/to/watershed_boundary.shp",
auto_clean_mask_shapefile=True,
grid_cell_size=1000,
elevation_grid_path="/path/to/elevation.tif",
simulation_timestep=10,
out_hydrograph_write_frequency=15,
land_use_grid='/path/to/land_use.tif',
land_use_grid_id='glcf',
load_rasters_to_db=False,
)
model.set_event(simulation_start=datetime(2017, 2, 28, 14, 33),
simulation_duration=timedelta(seconds=180*60),
rain_intensity=2.4,
rain_duration=timedelta(seconds=30*60),
)
model.write()
"""
def __init__(self,
project_directory,
project_name=None,
mask_shapefile=None,
auto_clean_mask_shapefile=False,
grid_cell_size=None,
elevation_grid_path=None,
simulation_timestep=30,
out_hydrograph_write_frequency=10,
roughness=None,
land_use_grid=None,
land_use_grid_id=None,
land_use_to_roughness_table=None,
load_rasters_to_db=True,
db_session=None,
project_manager=None,
):
self.project_directory = project_directory
self.db_session = db_session
self.project_manager = project_manager
self.load_rasters_to_db = load_rasters_to_db
if project_manager is not None and db_session is None:
raise ValueError("'db_session' is required to edit existing model if 'project_manager' is given.")
if project_manager is None and db_session is None:
if project_name is not None and mask_shapefile is None and elevation_grid_path is None:
self.project_manager, db_sessionmaker = \
dbt.get_project_session(project_name, self.project_directory)
self.db_session = db_sessionmaker()
self.project_manager.readInput(directory=self.project_directory,
projectFileName="{0}.prj".format(project_name),
session=self.db_session)
else:
# generate model
if None in (project_name, mask_shapefile, elevation_grid_path):
raise ValueError("Need to set project_name, mask_shapefile, "
"and elevation_grid_path to generate "
"a new GSSHA model.")
self.project_manager, db_sessionmaker = \
dbt.get_project_session(project_name, self.project_directory, map_type=0)
self.db_session = db_sessionmaker()
self.db_session.add(self.project_manager)
self.db_session.commit()
# ADD BASIC REQUIRED CARDS
# see http://www.gsshawiki.com/Project_File:Required_Inputs
self.project_manager.setCard('TIMESTEP',
str(simulation_timestep))
self.project_manager.setCard('HYD_FREQ',
str(out_hydrograph_write_frequency))
# see http://www.gsshawiki.com/Project_File:Output_Files_%E2%80%93_Required
self.project_manager.setCard('SUMMARY',
'{0}.sum'.format(project_name),
add_quotes=True)
self.project_manager.setCard('OUTLET_HYDRO',
'{0}.otl'.format(project_name),
add_quotes=True)
# ADD REQUIRED MODEL GRID INPUT
if grid_cell_size is None:
                    # calculate cell size from elevation grid if not given
# as input from the user
ele_grid = GDALGrid(elevation_grid_path)
utm_bounds = ele_grid.bounds(as_utm=True)
x_cell_size = (utm_bounds[1] - utm_bounds[0])/ele_grid.x_size
y_cell_size = (utm_bounds[3] - utm_bounds[2])/ele_grid.y_size
grid_cell_size = min(x_cell_size, y_cell_size)
ele_grid = None
log.info("Calculated cell size is {grid_cell_size}"
.format(grid_cell_size=grid_cell_size))
if auto_clean_mask_shapefile:
mask_shapefile = self.clean_boundary_shapefile(mask_shapefile)
self.set_mask_from_shapefile(mask_shapefile, grid_cell_size)
self.set_elevation(elevation_grid_path, mask_shapefile)
self.set_roughness(roughness=roughness,
land_use_grid=land_use_grid,
land_use_grid_id=land_use_grid_id,
land_use_to_roughness_table=land_use_to_roughness_table,
)
@staticmethod
def clean_boundary_shapefile(shapefile_path):
"""
        Cleans the boundary shapefile so that there is only one main polygon.
:param shapefile_path:
:return:
"""
wfg = gpd.read_file(shapefile_path)
first_shape = wfg.iloc[0].geometry
if hasattr(first_shape, 'geoms'):
log.warning("MultiPolygon found in boundary. "
"Picking largest area ...")
# pick largest shape to be the watershed boundary
# and assume the other ones are islands to be removed
max_area = -9999.0
main_geom = None
for geom in first_shape.geoms:
if geom.area > max_area:
main_geom = geom
max_area = geom.area
# remove self intersections
if not main_geom.is_valid:
log.warning("Invalid geometry found in boundary. "
"Attempting to self clean ...")
main_geom = main_geom.buffer(0)
wfg.loc[0, 'geometry'] = main_geom
out_cleaned_boundary_shapefile = \
os.path.splitext(shapefile_path)[0] +\
str(uuid.uuid4()) +\
'.shp'
wfg.to_file(out_cleaned_boundary_shapefile)
log.info("Cleaned boundary shapefile written to:"
"{}".format(out_cleaned_boundary_shapefile))
return out_cleaned_boundary_shapefile
return shapefile_path
def set_mask_from_shapefile(self, shapefile_path, cell_size):
"""
Adds a mask from a shapefile
"""
# make sure paths are absolute as the working directory changes
shapefile_path = os.path.abspath(shapefile_path)
# ADD MASK
with tmp_chdir(self.project_directory):
mask_name = '{0}.msk'.format(self.project_manager.name)
msk_file = WatershedMaskFile(project_file=self.project_manager,
session=self.db_session)
msk_file.generateFromWatershedShapefile(shapefile_path,
cell_size=cell_size,
out_raster_path=mask_name,
load_raster_to_db=self.load_rasters_to_db)
def set_elevation(self, elevation_grid_path, mask_shapefile):
"""
Adds elevation file to project
"""
# ADD ELEVATION FILE
ele_file = ElevationGridFile(project_file=self.project_manager,
session=self.db_session)
ele_file.generateFromRaster(elevation_grid_path,
mask_shapefile,
load_raster_to_db=self.load_rasters_to_db)
def set_outlet(self, latitude, longitude, outslope):
"""
Adds outlet point to project
"""
self.project_manager.setOutlet(latitude=latitude, longitude=longitude,
outslope=outslope)
def set_roughness(self,
roughness=None,
land_use_grid=None,
land_use_grid_id=None,
land_use_to_roughness_table=None):
"""
ADD ROUGHNESS FROM LAND COVER
See: http://www.gsshawiki.com/Project_File:Overland_Flow_%E2%80%93_Required
"""
if roughness is not None:
self.project_manager.setCard('MANNING_N', str(roughness))
elif land_use_grid is not None and (land_use_grid_id is not None \
or land_use_to_roughness_table is not None):
# make sure paths are absolute as the working directory changes
land_use_grid = os.path.abspath(land_use_grid)
if land_use_to_roughness_table is not None:
land_use_to_roughness_table = os.path.abspath(land_use_to_roughness_table)
mapTableFile = MapTableFile(project_file=self.project_manager)
mapTableFile.addRoughnessMapFromLandUse("roughness",
self.db_session,
land_use_grid,
land_use_to_roughness_table=land_use_to_roughness_table,
land_use_grid_id=land_use_grid_id)
else:
raise ValueError("Need to either set 'roughness', or need "
"to set values from land use grid ...")
def set_event(self,
simulation_start=None,
simulation_duration=None,
simulation_end=None,
rain_intensity=2,
rain_duration=timedelta(seconds=30*60),
event_type='EVENT',
):
"""
Initializes event for GSSHA model
"""
        # ADD TEMPORAL EVENT INFORMATION
if event_type == 'LONG_TERM':
self.event = LongTermMode(self.project_manager,
self.db_session,
self.project_directory,
simulation_start=simulation_start,
simulation_end=simulation_end,
simulation_duration=simulation_duration,
)
else: # 'EVENT'
self.event = EventMode(self.project_manager,
self.db_session,
self.project_directory,
simulation_start=simulation_start,
simulation_duration=simulation_duration,
)
self.event.add_uniform_precip_event(intensity=rain_intensity,
duration=rain_duration)
def write(self):
"""
Write project to directory
"""
# write data
self.project_manager.writeInput(session=self.db_session,
directory=self.project_directory,
name=self.project_manager.name)
|
bsd-3-clause
|
imaculate/scikit-learn
|
sklearn/utils/testing.py
|
13
|
27312
|
"""Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# Giorgio Patrini
# Thierry Guillemot
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import struct
import scipy as sp
import scipy.io
from functools import wraps
from operator import itemgetter
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
from numpy.testing import assert_approx_equal
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
from sklearn.cluster import DBSCAN
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal",
"assert_approx_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions."""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but lets keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
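# Example (illustrative, not part of the original module):
#
#     def noisy():
#         warnings.warn("this API is deprecated", UserWarning)
#         return 42
#     assert assert_warns(UserWarning, noisy) == 42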
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
        # Check the messages of all warnings that belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None, category=Warning):
"""Context manager and decorator to ignore warnings.
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Parameters
----------
category : warning class, defaults to Warning.
The category to filter. If Warning, all categories will be muted.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _IgnoreWarnings(category=category)(obj)
else:
return _IgnoreWarnings(category=category)
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager and decorator.
    This class makes it possible to ignore the warnings raised by a function.
Copied from Python 2.7.5 and modified as required.
Parameters
----------
    category : tuple of warning class, default to Warning
The category to filter. By default, all the categories will be muted.
"""
def __init__(self, category):
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
self.category = category
def __call__(self, fn):
"""Decorator to catch and hide warnings without visual nesting."""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings():
warnings.simplefilter("ignore", self.category)
return fn(*args, **kwargs)
return wrapper
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter("ignore", self.category)
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions.
Parameters
----------
exceptions : exception or tuple of exception
        The expected exception type(s) that `function` should raise
    message : str
        A substring that must appear in the raised exception's message
    function : callable
        Callable object expected to raise the error
    *args : the positional arguments to `function`.
    **kwargs : the keyword arguments to `function`
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
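# Example (illustrative, not part of the original module):
#
#     def fit_empty():
#         raise ValueError("Found array with 0 sample(s)")
#     assert_raise_message(ValueError, "0 sample(s)", fit_empty)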
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
    'data'; keep that in mind in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
        {dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier", "MultiOutputEstimator",
"MultiOutputRegressor", "MultiOutputClassifier",
"OutputCodeClassifier", "OneVsRestClassifier",
"RFE", "RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV",
"SelectFromModel"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and can
not be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if (".tests." in modname):
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator) and
c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or "
"None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
# itemgetter is used to ensure the sort does not extend to the 2nd item of
# the tuple
return sorted(set(estimators), key=itemgetter(0))
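# Example (illustrative, not part of the original module):
#
#     regressors = all_estimators(type_filter='regressor')
#     # -> sorted list of (name, class) tuples, e.g. ('AdaBoostRegressor', <class ...>)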
def set_random_state(estimator, random_state=0):
"""Set random state of an estimator if it has the `random_state` param.
Classes for whom random_state is deprecated are ignored. Currently DBSCAN
is one such class.
"""
if isinstance(estimator, DBSCAN):
return
if "random_state" in estimator.get_params():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed."""
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def skip_if_32bit(func):
"""Test decorator that skips tests on 32bit platforms."""
@wraps(func)
def run_test(*args, **kwargs):
bits = 8 * struct.calcsize("P")
if bits == 32:
raise SkipTest('Test skipped on 32bit platforms.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
warnings.warn("if_not_mac_os is deprecated in 0.17 and will be removed"
" in 0.19: use the safer and more generic"
" if_safe_multiprocessing_with_blas instead",
DeprecationWarning)
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def if_safe_multiprocessing_with_blas(func):
"""Decorator for tests involving both BLAS calls and multiprocessing.
Under POSIX (e.g. Linux or OSX), using multiprocessing in conjunction with
some implementation of BLAS (or other libraries that manage an internal
posix thread pool) can cause a crash or a freeze of the Python process.
In practice all known packaged distributions (from Linux distros or
    Anaconda) of BLAS under Linux seem to be safe, so this problem seems to
    only impact OSX users.
    This wrapper makes it possible to skip tests that can possibly cause
    this crash under OS X.
Under Python 3.4+ it is possible to use the `forkserver` start method
for multiprocessing to avoid this issue. However it can cause pickling
    errors on interactively defined functions. It is therefore not enabled by
default.
"""
@wraps(func)
def run_test(*args, **kwargs):
if sys.platform == 'darwin':
raise SkipTest(
"Possible multi-process bug with some BLAS")
return func(*args, **kwargs)
return run_test
def clean_warning_registry():
"""Safe way to reset warnings."""
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copy from joblib.pool (for independence).
"""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
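# Example usage (illustrative, not part of the original module):
#
#     X = np.random.rand(100, 10)
#     with TempMemmap(X) as X_readonly:
#         # X_readonly is re-loaded with joblib using mmap_mode='r', so tests can
#         # check that estimators accept read-only, memory-mapped input
#         DBSCAN().fit(X_readonly)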
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
|
bsd-3-clause
|
pbreach/pysd
|
docs/conf.py
|
2
|
10253
|
# -*- coding: utf-8 -*-
#
# PySD documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 18 10:02:28 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import mock
MOCK_MODULES = ['numpy', 'scipy', 'matplotlib', 'matplotlib.pyplot', 'scipy.stats',
'scipy.integrate', 'pandas', 'parsimonious', 'parsimonious.nodes',
'lxml', 'xarray', 'autopep8']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
print os.path.abspath('../')
sys.path.insert(0, os.path.abspath('../')) #this *should* be adding to the beginning...
# Build the translated functions file
#from pysd import vensim2py
#with open('development/supported_vensim_functions.rst', 'w') as fout:
# fout.write(vensim2py.doc_supported_vensim_functions())
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PySD'
copyright = u'2016, James Houghton'
author = u'James Houghton'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
exec(open('../pysd/_version.py').read())
version = '.'.join(__version__.split('.')[:-1])
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PySDdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PySD.tex', u'PySD Documentation',
u'James Houghton', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pysd', u'PySD Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PySD', u'PySD Documentation',
author, 'PySD', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
intersphinx_mapping = {'python': ('https://docs.python.org/2.7', None),
'pysdcookbook': ('http://pysd-cookbook.readthedocs.org/en/latest/', None)}
|
mit
|
rothnic/bokeh
|
bokeh/util/serialization.py
|
31
|
7419
|
""" Functions for helping with serialization and deserialization of
Bokeh objects.
"""
from __future__ import absolute_import
from six import iterkeys
is_numpy = None
try:
import numpy as np
is_numpy = True
except ImportError:
is_numpy = False
try:
import pandas as pd
is_pandas = True
except ImportError:
is_pandas = False
import logging
log = logging.getLogger(__name__)
_simple_id = 1000
def make_id():
""" Return a new unique ID for a Bokeh object.
Normally this function will return UUIDs to use for identifying Bokeh
    objects. This is especially important for Bokeh objects stored on a
Bokeh server. However, it is convenient to have more human-readable
IDs during development, so this behavior can be overridden by
setting the environment variable ``BOKEH_SIMPLE_IDS=yes``.
"""
global _simple_id
import uuid
from ..settings import settings
if settings.simple_ids(False):
_simple_id += 1
new_id = _simple_id
else:
new_id = uuid.uuid4()
return str(new_id)
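# Illustrative usage sketch (not part of the original Bokeh module): by default
# make_id() returns a stringified UUID4; when BOKEH_SIMPLE_IDS is enabled it
# returns sequential integer strings instead. Either way, ids are unique strings.
def _example_make_id():
    first = make_id()
    second = make_id()
    # Every call yields a distinct string identifier.
    assert isinstance(first, str) and first != second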
def urljoin(*args):
""" Construct an absolute URL from several URL components.
Args:
*args (str) : URL components to join
Returns:
str : joined URL
"""
from six.moves.urllib.parse import urljoin as sys_urljoin
from functools import reduce
return reduce(sys_urljoin, args)
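# Illustrative usage sketch (not part of the original module): urljoin folds the
# standard-library urljoin over its arguments left to right, so each component
# is resolved against the result of the previous join.
def _example_urljoin():
    url = urljoin("http://example.com/", "api/", "docs")
    assert url == "http://example.com/api/docs"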
def get_json(response):
""" Unify retrieving JSON responses from different sources.
Works correctly for HTTP responses from requests <=1.0, >1.0, and
the Flask test client.
Args:
response (Flask or requests response) : a response to process
Returns:
JSON
"""
import json
try:
import flask
except ImportError:
flask = None
if flask and isinstance(response, flask.Response):
# flask testing
return json.loads(response.data.decode('utf-8'))
else:
# requests
if hasattr(response.json, '__call__'):
return response.json()
else:
return response.json
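# Illustrative usage sketch (not part of the original module); assumes the
# `requests` package is installed, and the URL below is only a placeholder.
# Kept as a plain function so nothing is fetched at import time.
def _example_get_json(url="http://example.com/data.json"):
    import requests
    response = requests.get(url)
    # Works whether response.json is a method (requests > 1.0) or an attribute.
    return get_json(response)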
def dump(objs, docid, changed_only=True):
""" Serialize a sequence of Bokeh objects into JSON
Args:
        objs (seq[obj]) : a sequence of Bokeh objects to dump
docid (str) : an ID for a Bokeh Document to dump relative to
changed_only (bool, optional) : whether to dump only attributes
that have had their values changed at some point (default: True)
Returns:
list[json]
"""
json_objs = []
for obj in objs:
ref = obj.ref
ref["attributes"] = obj.vm_serialize(changed_only=changed_only)
ref["attributes"].update({"id": ref["id"], "doc" : docid})
json_objs.append(ref)
return json_objs
def is_ref(frag):
""" Test whether a given Bokeh object graph fragment is a reference.
A Bokeh "reference" is a ``dict`` with ``"type"`` and ``"id"`` keys.
Args:
frag (dict) : a fragment of a Bokeh object graph
Returns:
True, if the fragment is a reference, otherwise False
"""
return isinstance(frag, dict) and \
frag.get('type') and \
frag.get('id')
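# Illustrative usage sketch (not part of the original module): note that the
# `and` chain above returns a truthy value (the id itself) rather than a
# literal True, so callers usually rely on truthiness only.
def _example_is_ref():
    assert bool(is_ref({"type": "Circle", "id": "abc123"})) is True
    assert bool(is_ref({"type": "Circle"})) is False
    assert bool(is_ref([1, 2, 3])) is False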
def json_apply(fragment, check_func, func):
""" Apply a function to JSON fragments that match the given predicate
and return the collected results.
Recursively traverses a nested collection of ``dict`` and ``list``,
applying ``check_func`` to each fragment. If True, then collect
``func(fragment)`` in the final output
Args:
fragment (JSON-like) : the fragment to apply ``func`` to recursively
check_func (callable) : the predicate to test fragments with
func (callable) : the conversion function to apply
Returns:
converted fragments
"""
if check_func(fragment):
return func(fragment)
elif isinstance(fragment, list):
output = []
for val in fragment:
output.append(json_apply(val, check_func, func))
return output
elif isinstance(fragment, dict):
output = {}
for k, val in fragment.items():
output[k] = json_apply(val, check_func, func)
return output
else:
return fragment
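# Illustrative usage sketch (not part of the original module): replace every
# reference in a nested fragment with its id, leaving other values untouched.
def _example_json_apply():
    fragment = {"glyph": {"type": "Circle", "id": "42"}, "data": [1, 2]}
    result = json_apply(fragment, is_ref, lambda ref: ref["id"])
    assert result == {"glyph": "42", "data": [1, 2]}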
def transform_series(obj):
"""transforms pandas series into array of values
"""
vals = obj.values
return transform_array(vals)
def transform_array(obj):
"""Transform arrays into lists of json safe types
also handles pandas series, and replacing
nans and infs with strings
"""
# Check for astype failures (putative Numpy < 1.7)
dt2001 = np.datetime64('2001')
legacy_datetime64 = (dt2001.astype('int64') ==
dt2001.astype('datetime64[ms]').astype('int64'))
## not quite correct, truncates to ms..
if obj.dtype.kind == 'M':
if legacy_datetime64:
if obj.dtype == np.dtype('datetime64[ns]'):
return (obj.astype('int64') / 10**6.0).tolist()
else:
return (obj.astype('datetime64[us]').astype('int64') / 1000.).tolist()
elif obj.dtype.kind in ('u', 'i', 'f'):
return transform_numerical_array(obj)
return obj.tolist()
def transform_numerical_array(obj):
"""handles nans/inf conversion
"""
if isinstance(obj, np.ma.MaskedArray):
obj = obj.filled(np.nan) # Set masked values to nan
if not np.isnan(obj).any() and not np.isinf(obj).any():
return obj.tolist()
else:
transformed = obj.astype('object')
transformed[np.isnan(obj)] = 'NaN'
transformed[np.isposinf(obj)] = 'Infinity'
transformed[np.isneginf(obj)] = '-Infinity'
return transformed.tolist()
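# Illustrative usage sketch (not part of the original module); assumes NumPy is
# importable. Non-finite floats are replaced by placeholder strings so the
# result is JSON-serializable.
def _example_transform_numerical_array():
    arr = np.array([1.0, np.nan, np.inf, -np.inf])
    assert transform_numerical_array(arr) == [1.0, 'NaN', 'Infinity', '-Infinity']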
def traverse_data(datum, is_numpy=is_numpy, use_numpy=True):
"""recursively dig until a flat list is found
if numpy is available convert the flat list to a numpy array
and send off to transform_array() to handle nan, inf, -inf
otherwise iterate through items in array converting non-json items
Args:
datum (list) : a list of values or lists
is_numpy: True if numpy is present (see imports)
use_numpy: toggle numpy as a dependency for testing purposes
"""
is_numpy = is_numpy and use_numpy
if is_numpy and not any(isinstance(el, (list, tuple)) for el in datum):
return transform_array(np.asarray(datum))
datum_copy = []
for item in datum:
if isinstance(item, (list, tuple)):
datum_copy.append(traverse_data(item))
elif isinstance(item, float):
if np.isnan(item):
item = 'NaN'
elif np.isposinf(item):
item = 'Infinity'
elif np.isneginf(item):
item = '-Infinity'
datum_copy.append(item)
else:
datum_copy.append(item)
return datum_copy
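# Illustrative usage sketch (not part of the original module); assumes NumPy is
# importable, so the flat inner lists take the transform_array() path and
# non-finite floats come back stringified.
def _example_traverse_data():
    data = [[1.0, float('nan')], [2.0, 3.0]]
    assert traverse_data(data) == [[1.0, 'NaN'], [2.0, 3.0]]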
def transform_column_source_data(data):
"""iterate through the data of a ColumnSourceData object replacing
non-JSON-compliant objects with compliant ones
"""
data_copy = {}
for key in iterkeys(data):
if is_pandas and isinstance(data[key], (pd.Series, pd.Index)):
data_copy[key] = transform_series(data[key])
elif isinstance(data[key], np.ndarray):
data_copy[key] = transform_array(data[key])
else:
data_copy[key] = traverse_data(data[key])
return data_copy
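# Illustrative usage sketch (not part of the original module); assumes NumPy is
# importable. Each column is routed to the appropriate transform based on its
# type (pandas series, numpy array, or plain Python container).
def _example_transform_column_source_data():
    data = {"x": np.array([1.0, np.nan]), "y": [1, 2, 3]}
    out = transform_column_source_data(data)
    assert out == {"x": [1.0, 'NaN'], "y": [1, 2, 3]}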
|
bsd-3-clause
|
stuart-knock/bokeh
|
bokeh/charts/builder/histogram_builder.py
|
43
|
9142
|
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Histogram class, which lets you build your histograms just by
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
try:
import scipy.special
_is_scipy = True
except ImportError as e:
_is_scipy = False
import numpy as np
from ..utils import chunk, cycle_colors
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, GlyphRenderer, Range1d
from ...models.glyphs import Line, Quad
from ...properties import Bool, Float, Int
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Histogram(values, bins, mu=None, sigma=None, density=True, **kws):
""" Create a histogram chart using :class:`HistogramBuilder <bokeh.charts.builder.histogram_builder.HistogramBuilder>`
to render the geometry from values, bins, sigma and density.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
bins (int): number of bins to use in the Histogram building.
mu (float, optional): theoretical mean value for the normal
distribution. (default: None)
sigma (float, optional): theoretical sigma value for the
normal distribution. (default: None)
density (bool, optional): If False, the result will contain
the number of samples in each bin. If True, the result
is the value of the probability *density* function at
the bin, normalized such that the *integral* over the
range is 1. For more info check numpy.histogram
function documentation. (default: True)
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
import pandas as pd
from bokeh.charts import Histogram, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
xyvalues = pd.DataFrame(dict(normal=[1, 2, 3, 1], lognormal=[5, 4, 4, 1]))
hm = Histogram(xyvalues, bins=5, title='Histogram')
output_file('histogram.html')
show(hm)
"""
return create_and_build(
HistogramBuilder, values, bins=bins, mu=mu, sigma=sigma, density=density,
**kws
)
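# Illustrative usage sketch (not part of the original module): passing mu and
# sigma overlays the theoretical pdf/cdf curves (requires scipy). The 'normal'
# column name and the parameter values are made up for the example.
def _example_histogram_with_pdf():
    import pandas as pd
    values = pd.DataFrame(dict(normal=[1, 2, 3, 1, 2]))
    return Histogram(values, bins=5, mu=2.0, sigma=0.8,
                     title='Histogram with pdf/cdf')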
class HistogramBuilder(Builder):
"""This is the Histogram class and it is in charge of plotting
histograms in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed glyphs (quads and lines) taking the
references from the source.
"""
bins = Int(10, help="""
Number of bins to use for the histogram. (default: 10)
""")
mu = Float(help="""
Theoretical mean value for the normal distribution. (default: None)
""")
sigma = Float(help="""
Theoretical standard deviation value for the normal distribution.
(default: None)
""")
density = Bool(True, help="""
Whether to normalize the histogram. (default: True)
If True, the result is the value of the probability *density* function
at the bin, normalized such that the *integral* over the range is 1. If
False, the result will contain the number of samples in each bin.
For more info check ``numpy.histogram`` function documentation.
""")
def _process_data(self):
"""Take the Histogram data from the input **value.
It calculates the chart properties accordingly. Then build a dict
containing references to all the calculated points to be used by
the quad and line glyphs inside the ``_yield_renderers`` method.
"""
        # list to save all the groups available in the incoming input
self._groups.extend(self._values.keys())
# fill the data dictionary with the proper values
for i, (val, values) in enumerate(self._values.items()):
self.set_and_get("", val, values)
#build the histogram using the set bins number
hist, edges = np.histogram(
np.array(values), density=self.density, bins=self.bins
)
self.set_and_get("hist", val, hist)
self.set_and_get("edges", val, edges)
self.set_and_get("left", val, edges[:-1])
self.set_and_get("right", val, edges[1:])
self.set_and_get("bottom", val, np.zeros(len(hist)))
self._mu_and_sigma = False
if self.mu is not None and self.sigma is not None:
if _is_scipy:
self._mu_and_sigma = True
self.set_and_get("x", val, np.linspace(-2, 2, len(self._data[val])))
den = 2 * self.sigma ** 2
x_val = self._data["x" + val]
x_val_mu = x_val - self.mu
sigsqr2pi = self.sigma * np.sqrt(2 * np.pi)
pdf = 1 / (sigsqr2pi) * np.exp(-x_val_mu ** 2 / den)
self.set_and_get("pdf", val, pdf)
self._groups.append("pdf")
cdf = (1 + scipy.special.erf(x_val_mu / np.sqrt(den))) / 2
self.set_and_get("cdf", val, cdf)
self._groups.append("cdf")
else:
print("You need scipy to get the theoretical probability distributions.")
def _set_sources(self):
"""Push the Histogram data into the ColumnDataSource and calculate
the proper ranges."""
self._source = ColumnDataSource(data=self._data)
if not self._mu_and_sigma:
x_names, y_names = self._attr[2::6], self._attr[1::6]
else:
x_names, y_names = self._attr[2::9], self._attr[1::9]
endx = max(max(self._data[i]) for i in x_names)
startx = min(min(self._data[i]) for i in x_names)
self.x_range = Range1d(start=startx - 0.1 * (endx - startx),
end=endx + 0.1 * (endx - startx))
endy = max(max(self._data[i]) for i in y_names)
self.y_range = Range1d(start=0, end=1.1 * endy)
def _yield_renderers(self):
"""Use the several glyphs to display the Histogram and pdf/cdf.
It uses the quad (and line) glyphs to display the Histogram
bars, taking as reference points the data loaded at the
ColumnDataSurce.
"""
if not self._mu_and_sigma:
sextets = list(chunk(self._attr, 6))
colors = cycle_colors(sextets, self.palette)
# TODO (bev) this is a perfect use for a namedtuple
            # sextet: values, hist, edges, left, right, bottom
for i, sextet in enumerate(sextets):
glyph = Quad(
top=sextet[1], bottom=sextet[5], left=sextet[3], right=sextet[4],
fill_color=colors[i], fill_alpha=0.7,
line_color="white", line_alpha=1.0
)
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i], [renderer]))
yield renderer
else:
nonets = list(chunk(self._attr, 9))
colors = cycle_colors(nonets, self.palette)
# TODO (bev) this is a perfect use for a namedtuple
            # nonet: values, hist, edges, left, right, bottom, x, pdf, cdf
for i, nonet in enumerate(nonets):
glyph = Quad(
top=nonet[1], bottom=nonet[5], left=nonet[3], right=nonet[4],
fill_color=colors[i], fill_alpha=0.7,
line_color="white", line_alpha=1.0
)
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i], [renderer]))
yield renderer
glyph = Line(x=nonet[6], y=nonet[7], line_color="black")
yield GlyphRenderer(data_source=self._source, glyph=glyph)
glyph = Line(x=nonet[6], y=nonet[8], line_color="blue")
yield GlyphRenderer(data_source=self._source, glyph=glyph)
|
bsd-3-clause
|