repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
elvandy/nltools | nltools/tests/test_design_matrix.py | 1 | 3854 | import os
import numpy as np
import nibabel as nb
import pandas as pd
import glob
from nltools.data import Design_Matrix
from nltools.external.hrf import glover_hrf
def test_add_poly(sim_design_matrix):
matp = sim_design_matrix.add_poly(2)
assert matp.shape[1] == 7
assert sim_design_matrix.add_poly(2, include_lower=False).shape[1] == 5
def test_add_dct_basis(sim_design_matrix):
matpd = sim_design_matrix.add_dct_basis()
assert matpd.shape[1] == 15
def test_vif(sim_design_matrix):
matpd = sim_design_matrix.add_poly(2).add_dct_basis()
assert all(matpd.vif() < 2.0)
assert not all(matpd.vif(exclude_polys=False) < 2.0)
matc = matpd.clean()
assert matc.shape[1] == 16
def test_convolve(sim_design_matrix):
TR=2.0
assert sim_design_matrix.convolve().shape == sim_design_matrix.shape
hrf = glover_hrf(TR,oversampling=1.)
assert sim_design_matrix.convolve(conv_func=np.column_stack([hrf,hrf])).shape[1] == sim_design_matrix.shape[1] + 4
def test_zscore(sim_design_matrix):
matz = sim_design_matrix.zscore(columns = ['face_A','face_B'])
assert (matz[['house_A','house_B']] == sim_design_matrix[['house_A','house_B']]).all().all()
def test_replace(sim_design_matrix):
assert sim_design_matrix.replace_data(np.zeros((500,4))).shape == sim_design_matrix.shape
def test_upsample(sim_design_matrix):
newTR = 1.
target = 1./newTR
assert sim_design_matrix.upsample(target).shape[0] == sim_design_matrix.shape[0]*2 - target*2
def test_downsample(sim_design_matrix):
newTR = 4.
target = 1./newTR
assert sim_design_matrix.downsample(target).shape[0] == sim_design_matrix.shape[0]/2
def test_append(sim_design_matrix):
mats = sim_design_matrix.append(sim_design_matrix)
assert mats.shape[0] == sim_design_matrix.shape[0] * 2
# Keep polys separate by default
assert (mats.shape[1] - 4) == (sim_design_matrix.shape[1] - 4) * 2
# Otherwise stack them
assert sim_design_matrix.append(sim_design_matrix,
keep_separate=False).shape[1] == sim_design_matrix.shape[1]
# Keep a single stimulus column separate
assert sim_design_matrix.append(sim_design_matrix,
unique_cols=['face_A']).shape[1] == 5
# Keep a common stimulus class separate
assert sim_design_matrix.append(sim_design_matrix,
unique_cols=['face*']).shape[1] == 6
# Keep a common stimulus class and a different single stim separate
assert sim_design_matrix.append(sim_design_matrix,
unique_cols=['face*','house_A']).shape[1] == 7
# Keep multiple stimulus classes separate
assert sim_design_matrix.append(sim_design_matrix,
unique_cols=['face*','house*']).shape[1] == 8
# Growing a multi-run design matrix; keeping things separate
num_runs = 4
all_runs = Design_Matrix(sampling_freq=.5)
for i in range(num_runs):
run = Design_Matrix(np.array([
[1,0,0,0],
[1,0,0,0],
[0,0,0,0],
[0,1,0,0],
[0,1,0,0],
[0,0,0,0],
[0,0,1,0],
[0,0,1,0],
[0,0,0,0],
[0,0,0,1],
[0,0,0,1]
]),
sampling_freq = .5,
columns=['stim_A','stim_B','cond_C','cond_D']
)
run = run.add_poly(2)
all_runs = all_runs.append(run,unique_cols=['stim*','cond*'])
assert all_runs.shape == (44, 28)
| mit |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/matplotlib/table.py | 10 | 20553 | """
Place a table below the x-axis at location loc.
The table consists of a grid of cells.
The grid need not be rectangular and can have holes.
Cells are added by specifying their row and column.
For the purposes of positioning the cell at (0, 0) is
assumed to be at the top left and the cell at (max_row, max_col)
is assumed to be at bottom right.
You can add additional cells outside this range to have convenient
ways of positioning more interesting grids.
Author : John Gill <[email protected]>
Copyright : 2004 John Gill and John Hunter
License : matplotlib license
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import warnings
from . import artist
from .artist import Artist, allow_rasterization
from .patches import Rectangle
from .cbook import is_string_like
from matplotlib import docstring
from .text import Text
from .transforms import Bbox
from matplotlib.path import Path
class Cell(Rectangle):
"""
A cell is a Rectangle with some associated text.
"""
PAD = 0.1 # padding between text and rectangle
def __init__(self, xy, width, height,
edgecolor='k', facecolor='w',
fill=True,
text='',
loc=None,
fontproperties=None
):
# Call base
Rectangle.__init__(self, xy, width=width, height=height,
edgecolor=edgecolor, facecolor=facecolor)
self.set_clip_on(False)
# Create text object
if loc is None:
loc = 'right'
self._loc = loc
self._text = Text(x=xy[0], y=xy[1], text=text,
fontproperties=fontproperties)
self._text.set_clip_on(False)
def set_transform(self, trans):
Rectangle.set_transform(self, trans)
# the text does not get the transform!
self.stale = True
def set_figure(self, fig):
Rectangle.set_figure(self, fig)
self._text.set_figure(fig)
def get_text(self):
'Return the cell Text instance'
return self._text
def set_fontsize(self, size):
self._text.set_fontsize(size)
self.stale = True
def get_fontsize(self):
'Return the cell fontsize'
return self._text.get_fontsize()
def auto_set_font_size(self, renderer):
""" Shrink font size until text fits. """
fontsize = self.get_fontsize()
required = self.get_required_width(renderer)
while fontsize > 1 and required > self.get_width():
fontsize -= 1
self.set_fontsize(fontsize)
required = self.get_required_width(renderer)
return fontsize
@allow_rasterization
def draw(self, renderer):
if not self.get_visible():
return
# draw the rectangle
Rectangle.draw(self, renderer)
# position the text
self._set_text_position(renderer)
self._text.draw(renderer)
self.stale = False
def _set_text_position(self, renderer):
""" Set text up so it draws in the right place.
Currently supports 'left', 'center' and 'right'
"""
bbox = self.get_window_extent(renderer)
l, b, w, h = bbox.bounds
# draw in center vertically
self._text.set_verticalalignment('center')
y = b + (h / 2.0)
# now position horizontally
if self._loc == 'center':
self._text.set_horizontalalignment('center')
x = l + (w / 2.0)
elif self._loc == 'left':
self._text.set_horizontalalignment('left')
x = l + (w * self.PAD)
else:
self._text.set_horizontalalignment('right')
x = l + (w * (1.0 - self.PAD))
self._text.set_position((x, y))
def get_text_bounds(self, renderer):
""" Get text bounds in axes co-ordinates. """
bbox = self._text.get_window_extent(renderer)
bboxa = bbox.inverse_transformed(self.get_data_transform())
return bboxa.bounds
def get_required_width(self, renderer):
""" Get width required for this cell. """
l, b, w, h = self.get_text_bounds(renderer)
return w * (1.0 + (2.0 * self.PAD))
def set_text_props(self, **kwargs):
'update the text properties with kwargs'
self._text.update(kwargs)
self.stale = True
class CustomCell(Cell):
"""
A subclass of Cell where the sides may be visibly toggled.
"""
_edges = 'BRTL'
_edge_aliases = {'open': '',
'closed': _edges, # default
'horizontal': 'BT',
'vertical': 'RL'
}
def __init__(self, *args, **kwargs):
visible_edges = kwargs.pop('visible_edges')
Cell.__init__(self, *args, **kwargs)
self.visible_edges = visible_edges
@property
def visible_edges(self):
return self._visible_edges
@visible_edges.setter
def visible_edges(self, value):
if value is None:
self._visible_edges = self._edges
elif value in self._edge_aliases:
self._visible_edges = self._edge_aliases[value]
else:
for edge in value:
if edge not in self._edges:
msg = ('Invalid edge param {0}, must only be one of'
' {1} or string of {2}.').format(
value,
", ".join(self._edge_aliases.keys()),
", ".join(self._edges),
)
raise ValueError(msg)
self._visible_edges = value
self.stale = True
def get_path(self):
'Return a path where the edges specified by _visible_edges are drawn'
codes = [Path.MOVETO]
for edge in self._edges:
if edge in self._visible_edges:
codes.append(Path.LINETO)
else:
codes.append(Path.MOVETO)
if Path.MOVETO not in codes[1:]: # All sides are visible
codes[-1] = Path.CLOSEPOLY
return Path(
[[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]],
codes,
readonly=True
)
class Table(Artist):
"""
Create a table of cells.
Table can have (optional) row and column headers.
Each entry in the table can be either text or patches.
Column widths and row heights for the table can be specified.
Return value is a sequence of text, line and patch instances that make
up the table
"""
codes = {'best': 0,
'upper right': 1, # default
'upper left': 2,
'lower left': 3,
'lower right': 4,
'center left': 5,
'center right': 6,
'lower center': 7,
'upper center': 8,
'center': 9,
'top right': 10,
'top left': 11,
'bottom left': 12,
'bottom right': 13,
'right': 14,
'left': 15,
'top': 16,
'bottom': 17,
}
FONTSIZE = 10
AXESPAD = 0.02 # the border between the axes and table edge
def __init__(self, ax, loc=None, bbox=None, **kwargs):
Artist.__init__(self)
if is_string_like(loc) and loc not in self.codes:
warnings.warn('Unrecognized location %s. Falling back on '
'bottom; valid locations are\n%s\t' %
(loc, '\n\t'.join(six.iterkeys(self.codes))))
loc = 'bottom'
if is_string_like(loc):
loc = self.codes.get(loc, 1)
self.set_figure(ax.figure)
self._axes = ax
self._loc = loc
self._bbox = bbox
# use axes coords
self.set_transform(ax.transAxes)
self._texts = []
self._cells = {}
self._edges = None
self._autoRows = []
self._autoColumns = []
self._autoFontsize = True
self.update(kwargs)
self.set_clip_on(False)
def add_cell(self, row, col, *args, **kwargs):
""" Add a cell to the table. """
xy = (0, 0)
cell = CustomCell(xy, visible_edges=self.edges, *args, **kwargs)
cell.set_figure(self.figure)
cell.set_transform(self.get_transform())
cell.set_clip_on(False)
self._cells[(row, col)] = cell
self.stale = True
@property
def edges(self):
return self._edges
@edges.setter
def edges(self, value):
self._edges = value
self.stale = True
def _approx_text_height(self):
return (self.FONTSIZE / 72.0 * self.figure.dpi /
self._axes.bbox.height * 1.2)
@allow_rasterization
def draw(self, renderer):
# Need a renderer to do hit tests on mouseevent; assume the last one
# will do
if renderer is None:
renderer = self.figure._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
if not self.get_visible():
return
renderer.open_group('table')
self._update_positions(renderer)
keys = list(six.iterkeys(self._cells))
keys.sort()
for key in keys:
self._cells[key].draw(renderer)
# for c in self._cells.itervalues():
# c.draw(renderer)
renderer.close_group('table')
self.stale = False
def _get_grid_bbox(self, renderer):
"""Get a bbox, in axes co-ordinates for the cells.
Only include those in the range (0,0) to (maxRow, maxCol)"""
boxes = [self._cells[pos].get_window_extent(renderer)
for pos in six.iterkeys(self._cells)
if pos[0] >= 0 and pos[1] >= 0]
bbox = Bbox.union(boxes)
return bbox.inverse_transformed(self.get_transform())
def contains(self, mouseevent):
"""Test whether the mouse event occurred in the table.
Returns T/F, {}
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
# TODO: Return index of the cell containing the cursor so that the user
# doesn't have to bind to each one individually.
renderer = self.figure._cachedRenderer
if renderer is not None:
boxes = [self._cells[pos].get_window_extent(renderer)
for pos in six.iterkeys(self._cells)
if pos[0] >= 0 and pos[1] >= 0]
bbox = Bbox.union(boxes)
return bbox.contains(mouseevent.x, mouseevent.y), {}
else:
return False, {}
def get_children(self):
'Return the Artists contained by the table'
return list(six.itervalues(self._cells))
get_child_artists = get_children # backward compatibility
def get_window_extent(self, renderer):
'Return the bounding box of the table in window coords'
boxes = [cell.get_window_extent(renderer)
for cell in six.itervalues(self._cells)]
return Bbox.union(boxes)
def _do_cell_alignment(self):
""" Calculate row heights and column widths.
Position cells accordingly.
"""
# Calculate row/column widths
widths = {}
heights = {}
for (row, col), cell in six.iteritems(self._cells):
height = heights.setdefault(row, 0.0)
heights[row] = max(height, cell.get_height())
width = widths.setdefault(col, 0.0)
widths[col] = max(width, cell.get_width())
# work out left position for each column
xpos = 0
lefts = {}
cols = list(six.iterkeys(widths))
cols.sort()
for col in cols:
lefts[col] = xpos
xpos += widths[col]
ypos = 0
bottoms = {}
rows = list(six.iterkeys(heights))
rows.sort()
rows.reverse()
for row in rows:
bottoms[row] = ypos
ypos += heights[row]
# set cell positions
for (row, col), cell in six.iteritems(self._cells):
cell.set_x(lefts[col])
cell.set_y(bottoms[row])
def auto_set_column_width(self, col):
self._autoColumns.append(col)
self.stale = True
def _auto_set_column_width(self, col, renderer):
""" Automagically set width for column.
"""
cells = [key for key in self._cells if key[1] == col]
# find max width
width = 0
for cell in cells:
c = self._cells[cell]
width = max(c.get_required_width(renderer), width)
# Now set the widths
for cell in cells:
self._cells[cell].set_width(width)
def auto_set_font_size(self, value=True):
""" Automatically set font size. """
self._autoFontsize = value
self.stale = True
def _auto_set_font_size(self, renderer):
if len(self._cells) == 0:
return
fontsize = list(six.itervalues(self._cells))[0].get_fontsize()
cells = []
for key, cell in six.iteritems(self._cells):
# ignore auto-sized columns
if key[1] in self._autoColumns:
continue
size = cell.auto_set_font_size(renderer)
fontsize = min(fontsize, size)
cells.append(cell)
# now set all fontsizes equal
for cell in six.itervalues(self._cells):
cell.set_fontsize(fontsize)
def scale(self, xscale, yscale):
""" Scale column widths by xscale and row heights by yscale. """
for c in six.itervalues(self._cells):
c.set_width(c.get_width() * xscale)
c.set_height(c.get_height() * yscale)
def set_fontsize(self, size):
"""
Set the fontsize of the cell text
ACCEPTS: a float in points
"""
for cell in six.itervalues(self._cells):
cell.set_fontsize(size)
self.stale = True
def _offset(self, ox, oy):
'Move all the artists by ox,oy (axes coords)'
for c in six.itervalues(self._cells):
x, y = c.get_x(), c.get_y()
c.set_x(x + ox)
c.set_y(y + oy)
def _update_positions(self, renderer):
# called from renderer to allow more precise estimates of
# widths and heights with get_window_extent
# Do any auto width setting
for col in self._autoColumns:
self._auto_set_column_width(col, renderer)
if self._autoFontsize:
self._auto_set_font_size(renderer)
# Align all the cells
self._do_cell_alignment()
bbox = self._get_grid_bbox(renderer)
l, b, w, h = bbox.bounds
if self._bbox is not None:
# Position according to bbox
rl, rb, rw, rh = self._bbox
self.scale(rw / w, rh / h)
ox = rl - l
oy = rb - b
self._do_cell_alignment()
else:
# Position using loc
(BEST, UR, UL, LL, LR, CL, CR, LC, UC, C,
TR, TL, BL, BR, R, L, T, B) = list(xrange(len(self.codes)))
# defaults for center
ox = (0.5 - w / 2) - l
oy = (0.5 - h / 2) - b
if self._loc in (UL, LL, CL): # left
ox = self.AXESPAD - l
if self._loc in (BEST, UR, LR, R, CR): # right
ox = 1 - (l + w + self.AXESPAD)
if self._loc in (BEST, UR, UL, UC): # upper
oy = 1 - (b + h + self.AXESPAD)
if self._loc in (LL, LR, LC): # lower
oy = self.AXESPAD - b
if self._loc in (LC, UC, C): # center x
ox = (0.5 - w / 2) - l
if self._loc in (CL, CR, C): # center y
oy = (0.5 - h / 2) - b
if self._loc in (TL, BL, L): # out left
ox = - (l + w)
if self._loc in (TR, BR, R): # out right
ox = 1.0 - l
if self._loc in (TR, TL, T): # out top
oy = 1.0 - b
if self._loc in (BL, BR, B): # out bottom
oy = - (b + h)
self._offset(ox, oy)
def get_celld(self):
'return a dict of cells in the table'
return self._cells
def table(ax,
cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None, edges='closed',
**kwargs):
"""
TABLE(cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None, edges='closed')
Factory function to generate a Table instance.
Thanks to John Gill for providing the class and table.
"""
if cellColours is None and cellText is None:
raise ValueError('At least one argument from "cellColours" or '
'"cellText" must be provided to create a table.')
# Check we have some cellText
if cellText is None:
# assume just colours are needed
rows = len(cellColours)
cols = len(cellColours[0])
cellText = [[''] * cols] * rows
rows = len(cellText)
cols = len(cellText[0])
for row in cellText:
if len(row) != cols:
msg = "Each row in 'cellText' must have {0} columns"
raise ValueError(msg.format(cols))
if cellColours is not None:
if len(cellColours) != rows:
raise ValueError("'cellColours' must have {0} rows".format(rows))
for row in cellColours:
if len(row) != cols:
msg = "Each row in 'cellColours' must have {0} columns"
raise ValueError(msg.format(cols))
else:
cellColours = ['w' * cols] * rows
# Set colwidths if not given
if colWidths is None:
colWidths = [1.0 / cols] * cols
# Fill in missing information for column
# and row labels
rowLabelWidth = 0
if rowLabels is None:
if rowColours is not None:
rowLabels = [''] * rows
rowLabelWidth = colWidths[0]
elif rowColours is None:
rowColours = 'w' * rows
if rowLabels is not None:
if len(rowLabels) != rows:
raise ValueError("'rowLabels' must be of length {0}".format(rows))
# If we have column labels, need to shift
# the text and colour arrays down 1 row
offset = 1
if colLabels is None:
if colColours is not None:
colLabels = [''] * cols
else:
offset = 0
elif colColours is None:
colColours = 'w' * cols
# Set up cell colours if not given
if cellColours is None:
cellColours = ['w' * cols] * rows
# Now create the table
table = Table(ax, loc, bbox, **kwargs)
table.edges = edges
height = table._approx_text_height()
# Add the cells
for row in xrange(rows):
for col in xrange(cols):
table.add_cell(row + offset, col,
width=colWidths[col], height=height,
text=cellText[row][col],
facecolor=cellColours[row][col],
loc=cellLoc)
# Do column labels
if colLabels is not None:
for col in xrange(cols):
table.add_cell(0, col,
width=colWidths[col], height=height,
text=colLabels[col], facecolor=colColours[col],
loc=colLoc)
# Do row labels
if rowLabels is not None:
for row in xrange(rows):
table.add_cell(row + offset, -1,
width=rowLabelWidth or 1e-15, height=height,
text=rowLabels[row], facecolor=rowColours[row],
loc=rowLoc)
if rowLabelWidth == 0:
table.auto_set_column_width(-1)
ax.add_table(table)
return table
docstring.interpd.update(Table=artist.kwdoc(Table))
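# --- Hedged usage sketch (not part of the original matplotlib source) ---
# A minimal example of the table() factory defined above: it builds a 2x2 grid of
# cells with row and column labels and attaches it to an Axes. The cell text,
# labels, scaling values and the 'closed' edge style are illustrative placeholders,
# not values taken from the original file.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.set_axis_off()  # hide the axes frame so only the table is visible
    tbl = table(ax,
                cellText=[['1.0', '2.0'], ['3.0', '4.0']],
                rowLabels=['row A', 'row B'],
                colLabels=['col 1', 'col 2'],
                loc='center', edges='closed')
    tbl.set_fontsize(12)
    tbl.scale(1.0, 1.5)  # stretch row heights using Table.scale()
    plt.show()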
| bsd-3-clause |
fmilano/mitk | Modules/Biophotonics/python/iMC/scripts/ipcai_to_theano/input_icai_data.py | 6 | 3612 | """
This tutorial introduces logistic regression using Theano and stochastic
gradient descent.
Logistic regression is a probabilistic, linear classifier. It is parametrized
by a weight matrix :math:`W` and a bias vector :math:`b`. Classification is
done by projecting data points onto a set of hyperplanes, the distance to
which is used to determine a class membership probability.
Mathematically, this can be written as:
.. math::
P(Y=i|x, W,b) &= softmax_i(W x + b) \\
&= \frac {e^{W_i x + b_i}} {\sum_j e^{W_j x + b_j}}
The output of the model or prediction is then done by taking the argmax of
the vector whose i'th element is P(Y=i|x).
.. math::
y_{pred} = argmax_i P(Y=i|x,W,b)
This tutorial presents a stochastic gradient descent optimization method
suitable for large datasets.
References:
- textbooks: "Pattern Recognition and Machine Learning" -
Christopher M. Bishop, section 4.3.2
"""
from __future__ import print_function
import os
import numpy
import pandas as pd
import numpy as np
import theano
import theano.tensor as T
from regression.preprocessing import preprocess
__docformat__ = 'restructedtext en'
def create_dataset(path_to_simulation_results):
df = pd.read_csv(path_to_simulation_results, header=[0, 1])
X, y = preprocess(df, snr=10.)
y = y.values
return X, y
def load_data(data_root):
''' Loads the dataset
:type data_root: string
:param data_root: the path to the directory containing the IPCAI dataset files
'''
TRAIN_IMAGES = os.path.join(data_root,
"ipcai_revision_colon_mean_scattering_train_all_spectrocam.txt")
TEST_IMAGES = os.path.join(data_root,
"ipcai_revision_colon_mean_scattering_test_all_spectrocam.txt")
train_set = create_dataset(TRAIN_IMAGES)
valid_set = create_dataset(TEST_IMAGES)
test_set = (np.load("sample_image.npy"), np.array([0]))
def shared_dataset(data_xy, borrow=True):
""" Function that loads the dataset into shared variables
The reason we store our dataset in shared variables is to allow
Theano to copy it into the GPU memory (when code is run on GPU).
Since copying data into the GPU is slow, copying a minibatch every time
it is needed (the default behaviour if the data is not in a shared
variable) would lead to a large decrease in performance.
"""
data_x, data_y = data_xy
shared_x = theano.shared(numpy.asarray(data_x,
dtype=theano.config.floatX),
borrow=borrow)
shared_y = theano.shared(numpy.asarray(data_y,
dtype=theano.config.floatX),
borrow=borrow)
# When storing data on the GPU it has to be stored as floats
# therefore we will store the labels as ``floatX`` as well
# (``shared_y`` does exactly that). But during our computations
# we need them as ints (we use labels as index, and if they are
# floats it doesn't make sense) therefore instead of returning
# ``shared_y`` we will have to cast it to int. This little hack
# lets us get around this issue
return shared_x, shared_y
test_set_x, test_set_y = shared_dataset(test_set, 0)
valid_set_x, valid_set_y = shared_dataset(valid_set)
train_set_x, train_set_y = shared_dataset(train_set)
rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
return rval
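# --- Hedged illustration (not part of the original file) ---
# A tiny numpy sketch of the prediction rule described in the module docstring,
#   P(Y=i|x, W, b) = softmax_i(W x + b)  and  y_pred = argmax_i P(Y=i|x, W, b).
# W, b and x below are made-up toy values used only to show the arithmetic;
# this relies on the module-level ``import numpy`` above.
def _softmax_prediction_demo():
    W = numpy.array([[0.5, -0.2],
                     [0.1, 0.3]])  # one row of weights per class
    b = numpy.array([0.0, 0.1])    # one bias per class
    x = numpy.array([1.0, 2.0])    # a single feature vector
    scores = W.dot(x) + b          # class scores W x + b
    p = numpy.exp(scores) / numpy.exp(scores).sum()  # softmax over classes
    return int(numpy.argmax(p))    # y_pred = index of the most probable class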
| bsd-3-clause |
jake-mason/kmeans-conceptual-metaphors | script.py | 1 | 5543 | # coding: utf-8
""" Created on Mon Nov 30 2015 and @author: jakemason """
from __future__ import print_function
import urllib2, sys, os
import re, nltk, csv, requests, math, functools, codecs
import matplotlib.pyplot as plt, matplotlib.dates, matplotlib, pylab as pl
import pandas, numpy, codecs
from sklearn import feature_extraction
from operator import itemgetter, attrgetter, methodcaller
from bs4 import BeautifulSoup, NavigableString
from string import punctuation as p
from multiprocessing import Pool
import glob
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score
from nltk.stem import *
from nltk.stem.snowball import SnowballStemmer
# replace certain punctuation in str item
def replacePunct(string):
to_remove = [',', '.', '!', '-', '—']
for char in to_remove:
string = string.replace(char,'')
return string
# shorter version of processURL that doesn't get filename; just returns text
def processURL_short(l):
open_url = urllib2.urlopen(l).read()
item_soup = BeautifulSoup(open_url)
item_div = item_soup.find('div',{'id':'transcript'},{'class':'displaytext'})
item_str = item_div.text.lower()
item_str = replacePunct(item_str)
return item_str
# filenames are needed a lot below, so this was made into a function
def getFilename(l):
splitlink = l.split("/")
president = splitlink[4]
speech_num = splitlink[-1]
filename = "{0}_{1}".format(president, speech_num)
return filename
def processURL(l):
open_url = urllib2.urlopen(l).read()
item_soup = BeautifulSoup(open_url)
item_div = item_soup.find('div',{'id':'transcript'},{'class':'displaytext'})
item_str = item_div.text.lower()
item_str_processed = punctuation.sub(' ',item_str)
item_str_processed_final = item_str_processed.replace('—',' ').replace('transcript','',1).replace('\n','')
splitlink = l.split("/")
president = splitlink[4]
speech_num = splitlink[-1]
filename = "{0}_{1}".format(president, speech_num)
return filename, item_str_processed_final # giving back filename and the text itself
# merge dictionaries
def merge_dicts(d1,d2):
contractions = d1.copy()
contractions.update(d2)
return contractions
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf8')
#==============================================================================
# Scrape all speech links
#==============================================================================
# remove punctuation
punctuation = re.compile('[{}]+'.format(re.escape(p)))
url = 'http://www.millercenter.org/president/speeches'
url2 = 'http://www.millercenter.org'
connection = urllib2.urlopen(url)
html = connection.read()
date_soup = BeautifulSoup(urllib2.urlopen(url), "html.parser")
speeches = date_soup.select('div#listing div.title a[href*=speeches]')
# this list is useful later for creating filenames
end_link = [tag.get('href') for tag in speeches if tag.get('href') is not None]
# concatenate 'http://www.millercenter.org' with each speech's URL ending
every_link = [url2 + end for end in end_link]
# list of all 43 presidents
presidents = [l[l.find('president/')+len('president/'):] for l in every_link if 'president' in l]
presidents = [l[0:l.find('/')] for l in presidents]
pres_list = set(presidents)
# run processURL() for all links in every_link; save to Users drive
os.chdir('/Users/jacobmason/Documents/Python/speeches/')
for l in every_link:
filename, content = processURL(l) # tuple matching with what processURL returns
with open(filename + '.txt', 'w') as f: # create .txt file for each speech
f.write(content) # write speech to file
#========================
# K-means text clustering
#========================
# load the speeches into a dictionary to run through K-means algorithm
speech_dict = {}
for filename in glob.glob("/Users/jacobmason/Documents/Python/speeches/*.txt"): # all the filenames
with open(filename, 'r') as inputFile:
filecontent = inputFile.read()
filecontent = filecontent.decode('utf-8')
speech_dict[filename] = filecontent # put the speeches into a dictionary to run through the algorithm
# create a list from dictionary of speeches
speech_list = list(speech_dict.values())
nltk.download('stopwords')
stopset = set(nltk.corpus.stopwords.words('english'))
# these are some words I wanted the algorithm to ignore, because they're not meaningful usually
add_stopwords_list = [word.encode("utf8") for word in ['american','uh','upon','us','ladies','gentlemen']]
for w in add_stopwords_list:
w = w.encode('utf-8')
stopset.add(w)
# specify number of clusters
k = 5
vectorizer = TfidfVectorizer(stop_words=stopset)
X = vectorizer.fit_transform(speech_list)
model = KMeans(n_clusters=k, init='k-means++', max_iter=100, n_init=1)
model.fit(X)
print("Top terms per cluster:")
order_centroids = model.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(k):
print("Cluster %d:" % i),
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind]),
print()
| mit |
luo66/scikit-learn | examples/model_selection/plot_learning_curve.py | 250 | 4171 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, the shape of the curve can be found
in more complex datasets very often: the training score is very high at the
beginning and decreases and the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.learning_curve import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=100,
test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=10,
test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
| bsd-3-clause |
perryjohnson/biplaneblade | biplane_blade_lib/prep_stn15_mesh.py | 1 | 30757 | """Write initial TrueGrid files for one biplane blade station.
Usage
-----
start an IPython (qt)console with the pylab flag:
$ ipython qtconsole --pylab
or
$ ipython --pylab
Then, from the prompt, run this script:
|> %run biplane_blade_lib/prep_stnXX_mesh.py
or
|> import biplane_blade_lib/prep_stnXX_mesh
Author: Perry Roth-Johnson
Last updated: April 29, 2014
"""
import matplotlib.pyplot as plt
import lib.blade as bl
reload(bl)
import lib.poly_utils as pu
reload(pu)
from shapely.geometry import Polygon
from shapely.affinity import translate
# SET THESE PARAMETERS -----------------
station_num = 15
# --------------------------------------
plt.close('all')
# load the biplane blade
b1 = bl.BiplaneBlade(
'biplane blade, flapwise symmetric, no stagger, rj/R=0.452, g/c=1.25',
'biplane_blade')
# pre-process the station dimensions
station = b1.list_of_stations[station_num-1]
station.airfoil.create_polygon()
station.structure.create_all_layers()
station.structure.save_all_layer_edges()
station.structure.write_all_part_polygons()
# plot the parts
station.plot_parts()
# access the structure and airfoil for this station
st = station.structure
af = station.airfoil
x3_off = af.lower_chord * af.gap_to_chord_ratio * af.gap_fraction
# upper spar cap -----------------------------------------------------------
label = 'upper spar cap'
# create the bounding polygon
usc = st.lower_spar_cap.layer['upper']
is2 = st.lower_internal_surface_2.layer['resin']
points_usc = [
(-0.75, usc.left[0][1]), # lower_SparCap_upper.txt
is2.polygon.interiors[0].coords[-2], # lower_InternalSurface2_resin.txt
is2.polygon.interiors[0].coords[44-32], # lower_InternalSurface2_resin.txt
( 0.75, usc.right[1][1]), # lower_SparCap_upper.txt
( 0.75, 0.0),
(-0.75, 0.0)
]
bounding_polygon = Polygon(points_usc)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_2, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_2, 'triax', label,
bounding_polygon, airfoil='lower')
# lower spar cap -----------------------------------------------------------
label = 'lower spar cap'
# create the bounding polygon
lsc = st.lower_spar_cap.layer['lower']
points_lsc = [
(-0.75,-6.5),
( 0.75,-6.5),
( 0.75000000, lsc.right[0][1]), # lower_SparCap_lower.txt
is2.polygon.interiors[0].coords[43-32], # lower_InternalSurface2_resin.txt
is2.polygon.interiors[0].coords[-1], # lower_InternalSurface2_resin.txt
(-0.75000000, lsc.left[1][1]) # lower_SparCap_lower.txt
]
bounding_polygon = Polygon(points_lsc)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_2, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_2, 'triax', label,
bounding_polygon, airfoil='lower')
# TE reinforcement, upper 1 ------------------------------------------------
label = 'TE reinforcement, upper 1'
# create the bounding polygon
ter = st.lower_TE_reinforcement.layer['foam']
is4 = st.lower_internal_surface_4.layer['resin']
points_teu1 = [
(ter.top[0][0], -3.5), # TE_Reinforcement_foam.txt
(ter.top[0][0], -4.6), # TE_Reinforcement_foam.txt
is4.polygon.interiors[0].coords[343-149], # InternalSurface4_resin.txt
(is4.polygon.interiors[0].coords[343-149][0], -3.5) # InternalSurface4_resin.txt
]
bounding_polygon = Polygon(points_teu1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'foam', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'uniax', label,
bounding_polygon, airfoil='lower')
# TE reinforcement, lower 1 ------------------------------------------------
label = 'TE reinforcement, lower 1'
# create the bounding polygon
points_tel1 = [
(ter.bottom[0][0], -5.0), # TE_Reinforcement_foam.txt
(ter.bottom[0][0], -4.6), # TE_Reinforcement_foam.txt
is4.polygon.interiors[0].coords[343-149], # InternalSurface4_resin.txt
(is4.polygon.interiors[0].coords[343-149][0], -5.0) # InternalSurface4_resin.txt
]
bounding_polygon = Polygon(points_tel1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'foam', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'uniax', label,
bounding_polygon, airfoil='lower')
# TE reinforcement, upper 2 ------------------------------------------------
label = 'TE reinforcement, upper 2'
# create the bounding polygon
points_teu2 = [
points_teu1[-1],
points_teu1[-2],
ter.polygon.exterior.coords[50-3], # lower_TE_reinforcement_foam.txt
(ter.polygon.exterior.coords[50-3][0], -3.5) # lower_TE_reinforcement_foam.txt
]
bounding_polygon = Polygon(points_teu2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'foam', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'uniax', label,
bounding_polygon, airfoil='lower')
# TE reinforcement, lower 2 ------------------------------------------------
label = 'TE reinforcement, lower 2'
# create the bounding polygon
points_tel2 = [
(points_teu2[0][0], -5.0),
points_teu2[1],
points_teu2[2],
(points_teu2[2][0], -5.0)
]
bounding_polygon = Polygon(points_tel2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'foam', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'uniax', label,
bounding_polygon, airfoil='lower')
# TE reinforcement, upper 3 ------------------------------------------------
label = 'TE reinforcement, upper 3'
# create the bounding polygon
points_teu3 = [
points_teu2[-1],
points_teu2[-2],
ter.polygon.exterior.coords[0], # TE_Reinforcement_foam.txt
(ter.polygon.exterior.coords[0][0], -3.5) # TE_Reinforcement_foam.txt
]
bounding_polygon = Polygon(points_teu3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'foam', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'uniax', label,
bounding_polygon, airfoil='lower')
# TE reinforcement, lower 3 ------------------------------------------------
label = 'TE reinforcement, lower 3'
# create the bounding polygon
points_tel3 = [
(points_teu3[0][0], -5.0),
points_teu3[1],
points_teu3[2],
(points_teu3[2][0], -5.0)
]
bounding_polygon = Polygon(points_tel3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'foam', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'uniax', label,
bounding_polygon, airfoil='lower')
# TE reinforcement, upper 4 ------------------------------------------------
label = 'TE reinforcement, upper 4'
# create the bounding polygon
es = st.lower_external_surface.layer['gelcoat']
teru = st.lower_TE_reinforcement.layer['uniax']
points_teu4 = [
points_teu3[-1],
points_teu3[-2],
(teru.polygon.exterior.coords[-2][0], -4.768), # TE_Reinforcement_uniax.txt
teru.polygon.exterior.coords[-2], # TE_Reinforcement_uniax.txt
es.polygon.exterior.coords[-2],
(teru.polygon.exterior.coords[-2][0], -3.5) # TE_Reinforcement_uniax.txt
]
bounding_polygon = Polygon(points_teu4)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'uniax', label,
bounding_polygon, airfoil='lower')
# TE reinforcement, lower 4 ------------------------------------------------
label = 'TE reinforcement, lower 4'
# create the bounding polygon
points_tel4 = [
(points_teu4[0][0], -5.0),
points_teu4[1],
points_teu4[2],
teru.polygon.exterior.coords[-1], # TE_Reinforcement_uniax.txt
es.polygon.exterior.coords[-1],
(points_teu4[2][0], -5.0)
]
bounding_polygon = Polygon(points_tel4)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_TE_reinforcement, 'uniax', label,
bounding_polygon, airfoil='lower')
# LE panel -----------------------------------------------------------------
label = 'LE panel'
# create the bounding polygon
lep = st.lower_LE_panel.layer['foam']
is1 = st.lower_internal_surface_1.layer['resin']
points_le = [
(-3.00,-6.5),
(-0.836,-6.5),
tuple(lep.bottom[0]), # lower_LE_Panel_foam.txt
is1.polygon.interiors[0].coords[-2], # lower_InternalSurface1_resin.txt
(-1.5, -x3_off),
is1.polygon.interiors[0].coords[-1], # lower_InternalSurface1_resin.txt
tuple(lep.top[1]), # lower_LE_Panel_foam.txt
(-0.836, 0.0),
(-3.00, 0.0)
]
bounding_polygon = Polygon(points_le)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_1, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_1, 'triax', label,
bounding_polygon, airfoil='lower')
# upper aft panel 1 -------------------------------------------------------
label = 'upper aft panel 1'
# create the bounding polygon
ap1u = st.lower_aft_panel_1.layer['upper']
is3 = st.lower_internal_surface_3.layer['resin']
points_ap1u = [
(0.836, 0.0),
(ap1u.right[1][0], 0.0), # lower_AftPanel1_upper.txt
tuple(ap1u.right[1]), # lower_AftPanel1_upper.txt
is3.polygon.interiors[0].coords[113-59], # lower_InternalSurface3_resin.txt
(2.0, -4.5),
is3.polygon.interiors[0].coords[-2], # lower_InternalSurface3_resin.txt
tuple(ap1u.left[0]) # lower_AftPanel1_upper.txt
]
bounding_polygon = Polygon(points_ap1u)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_3, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_3, 'triax', label,
bounding_polygon, airfoil='lower')
# lower aft panel 1 -------------------------------------------------------
label = 'lower aft panel 1'
# create the bounding polygon
ap1l = st.lower_aft_panel_1.layer['lower']
points_ap1l = [
(0.836, -6.5),
(ap1l.right[0][0], -6.5), # lower_AftPanel1_lower.txt
tuple(ap1l.right[0]), # lower_AftPanel1_lower.txt
is3.polygon.interiors[0].coords[112-59], # lower_InternalSurface3_resin.txt
(2.0, -4.5),
is3.polygon.interiors[0].coords[-1], # lower_InternalSurface3_resin.txt
tuple(ap1l.left[1]) # lower_AftPanel1_lower.txt
]
bounding_polygon = Polygon(points_ap1l)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_3, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_3, 'triax', label,
bounding_polygon, airfoil='lower')
# upper aft panel 2 -------------------------------------------------------
label = 'upper aft panel 2'
# create the bounding polygon
ap2u = st.lower_aft_panel_2.layer['upper']
sw3br = st.lower_shear_web_3.layer['biax, right']
points_ap2u = [
(sw3br.right[0][0], 0.0),
(ap2u.right[1][0], 0.0), # AftPanel2_upper.txt
tuple(ap2u.right[1]), # AftPanel2_upper.txt
is4.polygon.interiors[0].coords[376-149], # InternalSurface4_resin.txt
(3.0, -4.6),
is4.polygon.interiors[0].coords[-2], # InternalSurface4_resin.txt
tuple(ap2u.left[0]) # AftPanel2_upper.txt
]
bounding_polygon = Polygon(points_ap2u)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'triax', label,
bounding_polygon)
# lower aft panel 2 -------------------------------------------------------
label = 'lower aft panel 2'
# create the bounding polygon
ap2l = st.lower_aft_panel_2.layer['lower']
points_ap2l = [
(sw3br.right[0][0], -5.4),
(ap2l.right[0][0], -5.4), # AftPanel2_lower.txt
tuple(ap2l.right[0]), # AftPanel2_lower.txt
is4.polygon.interiors[0].coords[246-149], # InternalSurface4_resin.txt
(3.0, -4.6),
is4.polygon.interiors[0].coords[-1], # InternalSurface4_resin.txt
tuple(ap2l.left[1]) # AftPanel2_lower.txt
]
bounding_polygon = Polygon(points_ap2l)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'triax', label,
bounding_polygon)
# above shear web 1 ----------------------------------------------------------
label = 'above shear web 1'
# create the bounding polygon
points_asw1 = [
(-0.75, 0.0),
(-0.75, -4.5),
(-0.836, -4.5),
(-0.836, 0.0)
]
bounding_polygon = Polygon(points_asw1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
# below shear web 1 ----------------------------------------------------------
label = 'below shear web 1'
# create the bounding polygon
points_bsw1 = [
(-0.75, -6.5),
(-0.75, -5.0),
(-0.836, -5.0),
(-0.836, -6.5)
]
bounding_polygon = Polygon(points_bsw1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
# above shear web 2 ----------------------------------------------------------
label = 'above shear web 2'
# create the bounding polygon
points_asw2 = [
(0.75, 0.0),
(0.75, -4.5),
(0.836, -4.5),
(0.836, 0.0)
]
bounding_polygon = Polygon(points_asw2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
# below shear web 2 ----------------------------------------------------------
label = 'below shear web 2'
# create the bounding polygon
points_bsw2 = [
(0.75, -6.5),
(0.75, -5.0),
(0.836, -5.0),
(0.836, -6.5)
]
bounding_polygon = Polygon(points_bsw2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
# above shear web 3 ----------------------------------------------------------
label = 'above shear web 3'
sw3bl = st.lower_shear_web_3.layer['biax, left']
# create the bounding polygon
points_asw3 = [
(sw3bl.left[0][0], 0.0),
(sw3bl.left[0][0], -4.5),
(sw3br.right[0][0], -4.5),
(sw3br.right[0][0], 0.0)
]
bounding_polygon = Polygon(points_asw3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
# below shear web 3 ----------------------------------------------------------
label = 'below shear web 3'
# create the bounding polygon
points_bsw3 = [
(sw3bl.left[0][0], -6.5),
(sw3bl.left[0][0], -4.7),
(sw3br.right[0][0], -4.7),
(sw3br.right[0][0], -6.5)
]
bounding_polygon = Polygon(points_bsw3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'triax', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_external_surface, 'gelcoat', label,
bounding_polygon, airfoil='lower')
# left of shear web 1 -------------------------------------------------------
label = 'left of shear web 1'
# create the bounding polygon
points_lsw1 = points_le[2:-2]
bounding_polygon = Polygon(points_lsw1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_1, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_1, 'triax', label,
bounding_polygon, airfoil='lower')
# right of shear web 1 -------------------------------------------------------
label = 'right of shear web 1'
# create the bounding polygon
points_rsw1 = [
points_usc[0],
points_usc[1],
(0.0, -x3_off),
points_lsc[-2],
points_lsc[-1]
]
bounding_polygon = Polygon(points_rsw1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_2, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_2, 'triax', label,
bounding_polygon, airfoil='lower')
# left of shear web 2 -------------------------------------------------------
label = 'left of shear web 2'
# create the bounding polygon
points_lsw2 = [
points_usc[3],
points_usc[2],
(0.0, -x3_off),
points_lsc[3],
points_lsc[2]
]
bounding_polygon = Polygon(points_lsw2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_2, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_2, 'triax', label,
bounding_polygon, airfoil='lower')
# right of shear web 2 -------------------------------------------------------
label = 'right of shear web 2'
# create the bounding polygon
points_rsw2 = [
points_ap1u[-1],
points_ap1u[-2],
(1.5, -x3_off),
points_ap1l[-2],
points_ap1l[-1]
]
bounding_polygon = Polygon(points_rsw2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_3, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_3, 'triax', label,
bounding_polygon, airfoil='lower')
# left of shear web 3 -------------------------------------------------------
label = 'left of shear web 3'
# create the bounding polygon
points_lsw3 = [
points_ap1u[2],
points_ap1u[3],
(2.0, -4.5),
points_ap1l[3],
points_ap1l[2]
]
bounding_polygon = Polygon(points_lsw3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_3, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_3, 'triax', label,
bounding_polygon, airfoil='lower')
# right of shear web 3 -------------------------------------------------------
label = 'right of shear web 3'
# create the bounding polygon
points_rsw3 = [
points_ap2u[-1],
points_ap2u[-2],
(3.0, -4.6),
points_ap2l[-2],
points_ap2l[-1]
]
bounding_polygon = Polygon(points_rsw3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'resin', label,
bounding_polygon, airfoil='lower')
pu.cut_plot_and_write_alt_layer(st.lower_internal_surface_4, 'triax', label,
bounding_polygon, airfoil='lower')
# -----------------------------------------------------------------------------
list_of_mesh_layers = []
# translate all the alt layers in each part
for (name, layer) in st.lower_TE_reinforcement.alt_layer.items():
layer.move(x3_off, alt_layer=True)
list_of_mesh_layers.append(layer)
for (name, layer) in st.lower_external_surface.alt_layer.items():
layer.move(x3_off, alt_layer=True)
list_of_mesh_layers.append(layer)
for (name, layer) in st.lower_internal_surface_1.alt_layer.items():
layer.move(x3_off, alt_layer=True)
list_of_mesh_layers.append(layer)
for (name, layer) in st.lower_internal_surface_2.alt_layer.items():
layer.move(x3_off, alt_layer=True)
list_of_mesh_layers.append(layer)
for (name, layer) in st.lower_internal_surface_3.alt_layer.items():
layer.move(x3_off, alt_layer=True)
list_of_mesh_layers.append(layer)
for (name, layer) in st.lower_internal_surface_4.alt_layer.items():
layer.move(x3_off, alt_layer=True)
list_of_mesh_layers.append(layer)
# translate all the remaining regular layers
st.lower_spar_cap.layer['upper'].move(x3_off)
st.lower_spar_cap.layer['lower'].move(x3_off)
st.lower_aft_panel_1.layer['upper'].move(x3_off)
st.lower_aft_panel_1.layer['lower'].move(x3_off)
st.lower_aft_panel_2.layer['upper'].move(x3_off)
st.lower_aft_panel_2.layer['lower'].move(x3_off)
st.lower_LE_panel.layer['foam'].move(x3_off)
st.lower_shear_web_1.layer['biax, left'].move(x3_off)
st.lower_shear_web_1.layer['foam'].move(x3_off)
st.lower_shear_web_1.layer['biax, right'].move(x3_off)
st.lower_shear_web_2.layer['biax, left'].move(x3_off)
st.lower_shear_web_2.layer['foam'].move(x3_off)
st.lower_shear_web_2.layer['biax, right'].move(x3_off)
st.lower_shear_web_3.layer['biax, left'].move(x3_off)
st.lower_shear_web_3.layer['foam'].move(x3_off)
st.lower_shear_web_3.layer['biax, right'].move(x3_off)
list_of_mesh_layers.append(st.lower_spar_cap.layer['upper'])
list_of_mesh_layers.append(st.lower_spar_cap.layer['lower'])
list_of_mesh_layers.append(st.lower_aft_panel_1.layer['upper'])
list_of_mesh_layers.append(st.lower_aft_panel_1.layer['lower'])
list_of_mesh_layers.append(st.lower_aft_panel_2.layer['upper'])
list_of_mesh_layers.append(st.lower_aft_panel_2.layer['lower'])
list_of_mesh_layers.append(st.lower_LE_panel.layer['foam'])
list_of_mesh_layers.append(st.lower_shear_web_1.layer['biax, left'])
list_of_mesh_layers.append(st.lower_shear_web_1.layer['foam'])
list_of_mesh_layers.append(st.lower_shear_web_1.layer['biax, right'])
list_of_mesh_layers.append(st.lower_shear_web_2.layer['biax, left'])
list_of_mesh_layers.append(st.lower_shear_web_2.layer['foam'])
list_of_mesh_layers.append(st.lower_shear_web_2.layer['biax, right'])
list_of_mesh_layers.append(st.lower_shear_web_3.layer['biax, left'])
list_of_mesh_layers.append(st.lower_shear_web_3.layer['foam'])
list_of_mesh_layers.append(st.lower_shear_web_3.layer['biax, right'])
# plot the lower airfoil in the local beam coordinate system
# (translate it up by the appropriate gap distance: x3_off)
fig,ax = plt.subplots()
fmt1 = "Station #{0}, {1}, {2}% span\n"
fmt2 = "lower airfoil in local beam coordinate system (x3-offset = {3:+.4f})"
fmt = fmt1 + fmt2
ax.set_title(fmt.format(station.station_num, station.airfoil.name,
station.coords.x1, x3_off))
lp2 = translate(af.lower_polygon, yoff=x3_off)
(minx, miny, maxx, maxy) = lp2.bounds
ax.set_xlim([minx*1.2,maxx*1.2])
ax.set_ylim([miny*1.2,maxy*1.2])
plt.grid('on')
ax.set_xlabel('x2 [meters]')
ax.set_ylabel('x3 [meters]')
ax.set_aspect('equal')
for layer in list_of_mesh_layers:
station.plot_polygon(layer.polygon, ax, layer.face_color, layer.edge_color,
alpha=0.8)
# show the plots
plt.show()
# write the TrueGrid input file for mesh generation ---------------------
st.write_truegrid_inputfile(
interrupt_flag=True,
additional_layers=[
st.lower_spar_cap.layer['upper'],
st.lower_spar_cap.layer['lower'],
st.lower_aft_panel_1.layer['upper'],
st.lower_aft_panel_1.layer['lower'],
st.lower_aft_panel_2.layer['upper'],
st.lower_aft_panel_2.layer['lower'],
st.lower_LE_panel.layer['foam'],
st.lower_shear_web_1.layer['biax, left'],
st.lower_shear_web_1.layer['foam'],
st.lower_shear_web_1.layer['biax, right'],
st.lower_shear_web_2.layer['biax, left'],
st.lower_shear_web_2.layer['foam'],
st.lower_shear_web_2.layer['biax, right'],
st.lower_shear_web_3.layer['biax, left'],
st.lower_shear_web_3.layer['foam'],
st.lower_shear_web_3.layer['biax, right']
],
alt_TE_reinforcement=True,
soft_warning=True)
| gpl-3.0 |
iamfullofspam/hep_ml | hep_ml/reweight.py | 1 | 18499 | """
**hep_ml.reweight** contains reweighting algorithms.
Reweighting is procedure of finding such weights for original distribution,
that make distribution of one or several variables identical in original distribution and target distribution.
Typical application of this technique in HEP is reweighting of Monte-Carlo simulation results to minimize
disagreement between simulated data and real data.
Frequently the reweighting rule is trained on one part of data (normalization channel)
and applied to different (signal channel).
Remark: if each variable has identical distribution in two samples,
this doesn't imply that multidimensional distributions are equal (almost surely they aren't).
Aim of reweighters is to get identical multidimensional distributions.
Algorithms are implemented as estimators, fitting and reweighting stages are split.
Fitted reweighter can be applied many times to different data, pickled and so on.
Folding over a reweighter is also available. This provides an easy way to run k-fold cross-validation.
It is also a nice way to combine the weight predictions of the trained reweighters.
Examples
________
The most common use case is reweighting of Monte-Carlo simulation results to sPlotted real data.
(original weights are all equal to 1 and could be skipped, but left here for example)
>>> from hep_ml.reweight import BinsReweighter, GBReweighter
>>> original_weights = numpy.ones(len(MC_data))
>>> reweighter = BinsReweighter(n_bins=100, n_neighs=3)
>>> reweighter.fit(original=MC_data, target=RealData,
>>> original_weight=original_weights, target_weight=sWeights)
>>> MC_weights = reweighter.predict_weights(MC_data, original_weight=original_weights)
The same example for `GBReweighter`:
>>> reweighter = GBReweighter(max_depth=2, gb_args={'subsample': 0.5})
>>> reweighter.fit(original=MC_data, target=RealData, target_weight=sWeights)
>>> MC_weights = reweighter.predict_weights(MC_data)
Folding over reweighter:
>>> reweighter_base = GBReweighter(max_depth=2, gb_args={'subsample': 0.5})
>>> reweighter = FoldingReweighter(reweighter_base, n_folds=3)
>>> reweighter.fit(original=MC_data, target=RealData, target_weight=sWeights)
If the same data that was used in the training process is predicted by the folding reweighter,
the weight predictions will be unbiased: each reweighter predicts only the part of the data that was not used during its training.
>>> MC_weights = reweighter.predict_weights(MC_data)
"""
from __future__ import division, print_function, absolute_import
from sklearn.base import BaseEstimator
from sklearn.utils import check_random_state
from sklearn import clone
from scipy.ndimage import gaussian_filter
import numpy
from .commonutils import check_sample_weight, weighted_quantile
from . import gradientboosting as gb
from . import losses
__author__ = 'Alex Rogozhnikov, Tatiana Likhomanenko'
__all__ = ['BinsReweighter', 'GBReweighter', 'FoldingReweighter']
def _bincount_nd(x, weights, shape):
"""
Does the same thing as numpy.bincount, but allows binning in several integer variables.
:param x: numpy.array of shape [n_samples, n_features] with non-negative integers
:param weights: weights of samples, array of shape [n_samples]
:param shape: shape of result, should be greater than the maximal value
:return: weighted number of event in each bin, of shape=shape
"""
assert len(weights) == len(x), 'length of weight is different: {} {}'.format(len(x), len(weights))
assert x.shape[1] == len(shape), 'wrong length of shape: {} {}'.format(x.shape[1], len(shape))
maximals = numpy.max(x, axis=0)
assert numpy.all(maximals < shape), 'small shape passed: {} {}'.format(maximals, shape)
result = numpy.zeros(shape, dtype=float)
numpy.add.at(result, tuple(x.T), weights)
return result
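# --- Illustrative sketch (added, not part of the original module) ------------
# A minimal, hedged usage example of _bincount_nd, assuming two integer
# features binned into a 4x4 grid. The helper name `_example_bincount_nd` and
# the toy data are assumptions for illustration only; wrapping the example in a
# function keeps it from running on import.
def _example_bincount_nd():
    x = numpy.array([[0, 0], [1, 2], [1, 2], [3, 3]])
    weights = numpy.array([1.0, 0.5, 0.5, 2.0])
    hist = _bincount_nd(x, weights=weights, shape=(4, 4))
    # hist[0, 0] == 1.0, hist[1, 2] == 1.0, hist[3, 3] == 2.0
    return hist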
class ReweighterMixin(object):
"""Supplementary class which shows the interface of reweighter.
Reweighters should be derived from this class."""
n_features_ = None
def _normalize_input(self, data, weights, normalize=True):
""" Normalize input of reweighter
:param data: array like of shape [n_samples] or [n_samples, n_features]
:param weights: array-like of shape [n_samples] or None
:return: tuple with
data - numpy.array of shape [n_samples, n_features]
weights - numpy.array of shape [n_samples] with mean = 1.
"""
weights = check_sample_weight(data, sample_weight=weights, normalize=normalize)
data = numpy.array(data)
if len(data.shape) == 1:
data = data[:, numpy.newaxis]
if self.n_features_ is None:
self.n_features_ = data.shape[1]
assert self.n_features_ == data.shape[1], \
'number of features is wrong: {} {}'.format(self.n_features_, data.shape[1])
return data, weights
def fit(self, original, target, original_weight, target_weight):
raise NotImplementedError('To be overridden in descendants')
def predict_weights(self, original, original_weight=None):
raise NotImplementedError('To be overridden in descendants')
class BinsReweighter(BaseEstimator, ReweighterMixin):
def __init__(self, n_bins=200, n_neighs=3.):
"""
Use bins for reweighting. Bins' edges are computed using quantiles along each axis
(which is better than bins of even size).
This method works fine for 1d/2d histograms,
while being unstable or inaccurate for higher dimensions.
To make the computed rule smoother and more stable, after computing weights in bins,
a gaussian filter is applied (so the reweighting coefficient also includes information from neighbouring bins).
:param int n_bins: how many bins to use for each input variable.
:param float n_neighs: size of gaussian filter (in bins).
This parameter is responsible for the tradeoff between the stability of the rule and the accuracy of predictions.
With an increase of n_neighs the reweighting rule becomes more stable.
"""
self.n_bins = n_bins
self.n_neighs = n_neighs
# if number of events in bins is less than this value, number of events is clipped.
self.min_in_the_bin = 1.
def compute_bin_indices(self, data):
"""
Compute id of bin along each axis.
:param data: data, array-like of shape [n_samples, n_features]
with the same order of features as in training
:return: numpy.array of shape [n_samples, n_features] with integers, each from [0, n_bins - 1]
"""
bin_indices = []
for axis, axis_edges in enumerate(self.edges):
bin_indices.append(numpy.searchsorted(axis_edges, data[:, axis]))
return numpy.array(bin_indices).T
def fit(self, original, target, original_weight=None, target_weight=None):
"""
Prepare reweighting formula by computing histograms.
:param original: values from original distribution, array-like of shape [n_samples, n_features]
:param target: values from target distribution, array-like of shape [n_samples, n_features]
:param original_weight: weights for samples of original distributions
:param target_weight: weights for samples of target distribution
:return: self
"""
self.n_features_ = None
original, original_weight = self._normalize_input(original, original_weight)
target, target_weight = self._normalize_input(target, target_weight)
target_perc = numpy.linspace(0, 1, self.n_bins + 1)[1:-1]
self.edges = []
for axis in range(self.n_features_):
self.edges.append(weighted_quantile(target[:, axis], quantiles=target_perc, sample_weight=target_weight))
bins_weights = []
for data, weights in [(original, original_weight), (target, target_weight)]:
bin_indices = self.compute_bin_indices(data)
bin_w = _bincount_nd(bin_indices, weights=weights, shape=[self.n_bins] * self.n_features_)
smeared_weights = gaussian_filter(bin_w, sigma=self.n_neighs, truncate=2.5)
bins_weights.append(smeared_weights.clip(self.min_in_the_bin))
bin_orig_weights, bin_targ_weights = bins_weights
self.transition = bin_targ_weights / bin_orig_weights
return self
def predict_weights(self, original, original_weight=None):
"""
Returns corrected weights. Result is computed as original_weight * reweighter_multipliers.
:param original: values from original distribution of shape [n_samples, n_features]
:param original_weight: weights of samples before reweighting.
:return: numpy.array of shape [n_samples] with new weights.
"""
original, original_weight = self._normalize_input(original, original_weight)
bin_indices = self.compute_bin_indices(original)
results = self.transition[tuple(bin_indices.T)] * original_weight
return results
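# --- Illustrative sketch (added, not part of the original module) ------------
# Minimal 1-d usage of BinsReweighter on toy gaussian samples. The helper name,
# the sample sizes and the distribution parameters are assumptions chosen only
# for illustration.
def _example_bins_reweighter():
    rng = numpy.random.RandomState(0)
    original = rng.normal(loc=0.0, scale=1.0, size=10000)
    target = rng.normal(loc=0.5, scale=1.2, size=10000)
    reweighter = BinsReweighter(n_bins=50, n_neighs=3.)
    reweighter.fit(original, target)
    new_weights = reweighter.predict_weights(original)
    # the weighted mean of `original` should move towards the target mean (~0.5)
    return numpy.average(original, weights=new_weights)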
class GBReweighter(BaseEstimator, ReweighterMixin):
def __init__(self,
n_estimators=40,
learning_rate=0.2,
max_depth=3,
min_samples_leaf=200,
loss_regularization=5.,
gb_args=None):
"""
Gradient Boosted Reweighter - a reweighter algorithm based on ensemble of regression trees.
Parameters have the same role, as in gradient boosting.
Special loss function is used, trees are trained to maximize symmetrized binned chi-squared statistics.
Training takes much more time than for bin-based versions, but `GBReweighter` is capable
of working in high dimensions while keeping the reweighting rule reliable and precise
(and even smooth if many trees are used).
:param n_estimators: number of trees
:param learning_rate: float from [0, 1]. A smaller learning rate requires more trees,
but makes the reweighting rule more stable.
:param max_depth: maximal depth of trees
:param min_samples_leaf: minimal number of events in the leaf.
:param loss_regularization: float, approximately equal to the number of events
that the algorithm 'puts' in each leaf to prevent the weights from exploding.
:param gb_args: other parameters passed to gradient boosting.
Those are: subsample, min_samples_split, max_features, max_leaf_nodes
For example: gb_args = {'subsample': 0.8, 'max_features': 0.75}
See :class:`hep_ml.gradientboosting.UGradientBoostingClassifier`.
"""
self.learning_rate = learning_rate
self.n_estimators = n_estimators
self.max_depth = max_depth
self.min_samples_leaf = min_samples_leaf
self.gb_args = gb_args
self.loss_regularization = loss_regularization
def fit(self, original, target, original_weight=None, target_weight=None):
"""
Prepare reweighting formula by training sequence of trees.
:param original: values from original distribution, array-like of shape [n_samples, n_features]
:param target: values from target distribution, array-like of shape [n_samples, n_features]
:param original_weight: weights for samples of original distributions
:param target_weight: weights for samples of target distribution
:return: self
"""
self.n_features_ = None
if self.gb_args is None:
self.gb_args = {}
original, original_weight = self._normalize_input(original, original_weight)
target, target_weight = self._normalize_input(target, target_weight)
loss = losses.ReweightLossFunction(regularization=self.loss_regularization)
self.gb = gb.UGradientBoostingClassifier(loss=loss,
n_estimators=self.n_estimators,
max_depth=self.max_depth,
min_samples_leaf=self.min_samples_leaf,
learning_rate=self.learning_rate,
**self.gb_args)
data = numpy.vstack([original, target])
target = numpy.array([1] * len(original) + [0] * len(target))
weights = numpy.hstack([original_weight, target_weight])
self.gb.fit(data, target, sample_weight=weights)
return self
def predict_weights(self, original, original_weight=None):
"""
Returns corrected weights. Result is computed as original_weight * reweighter_multipliers.
:param original: values from original distribution of shape [n_samples, n_features]
:param original_weight: weights of samples before reweighting.
:return: numpy.array of shape [n_samples] with new weights.
"""
original, original_weight = self._normalize_input(original, original_weight)
multipliers = numpy.exp(self.gb.decision_function(original))
return multipliers * original_weight
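# --- Illustrative sketch (added, not part of the original module) ------------
# Minimal 2-d usage of GBReweighter; the helper name, the toy data and the
# parameter values below are assumptions chosen only to keep the example fast,
# not recommendations.
def _example_gb_reweighter():
    rng = numpy.random.RandomState(0)
    original = rng.normal(size=(5000, 2))
    target = rng.normal(loc=0.3, size=(5000, 2))
    reweighter = GBReweighter(n_estimators=20, max_depth=2, min_samples_leaf=100)
    reweighter.fit(original, target)
    # multipliers that pull the original sample towards the shifted target
    return reweighter.predict_weights(original)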
class FoldingReweighter(BaseEstimator, ReweighterMixin):
def __init__(self, base_reweighter, n_folds=2, random_state=None, verbose=True):
"""
This meta-regressor implements a folding algorithm over a reweighter:
* training data is split into n equal parts;
* we train n reweighters, each one trained using n-1 folds
To build unbiased predictions for data, pass the **same** dataset (with the same order of events)
as in training to `predict_weights`, in which case
each event is predicted by a reweighter that did not use it during training.
To use information from not one, but several reweighters during predictions,
provide appropriate voting function. Examples of voting function:
>>> voting = lambda x: numpy.mean(x, axis=0)
>>> voting = lambda x: numpy.median(x, axis=0)
:param base_reweighter: base reweighter object
:type base_reweighter: ReweighterMixin
:param n_folds: number of folds
:param random_state: random state for reproducibility
:type random_state: None or int or RandomState
:param bool verbose:
"""
self.n_folds = n_folds
self.random_state = random_state
self.verbose = verbose
self.base_reweighter = base_reweighter
self.reweighters = []
self._random_number = None
self.train_length = None
def _get_folds_column(self, length):
"""
Return special column with indices of folds for all events.
"""
if self._random_number is None:
self._random_number = check_random_state(self.random_state).randint(0, 100000)
folds_column = numpy.arange(length) % self.n_folds
folds_column = numpy.random.RandomState(self._random_number).permutation(folds_column)
return folds_column
def fit(self, original, target, original_weight=None, target_weight=None):
"""
Prepare reweighting formula by training the base reweighter on each fold.
:param original: values from original distribution, array-like of shape [n_samples, n_features]
:param target: values from target distribution, array-like of shape [n_samples, n_features]
:param original_weight: weights for samples of original distributions
:param target_weight: weights for samples of target distribution
:return: self
"""
original, original_weight = self._normalize_input(original, original_weight, normalize=False)
target, target_weight = self._normalize_input(target, target_weight, normalize=False)
folds_original = self._get_folds_column(len(original))
folds_target = self._get_folds_column(len(target))
for _ in range(self.n_folds):
self.reweighters.append(clone(self.base_reweighter))
original = numpy.array(original)
target = numpy.array(target)
for i in range(self.n_folds):
self.reweighters[i].fit(original[folds_original != i, :], target[folds_target != i, :],
original_weight=original_weight[folds_original != i],
target_weight=target_weight[folds_target != i])
self.train_length = len(original)
return self
def predict_weights(self, original, original_weight=None, vote_function=None):
"""
Returns corrected weights. Result is computed as original_weight * reweighter_multipliers.
:param original: values from original distribution of shape [n_samples, n_features]
:param original_weight: weights of samples before reweighting.
:return: numpy.array of shape [n_samples] with new weights.
:param vote_function: if using averaging over predictions of folds, this function shall be passed.
For instance: lambda x: numpy.mean(x, axis=0), which means averaging result over all folds.
Another useful option is lambda x: numpy.median(x, axis=0)
"""
original, original_weight = self._normalize_input(original, original_weight, normalize=False)
if vote_function is not None:
if self.verbose:
print('KFold prediction with voting function')
results = []
for reweighter in self.reweighters:
results.append(reweighter.predict_weights(original, original_weight=original_weight))
# results: [n_classifiers, n_samples], reduction is expected over 0th axis
results = numpy.array(results)
return vote_function(results)
else:
if self.verbose:
if len(original) != self.train_length:
print('KFold prediction using random reweighter '
'(length of data passed not equal to length of train)')
else:
print('KFold prediction using folds column')
folds_original = self._get_folds_column(len(original))
new_original_weight = numpy.zeros(len(original))
original = numpy.asarray(original)
for i in range(self.n_folds):
new_original_weight[folds_original == i] = self.reweighters[i].predict_weights(
original[folds_original == i, :], original_weight=original_weight[folds_original == i])
return new_original_weight
| apache-2.0 |
wanggang3333/scikit-learn | sklearn/decomposition/tests/test_pca.py | 199 | 10949 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
# PCA on dense arrays
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
# mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
# Check that PCA and RandomizedPCA agree on the explained variance
# and that it matches the empirical variances
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 3)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, axis=0))
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by RandomizedPCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by RandomizedPCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that RandomizedPCA is inversible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
# Test that probabilistic PCA correctly separated different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
| bsd-3-clause |
chrsrds/scikit-learn | sklearn/cluster/mean_shift_.py | 2 | 16511 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Martino Sorbaro <[email protected]>
import numpy as np
import warnings
from joblib import Parallel, delayed
from collections import defaultdict
from ..utils.validation import check_is_fitted
from ..utils import check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0,
n_jobs=None):
"""Estimate the bandwidth to use with the mean-shift algorithm.
Note that this function takes time at least quadratic in n_samples. For large
datasets, it is wise to set the n_samples parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int, RandomState instance or None (default)
The generator used to randomly select the samples from input points
for bandwidth estimation. Use an int to make the randomness
deterministic.
See :term:`Glossary <random_state>`.
n_jobs : int or None, optional (default=None)
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
X = check_array(X)
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
n_neighbors = int(X.shape[0] * quantile)
if n_neighbors < 1: # cannot fit NearestNeighbors with n_neighbors = 0
n_neighbors = 1
nbrs = NearestNeighbors(n_neighbors=n_neighbors,
n_jobs=n_jobs)
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
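# --- Illustrative sketch (added, not part of the original module) ------------
# Minimal example of estimate_bandwidth on two toy 2-d blobs; the helper name
# and the data are assumptions, and the returned value depends on `quantile`.
def _example_estimate_bandwidth():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 5])
    return estimate_bandwidth(X, quantile=0.3)  # a positive float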
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
# For each seed, climb gradient until convergence or max_iter
bandwidth = nbrs.get_params()['radius']
stop_thresh = 1e-3 * bandwidth # when mean has converged
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (np.linalg.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
return tuple(my_mean), len(points_within)
completed_iterations += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
n_jobs=None):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
Points used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if it has not converged yet.
n_jobs : int or None, optional (default=None)
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 0.17
Parallel Execution using *n_jobs*.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
For an example, see :ref:`examples/cluster/plot_mean_shift.py
<sphx_glr_auto_examples_cluster_plot_mean_shift.py>`.
"""
if bandwidth is None:
bandwidth = estimate_bandwidth(X, n_jobs=n_jobs)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None,"
" got %f" % bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
# We use n_jobs=1 because this will be used in nested calls under
# parallel calls to _mean_shift_single_seed so there is no need
# for further parallelism.
nbrs = NearestNeighbors(radius=bandwidth, n_jobs=1).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=n_jobs)(
delayed(_mean_shift_single_seed)
(seed, X, nbrs, max_iter) for seed in seeds)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i] is not None:
center_intensity_dict[all_res[i][0]] = all_res[i][1]
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy \
or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: (tup[1], tup[0]),
reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth,
n_jobs=n_jobs).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1, n_jobs=n_jobs).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
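# --- Illustrative sketch (added, not part of the original module) ------------
# Minimal call of the functional interface on two toy clusters; the helper name
# and the data are assumptions for illustration only.
def _example_mean_shift():
    X = np.array([[1.0, 1.0], [1.2, 0.9], [0.9, 1.1],
                  [5.0, 5.0], [5.1, 4.9], [4.9, 5.2]])
    cluster_centers, labels = mean_shift(X, bandwidth=1.0)
    # expect two centers, near (1, 1) and (5, 5), and labels in {0, 1}
    return cluster_centers, labels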
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in bin_sizes.items() if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f,"
" using data points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
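# --- Illustrative sketch (added, not part of the original module) ------------
# Minimal example of the binned seeding; the helper name and the toy points are
# assumptions for illustration only.
def _example_get_bin_seeds():
    X = np.array([[0.1, 0.1], [0.12, 0.08], [3.0, 3.1], [3.05, 2.95]])
    # with bin_size=1.0 and min_bin_freq=2 both occupied bins qualify,
    # giving seeds at roughly (0, 0) and (3, 3)
    return get_bin_seeds(X, bin_size=1.0, min_bin_freq=2)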
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
n_jobs : int or None, optional (default=None)
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Examples
--------
>>> from sklearn.cluster import MeanShift
>>> import numpy as np
>>> X = np.array([[1, 1], [2, 1], [1, 0],
... [4, 7], [3, 5], [3, 6]])
>>> clustering = MeanShift(bandwidth=2).fit(X)
>>> clustering.labels_
array([1, 1, 1, 0, 0, 0])
>>> clustering.predict([[0, 0], [5, 5]])
array([1, 0])
>>> clustering
MeanShift(bandwidth=2)
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
a Ball Tree to look up members of each kernel, the complexity will tend
towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, n_jobs=None):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Perform clustering.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
y : Ignored
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all, n_jobs=self.n_jobs)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
| bsd-3-clause |
hennersz/pySpace | basemap/examples/animate.py | 4 | 4681 | # example using matplotlib.animation to create a movie
# reads data over http - needs an active internet connection.
import numpy as np
import matplotlib.pyplot as plt
import numpy.ma as ma
import datetime, time
from mpl_toolkits.basemap import Basemap, shiftgrid
from netCDF4 import Dataset as NetCDFFile, date2index, num2date
import matplotlib.animation as animation
# times for March 1993 'storm of the century'
date1 = datetime.datetime(1993,3,10,0)
date2 = datetime.datetime(1993,3,17,0)
# set OpenDAP server URL.
URL="http://nomad2.ncep.noaa.gov:9090/dods/reanalyses/reanalysis-2/6hr/pgb/pgb"
try:
data = NetCDFFile(URL)
except:
raise IOError('opendap server not providing the requested data')
# read lats,lons,times.
latitudes = data.variables['lat'][:]
longitudes = data.variables['lon'][:].tolist()
times = data.variables['time']
ntime1 = date2index(date1,times,calendar='standard')
ntime2 = date2index(date2,times,calendar='standard')
# get sea level pressure and 10-m wind data.
slpdata = data.variables['presmsl']
udata = data.variables['ugrdprs']
vdata = data.variables['vgrdprs']
# mult slp by 0.01 to put in units of millibars.
slpin = 0.01*slpdata[ntime1:ntime2+1,:,:]
uin = udata[ntime1:ntime2+1,0,:,:]
vin = vdata[ntime1:ntime2+1,0,:,:]
dates = num2date(times[ntime1:ntime2+1], times.units, calendar='standard')
# add cyclic points
slp = np.zeros((slpin.shape[0],slpin.shape[1],slpin.shape[2]+1),np.float64)
slp[:,:,0:-1] = slpin; slp[:,:,-1] = slpin[:,:,0]
u = np.zeros((uin.shape[0],uin.shape[1],uin.shape[2]+1),np.float64)
u[:,:,0:-1] = uin; u[:,:,-1] = uin[:,:,0]
v = np.zeros((vin.shape[0],vin.shape[1],vin.shape[2]+1),np.float64)
v[:,:,0:-1] = vin; v[:,:,-1] = vin[:,:,0]
longitudes.append(360.); longitudes = np.array(longitudes)
# make 2-d grid of lons, lats
lons, lats = np.meshgrid(longitudes,latitudes)
# make orthographic basemap.
m = Basemap(resolution='c',projection='ortho',lat_0=60.,lon_0=-60.)
# create figure, add axes (leaving room for colorbar on right)
fig = plt.figure()
ax = fig.add_axes([0.1,0.1,0.7,0.7])
# set desired contour levels.
clevs = np.arange(960,1061,5)
# compute native x,y coordinates of grid.
x, y = m(lons, lats)
# define parallels and meridians to draw.
parallels = np.arange(-80.,90,20.)
meridians = np.arange(0.,360.,20.)
# number of repeated frames at beginning and end is n1.
nframe = 0; n1 = 10
pos = ax.get_position()
l, b, w, h = pos.bounds
# loop over times, make contour plots, draw coastlines,
# parallels, meridians and title.
nt = 0; date = dates[nt]
CS1 = m.contour(x,y,slp[nt,:,:],clevs,linewidths=0.5,colors='k')
CS2 = m.contourf(x,y,slp[nt,:,:],clevs,cmap=plt.cm.RdBu_r)
# plot wind vectors on lat/lon grid.
# rotate wind vectors to map projection coordinates.
#urot,vrot = m.rotate_vector(u[nt,:,:],v[nt,:,:],lons,lats)
# plot wind vectors over map.
#Q = m.quiver(x,y,urot,vrot,scale=500)
# plot wind vectors on projection grid (looks better).
# first, shift grid so it goes from -180 to 180 (instead of 0 to 360
# in longitude). Otherwise, interpolation is messed up.
ugrid,newlons = shiftgrid(180.,u[nt,:,:],longitudes,start=False)
vgrid,newlons = shiftgrid(180.,v[nt,:,:],longitudes,start=False)
# transform vectors to projection grid.
urot,vrot,xx,yy = m.transform_vector(ugrid,vgrid,newlons,latitudes,51,51,returnxy=True,masked=True)
# plot wind vectors over map.
Q = m.quiver(xx,yy,urot,vrot,scale=500,zorder=10)
# make quiver key.
qk = plt.quiverkey(Q, 0.1, 0.1, 20, '20 m/s', labelpos='W')
# draw coastlines, parallels, meridians, title.
m.drawcoastlines(linewidth=1.5)
m.drawparallels(parallels)
m.drawmeridians(meridians)
txt = plt.title('SLP and Wind Vectors '+str(date))
# plot colorbar on a separate axes (only for first frame)
cax = plt.axes([l+w-0.05, b, 0.03, h]) # setup colorbar axes
fig.colorbar(CS2,drawedges=True, cax=cax) # draw colorbar
cax.text(0.0,-0.05,'mb')
plt.axes(ax) # reset current axes
def updatefig(nt):
global CS1,CS2,Q
date = dates[nt]
for c in CS1.collections: c.remove()
CS1 = m.contour(x,y,slp[nt,:,:],clevs,linewidths=0.5,colors='k')
for c in CS2.collections: c.remove()
CS2 = m.contourf(x,y,slp[nt,:,:],clevs,cmap=plt.cm.RdBu_r)
ugrid,newlons = shiftgrid(180.,u[nt,:,:],longitudes,start=False)
vgrid,newlons = shiftgrid(180.,v[nt,:,:],longitudes,start=False)
urot,vrot,xx,yy = m.transform_vector(ugrid,vgrid,newlons,latitudes,51,51,returnxy=True,masked=True)
txt.set_text('SLP and Wind Vectors '+str(date))
Q.set_UVC(urot,vrot)
ani = animation.FuncAnimation(fig, updatefig, frames=len(dates))
#ani.save('movie.mp4')
plt.show()
| gpl-3.0 |
Blitzman/CarND-Advanced-Lane-Lines | all.py | 1 | 19298 | import cv2
import glob
import numpy as np
import seaborn as sbs
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from moviepy.editor import VideoFileClip
class Line():
def __init__ (self):
self.detected = False
self.recent_xfitted = []
self.bestx = None
self.best_fit = None
self.current_fit = [np.array([False])]
self.radius_of_curvature = None
self.line_base_pos = None
self.diffs = np.array([0, 0, 0], dtype='float')
self.allx = None
self.ally = None
###################################################################################################
## Camera Calibration
###################################################################################################
print('Camera calibration...')
nx = 9 # Number of columns
ny = 6 # Number of rows
obj_points = [] # 3D points in real-world space
img_points = [] # 2D points in image plane
objp = np.zeros((nx * ny, 3), np.float32)
objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)
calibration_filenames = glob.glob("camera_cal/calibration*.jpg")
image_shape = []
for calibration_filename in calibration_filenames:
print('Getting object and image points for ' + calibration_filename)
calibration_img = mpimg.imread(calibration_filename)
grayscale = cv2.cvtColor(calibration_img, cv2.COLOR_RGB2GRAY)
ret, corners = cv2.findChessboardCorners(grayscale, (nx, ny), None)
if ret == True:
image_shape = grayscale.shape[::-1]
img_points.append(corners)
obj_points.append(objp)
ret, camera_mtx, dist_coeffs, r_vecs, t_vecs = cv2.calibrateCamera(obj_points, img_points, image_shape, None, None)
print('Calibration done...')
print(camera_mtx)
print(dist_coeffs)
print(r_vecs)
print(t_vecs)
###################################################################################################
## Test Undistort
###################################################################################################
test_undistort = True
if test_undistort == True:
for calibration_filename in calibration_filenames:
calibration_img = mpimg.imread(calibration_filename)
undistorted = cv2.undistort(calibration_img, camera_mtx, dist_coeffs, None, camera_mtx)
# Plot original and undistorted image
sbs.set_style("dark")
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 3))
ax1.imshow(calibration_img)
ax1.set_title('Original Image')
ax2.imshow(undistorted)
ax2.set_title('Undistorted Image')
f.savefig("camera_undistorted/" + calibration_filename)
###################################################################################################
## Pipeline
###################################################################################################
left_line = Line()
right_line = Line()
def pipeline(img, filename = None):
###############################################################################################
## Undistort Image
###############################################################################################
undistorted_image = None
if filename != None:
print()
print("Undistorting...")
undistorted_image = cv2.undistort(img, camera_mtx, dist_coeffs, None, camera_mtx)
# Plot original and undistorted image
if filename != None:
sbs.set_style("dark")
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 3))
ax1.imshow(img)
ax1.set_title('Original Image')
ax2.imshow(undistorted_image)
ax2.set_title('Undistorted Image')
f.savefig("test_undistorted/" + filename)
###################################################################################################
## Perspective Transform
###################################################################################################
if filename != None:
print()
print("Perspective transformations...")
def perspective_transform(img, src_points, dst_points):
img_size = (img.shape[1], img.shape[0])
src = np.float32(src_points)
dst = np.float32(dst_points)
M = cv2.getPerspectiveTransform(src, dst)
warped = cv2.warpPerspective(img, M, img_size)
return warped, M
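# Note (added for clarity): M maps the source quadrilateral onto the
# destination rectangle (bird's-eye view). If the lane overlay later needs to
# be projected back onto the road image, the inverse matrix can be obtained by
# calling cv2.getPerspectiveTransform with the src and dst arguments swapped.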
transformed_image = None
transformation_matrix = None
src_tl = [570, 470]
src_tr = [720, 470]
src_br = [1130, 720]
src_bl = [200, 720]
dst_tl = [320, 0]
dst_tr = [980, 0]
dst_br = [980, 720]
dst_bl = [320, 720]
src_points = [src_tl, src_tr, src_br, src_bl]
dst_points = [dst_tl, dst_tr, dst_br, dst_bl]
transformed_image, transformation_matrix = perspective_transform(undistorted_image, src_points, dst_points)
undistorted_image_lines = undistorted_image.copy()
cv2.line(undistorted_image_lines, tuple(src_tl), tuple(src_tr), [0, 0, 255], 8)
cv2.line(undistorted_image_lines, tuple(src_tr), tuple(src_br), [0, 0, 255], 8)
cv2.line(undistorted_image_lines, tuple(src_br), tuple(src_bl), [0, 0, 255], 8)
cv2.line(undistorted_image_lines, tuple(src_bl), tuple(src_tl), [0, 0, 255], 8)
transformed_image_lines = transformed_image.copy()
cv2.line(transformed_image_lines, tuple(dst_tl), tuple(dst_tr), [0, 0, 255], 8)
cv2.line(transformed_image_lines, tuple(dst_tr), tuple(dst_br), [0, 0, 255], 8)
cv2.line(transformed_image_lines, tuple(dst_br), tuple(dst_bl), [0, 0, 255], 8)
cv2.line(transformed_image_lines, tuple(dst_bl), tuple(dst_tl), [0, 0, 255], 8)
# Plot original and transformed image
if filename != None:
sbs.set_style("dark")
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 3))
ax1.set_title('Original Image')
ax1.imshow(undistorted_image_lines)
ax2.set_title('Transformed Image')
ax2.imshow(transformed_image_lines)
f.savefig("test_transformed/" + filename)
###################################################################################################
## Thresholding
###################################################################################################
def threshold_x_gradient (img, sobel_size = 3, threshold = [0, 255]):
grayscale = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
sobel_x = cv2.Sobel(grayscale, cv2.CV_64F, 1, 0)
abs_sobel_x = np.absolute(sobel_x)
scaled_sobel_x = np.uint8(255 * abs_sobel_x / np.max(abs_sobel_x))
binary_output = np.zeros_like(scaled_sobel_x)
binary_output[(scaled_sobel_x >= threshold[0]) & (scaled_sobel_x <= threshold[1])] = 1
return binary_output
def threshold_hls_s_gradient (img, threshold = [0, 255]):
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
s_channel = hls[:, :, 2]
binary_output = np.zeros_like(s_channel)
binary_output[(s_channel >= threshold[0]) & (s_channel <= threshold[1])] = 1
return binary_output
def threshold_hsv (img, threshold_low = np.array([0, 0, 0]), threshold_high = np.array([255, 255, 255])):
hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
mask = cv2.inRange(hsv, threshold_low, threshold_high)
binary_output = np.zeros_like(hsv[:, :, 2])
binary_output[(mask > 0)] = 1
return binary_output
def sobel (img, sobel_size = 3, sobel_x = 0, sobel_y = 0, threshold = [0, 255]):
sobel = cv2.Sobel(img, cv2.CV_64F, sobel_x, sobel_y)
abs_sobel = np.absolute(sobel)
scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))
binary_output = np.zeros_like(img)
binary_output[(scaled_sobel >= threshold[0]) & (scaled_sobel <= threshold[1])] = 1
return binary_output
def threshold_sobel_ls (img, sobel_size = 3, threshold = [0, 255]):
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
s_channel = hls[:, :, 2]
l_channel = hls[:, :, 1]
sobel_s_x = sobel(s_channel, sobel_size, 1, 0, threshold)
sobel_s_y = sobel(s_channel, sobel_size, 0, 1, threshold)
sobel_l_x = sobel(l_channel, sobel_size, 1, 0, threshold)
sobel_l_y = sobel(l_channel, sobel_size, 0, 1, threshold)
binary_output = np.zeros_like(s_channel)
## Y is not taken into account
binary_output[(sobel_s_x == 1) | (sobel_l_x == 1)] = 1
return binary_output
def gaussian_blur (img, kernel_size = 3):
blurred = cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
return blurred
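# Note (added for clarity): the code below combines three binary masks:
# an HSV range for yellow lane paint, an HSV range for white lane paint,
# and Sobel-x gradients on the HLS L and S channels. A pixel is kept if any
# mask fires, and the result is blurred to suppress isolated noise pixels.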
thresholded_image = None
if filename != None:
print()
print("Thresholding...")
#x_binary = threshold_x_gradient(undistorted_image, 3, [20, 100])
#s_binary = threshold_hls_s_gradient(undistorted_image, [170, 255])
yellow_binary = threshold_hsv(transformed_image, np.array([20, 100, 100]), np.array([30, 255, 255]))
white_binary = threshold_hsv(transformed_image, np.array([0, 0, 223]), np.array([255, 32, 255]))
sobel_binary = threshold_sobel_ls(transformed_image, 5, [50, 255])
colored_binary = np.dstack((white_binary, yellow_binary, sobel_binary))
thresholded_image = np.zeros_like(yellow_binary)
thresholded_image[(white_binary == 1) | (yellow_binary == 1) | (sobel_binary == 1)] = 1
thresholded_image = gaussian_blur(thresholded_image, 25)
# Plot original and thresholded image
if filename != None:
sbs.set_style("dark")
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 3))
ax1.set_title('Original Image')
ax1.imshow(transformed_image)
ax2.set_title('Stacked Thresholds')
ax2.imshow(np.uint8(colored_binary * 255.999))
f.savefig("test_thresholded/" + filename)
###################################################################################################
## Finding Lines
###################################################################################################
if filename != None:
print()
print("Line finding...")
def find_peaks (img, filename = None):
histogram = np.sum(img[img.shape[0]//2:, :], axis = 0)
m = np.int(histogram.shape[0]/2)
l = np.argmax(histogram[:m])
r = np.argmax(histogram[m:]) + m
if (filename != None):
sbs.set_style("dark")
f = plt.figure()
plt.title('Histogram')
plt.plot(histogram)
plt.imshow(np.uint8(img * 255.999))
f.savefig("test_histogram/" + filename)
return m, l, r
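# Note (added for clarity): sliding_window_lines starts from the two histogram
# peaks found above and stacks n_windows vertical windows per line. Nonzero
# pixels inside each window are collected, and whenever a window contains more
# than minpix pixels the next window is re-centred on their mean x position,
# so the search follows the curvature of the lane.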
def sliding_window_lines (img, left, right, n_windows = 9, margin = 100, minpix = 100):
window_height = np.int(img.shape[0] / n_windows)
nonzero = img.nonzero()
nonzero_y = np.array(nonzero[0])
nonzero_x = np.array(nonzero[1])
left_current = left
right_current = right
left_lane_indices = []
right_lane_indices = []
left_rects = []
right_rects = []
for window in range(n_windows):
window_y_low = img.shape[0] - (window + 1) * window_height
window_y_high = img.shape[0] - window * window_height
window_x_left_low = left_current - margin
window_x_left_high = left_current + margin
window_x_right_low = right_current - margin
window_x_right_high = right_current + margin
left_rects.append([(window_x_left_low, window_y_low), (window_x_left_high, window_y_high)])
right_rects.append([(window_x_right_low, window_y_low), (window_x_right_high, window_y_high)])
good_left_indices = ((nonzero_y >= window_y_low) & (nonzero_y < window_y_high) & (nonzero_x >= window_x_left_low) & (nonzero_x < window_x_left_high)).nonzero()[0]
good_right_indices = ((nonzero_y >= window_y_low) & (nonzero_y < window_y_high) & (nonzero_x >= window_x_right_low) & (nonzero_x < window_x_right_high)).nonzero()[0]
left_lane_indices.append(good_left_indices)
right_lane_indices.append(good_right_indices)
if len(good_left_indices) > minpix:
left_current = np.int(np.mean(nonzero_x[good_left_indices]))
if len(good_right_indices) > minpix:
right_current = np.int(np.mean(nonzero_x[good_right_indices]))
left_lane_indices = np.concatenate(left_lane_indices)
right_lane_indices = np.concatenate(right_lane_indices)
left_x = nonzero_x[left_lane_indices]
left_y = nonzero_y[left_lane_indices]
right_x = nonzero_x[right_lane_indices]
right_y = nonzero_y[right_lane_indices]
return left_rects, left_x, left_y, right_rects, right_x, right_y
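    # Note: each window is re-centred on the mean x of the hot pixels it captures whenever
    # more than minpix pixels fall inside it; the returned left/right pixel coordinates
    # feed np.polyfit below, while the rectangle corner pairs are only used to draw the
    # debug overlay.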
lines_image = None
    plot_y = np.linspace(0, transformed_image.shape[0]-1, transformed_image.shape[0])
mid, left, right = find_peaks(thresholded_image, filename)
left_r, left_x, left_y, right_r, right_x, right_y = sliding_window_lines(thresholded_image, left, right)
lines_image = (np.dstack((thresholded_image, thresholded_image, thresholded_image)) * 255).astype(np.uint8).copy()
for left_rect, right_rect in zip(left_r, right_r):
cv2.rectangle(lines_image, left_rect[0], left_rect[1], (255, 255, 0), 4)
cv2.rectangle(lines_image, right_rect[0], right_rect[1], (255, 255, 0), 4)
if left_x.size:
left_fit = np.polyfit(left_y, left_x, 2)
left_fit_x = left_fit[0] * plot_y ** 2 + left_fit[1] * plot_y + left_fit[2]
left_line.bestx = left_fit_x
left_line.current_fit = left_fit
left_line.allx = left_x
left_line.ally = left_y
if right_x.size:
right_fit = np.polyfit(right_y, right_x, 2)
right_fit_x = right_fit[0] * plot_y ** 2 + right_fit[1] * plot_y + right_fit[2]
right_line.bestx = right_fit_x
right_line.current_fit = right_fit
right_line.allx = right_x
right_line.ally = right_y
lines_image[left_line.ally, left_line.allx] = [255, 0 ,0]
lines_image[right_line.ally, right_line.allx] = [0, 0, 255]
# Plot original and lines image
if filename != None:
f = plt.figure()
plt.imshow(lines_image)
plt.plot(left_fit_x, plot_y, color='yellow')
plt.plot(right_fit_x, plot_y, color='yellow')
plt.xlim(0, 1280)
plt.ylim(720, 0)
f.savefig("test_lines/" + filename)
###################################################################################################
## Compute Curvature
###################################################################################################
if filename != None:
print()
print("Curvature computation...")
def correct_curve(plot_y, line_x, curve_fit, ympp = 30/720, xmpp = 3.7/700):
curve_fit_cr = np.polyfit(plot_y * ympp, line_x * xmpp, 2)
return curve_fit_cr
def compute_curvature(curve_fit, y_eval = 0, ympp = 30/720):
curvature = ((1 + (2 * curve_fit[0] * y_eval * ympp + curve_fit[1]) ** 2) ** 1.5) / np.absolute(2 * curve_fit[0])
return curvature
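    # Radius-of-curvature formula behind compute_curvature, for a fit x = A*y**2 + B*y + C
    # evaluated at y (converted from pixels to metres via ympp):
    #
    #     R(y) = (1 + (2*A*y + B)**2)**1.5 / |2*A|
    #
    # correct_curve refits the line in metre space first so that A and B carry metric
    # units; the assumed scales (30 m per 720 px vertically, 3.7 m per 700 px laterally)
    # are the defaults in the signatures above.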
left_curve_fit_corrected = correct_curve(plot_y, left_line.bestx, left_line.current_fit)
left_line.curvature = compute_curvature(left_curve_fit_corrected, np.max(plot_y))
right_curve_fit_corrected = correct_curve(plot_y, right_line.bestx, right_line.current_fit)
right_line.curvature = compute_curvature(right_curve_fit_corrected, np.max(plot_y))
avg_curvature = (left_line.curvature + right_line.curvature) / 2.0
if filename != None:
print("Curvature left: " + str(left_line.curvature) + " meters")
print("Curvature right: " + str(right_line.curvature) + " meters")
print("Curvature: " + str(avg_curvature) + " meters")
###################################################################################################
## Compute Deviation
###################################################################################################
    left_bottom = left_line.current_fit[0] * transformed_image.shape[0] ** 2 + left_line.current_fit[1] * transformed_image.shape[0] + left_line.current_fit[2]
    right_bottom = right_line.current_fit[0] * transformed_image.shape[0] ** 2 + right_line.current_fit[1] * transformed_image.shape[0] + right_line.current_fit[2]
lane_center = (left_bottom + right_bottom) / 2.0
lane_deviation = lane_center - transformed_image.shape[1]/2
lane_deviation = np.round(lane_deviation / 2.81362, 2)
if filename != None:
print("Deviation: " + str(lane_deviation) + " cm.")
###################################################################################################
## Reprojection
###################################################################################################
if filename != None:
print()
print("Reprojection...")
warp_zero = np.zeros_like(thresholded_image).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
points_left = np.array([np.transpose(np.vstack([left_line.bestx, plot_y]))])
points_right = np.array([np.flipud(np.transpose(np.vstack([right_line.bestx, plot_y])))])
points = np.hstack((points_left, points_right))
cv2.fillPoly(color_warp, np.int_([points]), (0, 255, 0))
new_warp = cv2.warpPerspective(color_warp, np.linalg.inv(transformation_matrix), (transformed_image.shape[1], transformed_image.shape[0]))
warp_lines = cv2.warpPerspective(lines_image, np.linalg.inv(transformation_matrix), (transformed_image.shape[1], transformed_image.shape[0]))
result = cv2.addWeighted(undistorted_image, 1, new_warp, 0.3, 0)
result = cv2.addWeighted(result, 1, warp_lines, 0.3, 0.5)
cv2.putText(result, "Curvature " + str(avg_curvature) + " meters", (30, 60), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 3)
cv2.putText(result, "Deviation " + str(lane_deviation) + " centimeters", (30, 90), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 3)
# Plot original and reprojected image
if filename != None:
f = plt.figure()
plt.imshow(result)
f.savefig("test_poly/" + filename)
return result
###################################################################################################
## Load Test Images
###################################################################################################
test_images_filenames = glob.glob("test_images/*.jpg")
test_images = []
for test_image_filename in test_images_filenames:
print("Loading " + test_image_filename)
test_image = mpimg.imread(test_image_filename)
test_images.append(test_image)
for test_image, test_image_filename in zip(test_images, test_images_filenames):
print("Processing " + test_image_filename)
pipeline(test_image, test_image_filename)
###################################################################################################
## Video Processing
###################################################################################################
clip_output_filename = 'project_video_lines.mp4'
clip_input = VideoFileClip('project_video.mp4')
clip_output = clip_input.fl_image(pipeline)
clip_output.write_videofile(clip_output_filename, audio=False)
| mit |
MiroK/lega | sandbox/bendpy/eigen_solver.py | 1 | 3796 | from scipy.linalg import eigvalsh, eigvals, inv
import numpy as np
def eigensolver(coupled_assembler, s=None):
    '''Return eigenvalues of [[A, B], [B.T, 0]] and of the Schur complement'''
coupled_assembler.assemble_mat_blocks()
# Full system
AA = coupled_assembler.assemble_mat()
# Preconditioner
PA = foo.assemble_Apreconditioner(s)
print 'Getting eigenvalues of AA %d x %d' % AA.shape
# print PA.toarray()
AA_eigs = eigvalsh(AA.toarray(), PA.toarray())
# Schur
A = coupled_assembler.assemble_Amat()
B = coupled_assembler.assemble_Bmat().toarray()
Ainv = inv(A.toarray())
S = B.T.dot(Ainv.dot(B))
# Preconditioner
PS = foo.assemble_Spreconditioner(s)
print 'Getting eigenvalues of S %d x %d' % S.shape
S_eigs = eigvals(S, PS.toarray())
return AA_eigs, S_eigs
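# Note on the algebra above: AA is the saddle-point matrix [[A, B], [B.T, 0]] and AA_eigs
# are the eigenvalues of the generalized problem AA*x = lambda*PA*x computed with
# scipy.linalg.eigvalsh. S = B.T * inv(A) * B is (up to sign) the Schur complement of A in
# that block matrix; its generalized spectrum with respect to PS is used here to gauge how
# well PS works as a preconditioner for the dual block.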
class EigsAnalysis(object):
def __init__(self, file_name):
self.root = './mekit_latex/data/%s' % file_name
self.out_file = open(self.root, 'w')
self.eig_files = []
def __call__(self, n, Aeigenvalues, Seigenvalues):
        'Get smallest/largest (in magnitude) eigenvalues and the condition number.'
# Save eigenvalues
self.eig_files.append((n, '_'.join([self.root, str(n)])))
np.savetxt(self.eig_files[-1][1], Aeigenvalues)
# Process
N = len(Aeigenvalues)
eigs = np.sort(np.abs(Aeigenvalues))
lmin = np.min(eigs)
lmax = np.max(eigs)
cond = lmax/lmin
Slmin = np.min(np.abs(Seigenvalues))
names = ['n', 'N', 'lmin', 'lmax', 'cond', 'S_lmin']
values = [n, N, lmin, lmax, cond, Slmin]
msg = ' '.join(['N = %d'%N]+map(lambda (n, value): '%s = %.4E' % (n, value),
zip(names[1:], values[1:])))
# Stats for this
out_line = '\t'.join(map(str,values))
self.out_file.write(out_line + '\n')
return msg
def close(self):
self.out_file.close()
return self.eig_files
# -----------------------------------------------------------------------------
if __name__ == '__main__':
from shen_du.shen_assembler import ShenSimpleAssembler
from sine_ddu.sine_assembler import SineSimpleAssembler
from beam_defs import PiLineBeam, LineBeam
import matplotlib.pyplot as plt
from math import pi
import sys
BLUE = '\033[1;37;34m%s\033[0m'
RED = '\033[1;37;31m%s\033[0m'
GREEN = "\033[1;37;32m%s\033[0m"
problem = sys.argv[1]
name = 'one_up_down'
A0 = [0, -1]
B0 = [0, 1]
A1 = [-1, 0]
B1 = [1, 0]
# name = 'bar'
# A0 = [-1., -1]
# B0 = [1, 1.]
# A1 = [-1., 1.]
# B1 = [1., -1.]
if problem == 'sine':
def to_ref(P):
'''Take from [-1, 1]^2 to [0, pi]'''
return [(pi*P[0] + pi)/2, (pi*P[1] + pi)/2]
A0, B0, A1, B1 = map(to_ref, (A0, B0, A1, B1))
beam0 = PiLineBeam(A0, B0)
beam1 = PiLineBeam(A1, B1)
bar = SineSimpleAssembler
elif problem == 'shen':
beam0 = LineBeam(A0, B0)
beam1 = LineBeam(A1, B1)
bar = ShenSimpleAssembler
beams = [beam0, beam1]
materials = [1, 1, 1]
s = -1.
analysis = EigsAnalysis('s_%s_%s_%d' % (name, problem, len(beams)))
for deg in range(4, 25, 2):
n_vector = [deg, deg, deg] #, deg]
foo = bar(n_vector=n_vector, beams=beams, materials=materials)
AA_eigs, S_eigs = eigensolver(foo, s)
print GREEN % analysis(deg, AA_eigs, S_eigs)
spectra_files = analysis.close()
plt.figure()
for n, lmbdas in spectra_files:
AA_eigs = np.loadtxt(lmbdas)
plt.plot(np.arange(1, len(AA_eigs)+1), AA_eigs, 'x', label=str(n))
plt.legend()
plt.show()
| mit |
Lekanich/intellij-community | python/helpers/pydev/pydev_ipython/qt_for_kernel.py | 67 | 2337 | """ Import Qt in a manner suitable for an IPython kernel.
This is the import used for the `gui=qt` or `matplotlib=qt` initialization.
Import Priority:
if Qt4 has been imported anywhere else:
use that
if matplotlib has been imported and doesn't support v2 (<= 1.0.1):
use PyQt4 @v1
Next, ask ETS' QT_API env variable
if QT_API not set:
ask matplotlib via rcParams['backend.qt4']
if it said PyQt:
use PyQt4 @v1
elif it said PySide:
use PySide
else: (matplotlib said nothing)
# this is the default path - nobody told us anything
try:
PyQt @v1
except:
fallback on PySide
else:
use PyQt @v2 or PySide, depending on QT_API
because ETS doesn't work with PyQt @v1.
"""
import os
import sys
from pydev_ipython.version import check_version
from pydev_ipython.qt_loaders import (load_qt, QT_API_PYSIDE,
QT_API_PYQT, QT_API_PYQT_DEFAULT,
loaded_api)
#Constraints placed on an imported matplotlib
def matplotlib_options(mpl):
if mpl is None:
return
mpqt = mpl.rcParams.get('backend.qt4', None)
if mpqt is None:
return None
if mpqt.lower() == 'pyside':
return [QT_API_PYSIDE]
elif mpqt.lower() == 'pyqt4':
return [QT_API_PYQT_DEFAULT]
raise ImportError("unhandled value for backend.qt4 from matplotlib: %r" %
mpqt)
def get_options():
"""Return a list of acceptable QT APIs, in decreasing order of
preference
"""
#already imported Qt somewhere. Use that
loaded = loaded_api()
if loaded is not None:
return [loaded]
mpl = sys.modules.get('matplotlib', None)
if mpl is not None and not check_version(mpl.__version__, '1.0.2'):
#1.0.1 only supports PyQt4 v1
return [QT_API_PYQT_DEFAULT]
if os.environ.get('QT_API', None) is None:
#no ETS variable. Ask mpl, then use either
return matplotlib_options(mpl) or [QT_API_PYQT_DEFAULT, QT_API_PYSIDE]
#ETS variable present. Will fallback to external.qt
return None
api_opts = get_options()
if api_opts is not None:
QtCore, QtGui, QtSvg, QT_API = load_qt(api_opts)
else: # use ETS variable
from pydev_ipython.qt import QtCore, QtGui, QtSvg, QT_API
| apache-2.0 |
biolab/orange | Orange/OrangeWidgets/OWGraph.py | 6 | 44661 | #
# owGraph.py
#
# the base for all graphs
from PyQt4.Qwt5 import *
from OWGraphTools import * # user defined curves, ...
from OWColorPalette import * # color palletes, ...
from OWDlgs import OWChooseImageSizeDlg
import orange, math, time
from OWBaseWidget import unisetattr
NOTHING = 0
ZOOMING = 1
SELECT_RECTANGLE = 2
SELECT_POLYGON = 3
PANNING = 4
SELECT = 5
class OWGraph(QwtPlot):
def __init__(self, parent = None, name = "None", showLegend=1):
"Constructs the graph"
QwtPlot.__init__(self, parent)
self.parentName = name
#self.setWindowFlags(Qt.WResizeNoErase) #this works like magic.. no flicker during repaint!
self.setAutoReplot(False)
self.setAxisAutoScale(QwtPlot.xBottom)
self.setAxisAutoScale(QwtPlot.xTop)
self.setAxisAutoScale(QwtPlot.yLeft)
self.setAxisAutoScale(QwtPlot.yRight)
self.axisTitleFont = QFont('Helvetica', 10, QFont.Bold)
text = QwtText("")
text.setFont(self.axisTitleFont)
self.setAxisTitle(QwtPlot.xBottom, text)
self.setAxisTitle(QwtPlot.xTop, text)
self.setAxisTitle(QwtPlot.yLeft, text)
self.setAxisTitle(QwtPlot.yRight, text)
ticksFont = QFont('Helvetica', 9)
self.setAxisFont(QwtPlot.xBottom, ticksFont)
self.setAxisFont(QwtPlot.xTop, ticksFont)
self.setAxisFont(QwtPlot.yLeft, ticksFont)
self.setAxisFont(QwtPlot.yRight, ticksFont)
#self.setLegendFont(ticksFont)
self.tipLeft = None
self.tipRight = None
self.tipBottom = None
self._cursor = Qt.ArrowCursor
self.showAxisScale = 1
self.showMainTitle = 0
self.showXaxisTitle = 0
self.showYLaxisTitle = 0
self.showYRaxisTitle = 0
self.mainTitle = None
self.XaxisTitle = None
self.YLaxisTitle = None
self.YRaxisTitle = None
self.useAntialiasing = 1
self.state = ZOOMING
self.tempSelectionCurve = None
self.selectionCurveList = []
self.autoSendSelectionCallback = None # callback function to call when we add new selection polygon or rectangle
self.sendSelectionOnUpdate = 0
self.showLegend = showLegend
if self.showLegend:
self.insertLegend(QwtLegend(), QwtPlot.BottomLegend)
self.gridCurve = QwtPlotGrid()
#self.gridCurve.attach(self)
self.mouseCurrentlyPressed = 0
self.mouseCurrentButton = 0
self.enableWheelZoom = 0
self.noneSymbol = QwtSymbol()
self.noneSymbol.setStyle(QwtSymbol.NoSymbol)
self.tips = TooltipManager(self)
self.statusBar = None
self.canvas().setMouseTracking(1)
self.setMouseTracking(1)
self.zoomStack = []
self.panPosition = None
self.optimizedDrawing = 1
self.pointWidth = 5
self.showFilledSymbols = 1
self.alphaValue = 255
self.setCanvasColor(QColor(Qt.white))
self.curveSymbols = [QwtSymbol.Ellipse, QwtSymbol.Rect, QwtSymbol.Triangle, QwtSymbol.Diamond, QwtSymbol.DTriangle, QwtSymbol.UTriangle, QwtSymbol.LTriangle, QwtSymbol.RTriangle, QwtSymbol.XCross, QwtSymbol.Cross]
#self.curveSymbols = [QwtSymbol.Triangle, QwtSymbol.Ellipse, QwtSymbol.Rect, QwtSymbol.Diamond, QwtSymbol.DTriangle, QwtSymbol.UTriangle, QwtSymbol.LTriangle, QwtSymbol.RTriangle, QwtSymbol.XCross, QwtSymbol.Cross]
# uncomment this if you want to use printer friendly symbols
#self.curveSymbols = [QwtSymbol.Ellipse, QwtSymbol.XCross, QwtSymbol.Triangle, QwtSymbol.Cross, QwtSymbol.Diamond, QwtSymbol.DTriangle, QwtSymbol.Rect, QwtSymbol.UTriangle, QwtSymbol.LTriangle, QwtSymbol.RTriangle]
self.contPalette = ColorPaletteGenerator(numberOfColors = -1)
self.discPalette = ColorPaletteGenerator()
# when using OWGraph we can define functions that will receive mouse move, press, release events. these functions
# HAVE TO RETURN whether the signal was handled, or you also want to use default OWGraph handler
self.mousePressEventHandler = None
self.mouseMoveEventHandler = None
self.mouseReleaseEventHandler = None
self.mouseStaticClickHandler = self.staticMouseClick
self.enableGridXB(0)
self.enableGridYL(0)
#self.updateLayout()
def setCursor(self, cursor):
self._cursor = cursor
self.canvas().setCursor(cursor)
def __setattr__(self, name, value):
unisetattr(self, name, value, QwtPlot)
# call to update dictionary with settings
def updateSettings(self, **settings):
self.__dict__.update(settings)
def saveToFile(self, extraButtons = []):
sizeDlg = OWChooseImageSizeDlg(self, extraButtons, parent=self)
sizeDlg.exec_()
def saveToFileDirect(self, fileName, size = None):
sizeDlg = OWChooseImageSizeDlg(self)
sizeDlg.saveImage(fileName, size)
def setTickLength(self, axis, minor, medium, major):
self.axisScaleDraw(axis).setTickLength(QwtScaleDiv.MinorTick, minor)
self.axisScaleDraw(axis).setTickLength(QwtScaleDiv.MediumTick, medium)
self.axisScaleDraw(axis).setTickLength(QwtScaleDiv.MajorTick, major)
def setYLlabels(self, labels):
"Sets the Y-axis labels on the left."
self.axisScaleDraw(QwtPlot.yLeft).enableComponent(QwtScaleDraw.Backbone, self.showAxisScale)
self.axisScaleDraw(QwtPlot.yLeft).enableComponent(QwtScaleDraw.Ticks, self.showAxisScale)
self.axisScaleDraw(QwtPlot.yLeft).enableComponent(QwtScaleDraw.Labels, self.showAxisScale)
if not self.showAxisScale:
return
#self.setTickLength(QwtPlot.yLeft, 1, 1, 3)
if (labels <> None):
self.setAxisScaleDraw(QwtPlot.yLeft, DiscreteAxisScaleDraw(labels))
self.setAxisScale(QwtPlot.yLeft, 0, len(labels) - 1, 1)
self.setAxisMaxMinor(QwtPlot.yLeft, 0)
self.setAxisMaxMajor(QwtPlot.yLeft, len(labels))
else:
self.setAxisScaleDraw(QwtPlot.yLeft, QwtScaleDraw())
self.setAxisAutoScale(QwtPlot.yLeft)
self.setAxisMaxMinor(QwtPlot.yLeft, 5)
self.setAxisMaxMajor(QwtPlot.yLeft, 8)
def setYRlabels(self, labels):
"Sets the Y-axis labels on the right."
self.axisScaleDraw(QwtPlot.yRight).enableComponent(QwtScaleDraw.Backbone, self.showAxisScale)
self.axisScaleDraw(QwtPlot.yRight).enableComponent(QwtScaleDraw.Ticks, self.showAxisScale)
self.axisScaleDraw(QwtPlot.yRight).enableComponent(QwtScaleDraw.Labels, self.showAxisScale)
if not self.showAxisScale:
return
if (labels <> None):
self.setAxisScaleDraw(QwtPlot.yRight, DiscreteAxisScaleDraw(labels))
self.setAxisScale(QwtPlot.yRight, 0, len(labels) - 1, 1)
self.setAxisMaxMinor(QwtPlot.yRight, 0)
self.setAxisMaxMajor(QwtPlot.yRight, len(labels))
else:
self.setAxisScaleDraw(QwtPlot.yRight, QwtScaleDraw())
self.setAxisAutoScale(QwtPlot.yRight)
self.setAxisMaxMinor(QwtPlot.yRight, 5)
self.setAxisMaxMajor(QwtPlot.yRight, 8)
def setXlabels(self, labels):
"Sets the x-axis labels if x-axis discrete."
"Or leave up to QwtPlot (MaxMajor, MaxMinor) if x-axis continuous."
self.axisScaleDraw(QwtPlot.xBottom).enableComponent(QwtScaleDraw.Backbone, self.showAxisScale)
self.axisScaleDraw(QwtPlot.xBottom).enableComponent(QwtScaleDraw.Ticks, self.showAxisScale)
self.axisScaleDraw(QwtPlot.xBottom).enableComponent(QwtScaleDraw.Labels, self.showAxisScale)
if not self.showAxisScale:
return
if (labels <> None):
self.setAxisScaleDraw(QwtPlot.xBottom, DiscreteAxisScaleDraw(labels))
self.setAxisScale(QwtPlot.xBottom, 0, len(labels) - 1, 1)
self.setAxisMaxMinor(QwtPlot.xBottom, 0)
self.setAxisMaxMajor(QwtPlot.xBottom, len(labels))
else:
self.setAxisScaleDraw(QwtPlot.xBottom, QwtScaleDraw())
self.setAxisAutoScale(QwtPlot.xBottom)
self.setAxisMaxMinor(QwtPlot.xBottom, 5)
self.setAxisMaxMajor(QwtPlot.xBottom, 8)
def enableXaxis(self, enable):
self.enableAxis(QwtPlot.xBottom, enable)
self.repaint()
def enableYLaxis(self, enable):
self.enableAxis(QwtPlot.yLeft, enable)
self.repaint()
def enableYRaxis(self, enable):
self.enableAxis(QwtPlot.yRight, enable)
self.repaint()
def setRightTip(self,explain):
"Sets the tooltip for the right y axis"
self.tipRight = explain
def setLeftTip(self,explain):
"Sets the tooltip for the left y axis"
self.tipLeft = explain
def setBottomTip(self,explain):
"Sets the tooltip for the left x axis"
self.tipBottom = explain
def setShowMainTitle(self, b):
self.showMainTitle = b
if self.showMainTitle and self.mainTitle:
self.setTitle(self.mainTitle)
else:
self.setTitle(QwtText())
self.repaint()
def setMainTitle(self, t):
self.mainTitle = t
if self.showMainTitle and self.mainTitle:
self.setTitle(self.mainTitle)
else:
self.setTitle(QwtText())
self.repaint()
def setShowXaxisTitle(self, b = -1):
if b == self.showXaxisTitle: return
if b != -1:
self.showXaxisTitle = b
if self.showXaxisTitle and self.XaxisTitle:
self.setAxisTitle(QwtPlot.xBottom, self.XaxisTitle)
else:
self.setAxisTitle(QwtPlot.xBottom, QwtText())
self.repaint()
def setXaxisTitle(self, title):
if title == self.XaxisTitle: return
self.XaxisTitle = title
if self.showXaxisTitle and self.XaxisTitle:
self.setAxisTitle(QwtPlot.xBottom, self.XaxisTitle)
else:
self.setAxisTitle(QwtPlot.xBottom, QwtText())
#self.updateLayout()
self.repaint()
def setShowYLaxisTitle(self, b = -1):
if b == self.showYLaxisTitle: return
if b != -1:
self.showYLaxisTitle = b
if self.showYLaxisTitle and self.YLaxisTitle:
self.setAxisTitle(QwtPlot.yLeft, self.YLaxisTitle)
else:
self.setAxisTitle(QwtPlot.yLeft, QwtText())
#self.updateLayout()
self.repaint()
def setYLaxisTitle(self, title):
if title == self.YLaxisTitle: return
self.YLaxisTitle = title
if self.showYLaxisTitle and self.YLaxisTitle:
self.setAxisTitle(QwtPlot.yLeft, self.YLaxisTitle)
else:
self.setAxisTitle(QwtPlot.yLeft, QwtText())
#self.updateLayout()
self.repaint()
def setShowYRaxisTitle(self, b = -1):
if b == self.showYRaxisTitle: return
if b != -1:
self.showYRaxisTitle = b
if self.showYRaxisTitle and self.YRaxisTitle:
self.setAxisTitle(QwtPlot.yRight, self.YRaxisTitle)
else:
self.setAxisTitle(QwtPlot.yRight, QwtText())
#self.updateLayout()
self.repaint()
def setYRaxisTitle(self, title):
if title == self.YRaxisTitle: return
self.YRaxisTitle = title
if self.showYRaxisTitle and self.YRaxisTitle:
self.setAxisTitle(QwtPlot.yRight, self.YRaxisTitle)
else:
self.setAxisTitle(QwtPlot.yRight, QwtText())
#self.updateLayout()
self.repaint()
def enableGridXB(self, b):
self.gridCurve.enableX(b)
self.replot()
def enableGridYL(self, b):
self.gridCurve.enableY(b)
self.replot()
def setGridColor(self, c):
self.gridCurve.setPen(QPen(c))
self.replot()
def setCanvasColor(self, c):
self.setCanvasBackground(c)
self.repaint()
# ############################################################
# functions that were previously in OWVisGraph
# ############################################################
def setData(self, data):
# clear all curves, markers, tips
self.clear()
self.removeAllSelections(0) # clear all selections
self.tips.removeAll()
self.zoomStack = []
# ####################################################################
# return string with attribute names and their values for example example
def getExampleTooltipText(self, example, indices = None, maxIndices = 20):
if indices and type(indices[0]) == str:
indices = [self.attributeNameIndex[i] for i in indices]
if not indices:
indices = range(len(self.dataDomain.attributes))
# don't show the class value twice
if example.domain.classVar:
classIndex = self.attributeNameIndex[example.domain.classVar.name]
while classIndex in indices:
indices.remove(classIndex)
text = "<b>Attributes:</b><br>"
for index in indices[:maxIndices]:
attr = self.attributeNames[index]
if attr not in example.domain: text += " "*4 + "%s = ?<br>" % (attr)
elif example[attr].isSpecial(): text += " "*4 + "%s = ?<br>" % (attr)
else: text += " "*4 + "%s = %s<br>" % (attr, str(example[attr]))
if len(indices) > maxIndices:
text += " "*4 + " ... <br>"
if example.domain.classVar:
text = text[:-4]
text += "<hr><b>Class:</b><br>"
if example.getclass().isSpecial(): text += " "*4 + "%s = ?<br>" % (example.domain.classVar.name)
else: text += " "*4 + "%s = %s<br>" % (example.domain.classVar.name, str(example.getclass()))
if len(example.domain.getmetas()) != 0:
text = text[:-4]
text += "<hr><b>Meta attributes:</b><br>"
# show values of meta attributes
for key in example.domain.getmetas():
try: text += " "*4 + "%s = %s<br>" % (example.domain[key].name, str(example[key]))
except: pass
return text[:-4] # remove the last <br>
def addCurve(self, name, brushColor = Qt.black, penColor = Qt.black, size = 5, style = QwtPlotCurve.NoCurve, symbol = QwtSymbol.Ellipse, enableLegend = 0, xData = [], yData = [], showFilledSymbols = None, lineWidth = 1, pen = None, autoScale = 0, antiAlias = None, penAlpha = 255, brushAlpha = 255):
curve = QwtPlotCurve(name)
curve.setRenderHint(QwtPlotItem.RenderAntialiased, antiAlias or self.useAntialiasing)
curve.setItemAttribute(QwtPlotItem.Legend, enableLegend)
curve.setItemAttribute(QwtPlotItem.AutoScale, autoScale)
if penAlpha != 255:
penColor.setAlpha(penAlpha)
if brushAlpha != 255:
brushColor.setAlpha(brushAlpha)
if showFilledSymbols or (showFilledSymbols == None and self.showFilledSymbols):
newSymbol = QwtSymbol(symbol, QBrush(brushColor), QPen(penColor), QSize(size, size))
else:
newSymbol = QwtSymbol(symbol, QBrush(), QPen(penColor), QSize(size, size))
curve.setSymbol(newSymbol)
curve.setStyle(style)
curve.setPen(pen != None and pen or QPen(penColor, lineWidth))
if xData != [] and yData != []:
curve.setData(xData, yData)
curve.attach(self)
return curve
def addMarker(self, name, x, y, alignment = -1, bold = 0, color = None, brushColor = None, size=None, antiAlias = None):
text = QwtText(name, QwtText.PlainText)
if color != None:
text.setColor(color)
text.setPaintAttribute(QwtText.PaintUsingTextColor, 1)
if brushColor != None:
text.setBackgroundBrush(QBrush(brushColor))
font = text.font()
if bold: font.setBold(1)
if size: font.setPixelSize(size)
text.setFont(font)
text.setPaintAttribute(QwtText.PaintUsingTextFont, 1)
#if alignment != -1: text.setRenderFlags(alignment)
marker = QwtPlotMarker()
marker.setLabel(text)
marker.setValue(x,y)
marker.setRenderHint(QwtPlotItem.RenderAntialiased, antiAlias == 1 or self.useAntialiasing)
if alignment != -1:
marker.setLabelAlignment(alignment)
marker.attach(self)
return marker
# show a tooltip at x,y with text. if the mouse will move for more than 2 pixels it will be removed
def showTip(self, x, y, text):
QToolTip.showText(self.mapToGlobal(QPoint(x, y)), text, self.canvas(), QRect(x-3,y-3,6,6))
# mouse was only pressed and released on the same spot. visualization methods might want to process this event
def staticMouseClick(self, e):
return 0
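    # Usage sketch (hypothetical names): a visualization can plug callables into the hooks
    # set up in __init__, e.g.
    #
    #     def myMove(event):
    #         ...            # inspect the event, update the plot
    #         return 1       # 1 = handled, skip OWGraph's default processing
    #     graph.mouseMoveEventHandler = myMove
    #
    # returning 0/None falls through to the default zoom/pan/selection behaviour.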
def activateZooming(self):
self.state = ZOOMING
if self.tempSelectionCurve: self.removeLastSelection()
def activateRectangleSelection(self):
self.state = SELECT_RECTANGLE
if self.tempSelectionCurve: self.removeLastSelection()
def activatePolygonSelection(self):
self.state = SELECT_POLYGON
if self.tempSelectionCurve: self.removeLastSelection()
def activatePanning(self):
self.state = PANNING
if self.tempSelectionCurve: self.removeLastSelection()
def activateSelection(self):
self.state = SELECT
def removeDrawingCurves(self, removeLegendItems = 1, removeSelectionCurves = 0, removeMarkers = 0):
for curve in self.itemList():
if not removeLegendItems and curve.testItemAttribute(QwtPlotItem.Legend):
continue
if not removeSelectionCurves and isinstance(curve, SelectionCurve):
continue
if not removeMarkers and isinstance(curve, QwtPlotMarker):
continue
curve.detach()
self.gridCurve.attach(self) # we also removed the grid curve
def removeMarkers(self):
self.detachItems(QwtPlotItem.Rtti_PlotMarker)
def removeLastSelection(self):
removed = 0
if self.selectionCurveList != []:
lastCurve = self.selectionCurveList.pop()
lastCurve.detach()
self.tempSelectionCurve = None
removed = 1
self.replot()
if self.autoSendSelectionCallback:
self.autoSendSelectionCallback() # do we want to send new selection
return removed
def removeAllSelections(self, send = 1):
selectionsExisted = len(self.selectionCurveList) > 0
self.detachItems(SelectionCurveRtti)
self.selectionCurveList = []
if selectionsExisted:
self.replot()
if send and self.autoSendSelectionCallback:
self.autoSendSelectionCallback() # do we want to send new selection
def zoomOut(self):
if len(self.zoomStack):
newXMin, newXMax, newYMin, newYMax = self.zoomStack.pop()
self.setNewZoom(newXMin, newXMax, newYMin, newYMax)
return 1
return 0
def setNewZoom(self, newXMin, newXMax, newYMin, newYMax):
oldXMin = self.axisScaleDiv(QwtPlot.xBottom).interval().minValue()
oldXMax = self.axisScaleDiv(QwtPlot.xBottom).interval().maxValue()
oldYMin = self.axisScaleDiv(QwtPlot.yLeft).interval().minValue()
oldYMax = self.axisScaleDiv(QwtPlot.yLeft).interval().maxValue()
stepX, stepY = self.axisStepSize(QwtPlot.xBottom), self.axisStepSize(QwtPlot.yLeft)
steps = 10
for i in range(1, steps+1):
midXMin = oldXMin * (steps-i)/float(steps) + newXMin * i/float(steps)
midXMax = oldXMax * (steps-i)/float(steps) + newXMax * i/float(steps)
midYMin = oldYMin * (steps-i)/float(steps) + newYMin * i/float(steps)
midYMax = oldYMax * (steps-i)/float(steps) + newYMax * i/float(steps)
self.setAxisScale(QwtPlot.xBottom, midXMin, midXMax, stepX)
self.setAxisScale(QwtPlot.yLeft, midYMin, midYMax, stepY)
#if i == steps:
# self.removeCurve(zoomOutCurveKey)
t = time.time()
self.replot()
if time.time()-t > 0.1:
self.setAxisScale(QwtPlot.xBottom, newXMin, newXMax, stepX)
self.setAxisScale(QwtPlot.yLeft, newYMin, newYMax, stepY)
self.replot()
break
def closestMarker(self, intX, intY):
point = QPoint(intX, intY)
marker = None
dist = 1e30
for curve in self.itemList():
if isinstance(curve, QwtPlotMarker):
curvePoint = QPoint(self.transform(QwtPlot.xBottom, curve.xValue()), self.transform(QwtPlot.yLeft, curve.yValue()))
d = (point - curvePoint).manhattanLength()
if d < dist:
dist = d
marker = curve
return marker, dist
def closestCurve(self, intX, intY):
point = QPoint(intX, intY)
nearestCurve = None
dist = 10000000000
index = -1
for curve in self.itemList():
if isinstance(curve, QwtPlotCurve) and curve.dataSize() > 0:
ind, d = curve.closestPoint(point)
if d < dist:
nearestCurve, dist, index = curve, d, ind
if nearestCurve == None:
return None, 0, 0, 0, 0
else:
return nearestCurve, dist, nearestCurve.x(index), nearestCurve.y(index), index
# ###############################################
# HANDLING MOUSE EVENTS
# ###############################################
def mousePressEvent(self, e):
if self.mousePressEventHandler != None:
handled = self.mousePressEventHandler(e)
if handled: return
QwtPlot.mousePressEvent(self, e)
canvasPos = self.canvas().mapFrom(self, e.pos())
if not self.canvas().contentsRect().contains(canvasPos):
# Press on the legend or axis widget.
return
xFloat = self.invTransform(QwtPlot.xBottom, canvasPos.x())
yFloat = self.invTransform(QwtPlot.yLeft, canvasPos.y())
self.xpos = canvasPos.x()
self.ypos = canvasPos.y()
self.mouseCurrentlyPressed = 1
self.mouseCurrentButton = e.button()
if self.state not in [ZOOMING, PANNING]:
insideRects = [rect.isInside(xFloat, yFloat) for rect in self.selectionCurveList]
onEdgeRects = [rect.isOnEdge(xFloat, yFloat) for rect in self.selectionCurveList]
# ####
# ZOOM
if e.button() == Qt.LeftButton and self.state == ZOOMING:
self.tempSelectionCurve = RectangleSelectionCurve(pen = Qt.DashLine)
self.tempSelectionCurve.attach(self)
# ####
# PANNING
elif e.button() == Qt.LeftButton and self.state == PANNING:
self.panPosition = e.globalX(), e.globalY()
self.paniniX = self.axisScaleDiv(QwtPlot.xBottom).interval().minValue(), self.axisScaleDiv(QwtPlot.xBottom).interval().maxValue()
self.paniniY = self.axisScaleDiv(QwtPlot.yLeft).interval().minValue(), self.axisScaleDiv(QwtPlot.yLeft).interval().maxValue()
elif e.button() == Qt.LeftButton and 1 in onEdgeRects and self.tempSelectionCurve == None:
self.resizingCurve = self.selectionCurveList[onEdgeRects.index(1)]
# have we pressed the mouse inside one of the selection curves?
elif e.button() == Qt.LeftButton and 1 in insideRects and self.tempSelectionCurve == None:
self.movingCurve = self.selectionCurveList[insideRects.index(1)]
self.movingCurve.mousePosition = (xFloat, yFloat)
# ####
# SELECT RECTANGLE
elif e.button() == Qt.LeftButton and self.state == SELECT_RECTANGLE:
self.tempSelectionCurve = RectangleSelectionCurve()
self.tempSelectionCurve.attach(self)
self.selectionCurveList.append(self.tempSelectionCurve)
# ####
# SELECT POLYGON
elif e.button() == Qt.LeftButton and self.state == SELECT_POLYGON:
if self.tempSelectionCurve == None:
self.tempSelectionCurve = SelectionCurve()
self.tempSelectionCurve.attach(self)
self.selectionCurveList.append(self.tempSelectionCurve)
self.tempSelectionCurve.addPoint(self.invTransform(QwtPlot.xBottom, self.xpos), self.invTransform(QwtPlot.yLeft, self.ypos))
self.tempSelectionCurve.addPoint(self.invTransform(QwtPlot.xBottom, self.xpos), self.invTransform(QwtPlot.yLeft, self.ypos))
if self.tempSelectionCurve.closed(): # did we intersect an existing line. if yes then close the curve and finish appending lines
self.tempSelectionCurve = None
self.replot()
if self.autoSendSelectionCallback: self.autoSendSelectionCallback() # do we want to send new selection
# only needed to show the message in statusbar
def mouseMoveEvent(self, e):
if self.mouseMoveEventHandler != None:
handled = self.mouseMoveEventHandler(e)
if handled: return
QwtPlot.mouseMoveEvent(self, e)
canvasPos = self.canvas().mapFrom(self, e.pos())
xFloat = self.invTransform(QwtPlot.xBottom, canvasPos.x())
yFloat = self.invTransform(QwtPlot.yLeft, canvasPos.y())
text = ""
if not self.mouseCurrentlyPressed:
(text, x, y) = self.tips.maybeTip(xFloat, yFloat)
if type(text) == int: text = self.buildTooltip(text)
if self.statusBar != None:
self.statusBar.showMessage(text)
if text != "":
self.showTip(self.transform(QwtPlot.xBottom, x), self.transform(QwtPlot.yLeft, y), text)
if self.tempSelectionCurve != None and (self.state == ZOOMING or self.state == SELECT_RECTANGLE):
x1 = self.invTransform(QwtPlot.xBottom, self.xpos)
y1 = self.invTransform(QwtPlot.yLeft, self.ypos)
self.tempSelectionCurve.setPoints(x1, y1, xFloat, yFloat)
self.replot()
elif self.tempSelectionCurve != None and self.state == SELECT_POLYGON:
self.tempSelectionCurve.replaceLastPoint(xFloat,yFloat)
self.replot()
elif hasattr(self, "resizingCurve"):
self.resizingCurve.updateCurve(xFloat, yFloat)
self.replot()
if self.sendSelectionOnUpdate and self.autoSendSelectionCallback:
self.autoSendSelectionCallback()
# do we have a selection curve we are currently moving?
elif hasattr(self, "movingCurve"):
self.movingCurve.moveBy(xFloat-self.movingCurve.mousePosition[0], yFloat-self.movingCurve.mousePosition[1])
self.movingCurve.mousePosition = (xFloat, yFloat)
self.replot()
if self.sendSelectionOnUpdate and self.autoSendSelectionCallback:
self.autoSendSelectionCallback()
elif self.state == PANNING and self.panPosition:
if hasattr(self, "paniniX") and hasattr(self, "paniniY"):
dx = self.invTransform(QwtPlot.xBottom, self.panPosition[0]) - self.invTransform(QwtPlot.xBottom, e.globalX())
dy = self.invTransform(QwtPlot.yLeft, self.panPosition[1]) - self.invTransform(QwtPlot.yLeft, e.globalY())
xEnabled, xMin, xMax = getattr(self, "xPanningInfo", (1, self.paniniX[0] + dx, self.paniniX[1] + dx))
yEnabled, yMin, yMax = getattr(self, "yPanningInfo", (1, self.paniniY[0] + dy, self.paniniY[1] + dy))
if self.paniniX[0] + dx < xMin: # if we reached the left edge, don't change the right edge
xMax = self.paniniX[1] - (self.paniniX[0] - xMin)
elif self.paniniX[1] + dx > xMax: # if we reached the right edge, don't change the left edge
xMin = self.paniniX[0] + (xMax - self.paniniX[1])
else:
xMin, xMax = self.paniniX[0] + dx, self.paniniX[1] + dx
if xEnabled: self.setAxisScale(QwtPlot.xBottom, xMin, xMax, self.axisStepSize(QwtPlot.xBottom))
if self.paniniY[0] + dy < yMin: # if we reached the left edge, don't change the right edge
yMax = self.paniniY[1] - (self.paniniY[0] - yMin)
elif self.paniniY[1] + dy > yMax: # if we reached the right edge, don't change the left edge
yMin = self.paniniY[0] + (yMax - self.paniniY[1])
else:
yMin, yMax = self.paniniY[0] + dy, self.paniniY[1] + dy
if yEnabled: self.setAxisScale(QwtPlot.yLeft, yMin, yMax, self.axisStepSize(QwtPlot.yLeft))
if xEnabled or yEnabled: self.replot()
# if we are in the selection state then we perhaps show the cursors to move or resize the selection curves
if self.state not in [ZOOMING, PANNING] and getattr(self, "resizingCurve", None) == None and self.tempSelectionCurve == None:
onEdge = [rect.isOnEdge(xFloat, yFloat) for rect in self.selectionCurveList]
if 1 in onEdge:
self.canvas().setCursor(self.selectionCurveList[onEdge.index(1)].appropriateCursor)
# check if we need to change the cursor if we are at some selection box
elif 1 in [rect.isInside(xFloat, yFloat) for rect in self.selectionCurveList]:
self.canvas().setCursor(Qt.OpenHandCursor)
else:
self.canvas().setCursor(self._cursor)
def mouseReleaseEvent(self, e):
if self.mouseReleaseEventHandler != None:
handled = self.mouseReleaseEventHandler(e)
if handled: return
QwtPlot.mouseReleaseEvent(self, e)
if not self.mouseCurrentlyPressed: return # this might happen if we double clicked the widget titlebar
self.mouseCurrentlyPressed = 0
self.mouseCurrentButton = 0
self.panPosition = None
staticClick = 0
canvasPos = self.canvas().mapFrom(self, e.pos())
if hasattr(self, "movingCurve"):
del self.movingCurve
if self.autoSendSelectionCallback:
self.autoSendSelectionCallback() # send the new selection
if hasattr(self, "resizingCurve"):
del self.resizingCurve
if self.autoSendSelectionCallback:
self.autoSendSelectionCallback() # send the new selection
if e.button() == Qt.LeftButton:
if self.xpos == canvasPos.x() and self.ypos == canvasPos.y():
handled = self.mouseStaticClickHandler(e)
if handled: return
staticClick = 1
if self.state == ZOOMING:
xmin, xmax = min(self.xpos, canvasPos.x()), max(self.xpos, canvasPos.x())
ymin, ymax = min(self.ypos, canvasPos.y()), max(self.ypos, canvasPos.y())
if self.tempSelectionCurve:
self.tempSelectionCurve.detach()
self.tempSelectionCurve = None
if staticClick or xmax-xmin < 4 or ymax-ymin < 4:
x = self.invTransform(QwtPlot.xBottom, canvasPos.x())
y = self.invTransform(QwtPlot.yLeft, canvasPos.y())
diffX = (self.axisScaleDiv(QwtPlot.xBottom).interval().maxValue() - self.axisScaleDiv(QwtPlot.xBottom).interval().minValue()) / 2.
diffY = (self.axisScaleDiv(QwtPlot.yLeft).interval().maxValue() - self.axisScaleDiv(QwtPlot.yLeft).interval().minValue()) / 2.
# use this to zoom to the place where the mouse cursor is
if diffX:
xmin = x - (diffX/2.) * (x - self.axisScaleDiv(QwtPlot.xBottom).interval().minValue()) / diffX
xmax = x + (diffX/2.) * (self.axisScaleDiv(QwtPlot.xBottom).interval().maxValue() - x) / diffX
if diffY:
ymin = y + (diffY/2.) * (self.axisScaleDiv(QwtPlot.yLeft).interval().maxValue() - y) / diffY
ymax = y - (diffY/2.) * (y - self.axisScaleDiv(QwtPlot.yLeft).interval().minValue()) / diffY
else:
xmin = self.invTransform(QwtPlot.xBottom, xmin); xmax = self.invTransform(QwtPlot.xBottom, xmax)
ymin = self.invTransform(QwtPlot.yLeft, ymin); ymax = self.invTransform(QwtPlot.yLeft, ymax)
self.zoomStack.append((self.axisScaleDiv(QwtPlot.xBottom).interval().minValue(), self.axisScaleDiv(QwtPlot.xBottom).interval().maxValue(), self.axisScaleDiv(QwtPlot.yLeft).interval().minValue(), self.axisScaleDiv(QwtPlot.yLeft).interval().maxValue()))
self.setNewZoom(xmin, xmax, ymax, ymin)
elif self.state == SELECT_RECTANGLE:
self.tempSelectionCurve = None
if self.autoSendSelectionCallback: self.autoSendSelectionCallback() # do we want to send new selection
elif e.button() == Qt.RightButton:
if self.state == ZOOMING:
ok = self.zoomOut()
if not ok:
self.removeLastSelection()
return
elif self.state == SELECT_RECTANGLE:
ok = self.removeLastSelection() # remove the rectangle
if not ok: self.zoomOut()
elif self.state == SELECT_POLYGON:
if self.tempSelectionCurve:
self.tempSelectionCurve.removeLastPoint()
if self.tempSelectionCurve.dataSize() == 0: # remove the temp curve
self.tempSelectionCurve = None
self.removeLastSelection()
else: # set new last point
self.tempSelectionCurve.replaceLastPoint(self.invTransform(QwtPlot.xBottom, canvasPos.x()), self.invTransform(QwtPlot.yLeft, canvasPos.y()))
self.replot()
else:
ok = self.removeLastSelection()
if not ok: self.zoomOut()
def wheelEvent(self, e):
if not self.enableWheelZoom:
return
d = -e.delta()/120.
if getattr(self, "controlPressed", False):
ys = self.axisScaleDiv(QwtPlot.yLeft)
yoff = d * (ys.interval().maxValue() - ys.interval().minValue()) / 100.
self.setAxisScale(QwtPlot.yLeft, ys.interval().maxValue() + yoff, ys.interval().maxValue() + yoff, self.axisStepSize(QwtPlot.yLeft))
elif getattr(self, "altPressed", False):
xs = self.axisScaleDiv(QwtPlot.xBottom)
xoff = d * (xs.interval().maxValue() - xs.interval().minValue()) / 100.
self.setAxisScale(QwtPlot.xBottom, xs.interval().minValue() - xoff, xs.interval().maxValue() - xoff, self.axisStepSize(QwtPlot.xBottom))
else:
ro, rn = .9**d, 1-.9**d
pos = self.mapFromGlobal(e.pos())
ex, ey = pos.x(), pos.y()
xs = self.axisScaleDiv(QwtPlot.xBottom)
x = self.invTransform(QwtPlot.xBottom, ex)
self.setAxisScale(QwtPlot.xBottom, ro*xs.interval().minValue() + rn*x, ro*xs.interval().maxValue() + rn*x, self.axisStepSize(QwtPlot.xBottom))
ys = self.axisScaleDiv(QwtPlot.yLeft)
y = self.invTransform(QwtPlot.yLeft, ey)
self.setAxisScale(QwtPlot.yLeft, ro*ys.interval().minValue() + rn*y, ro*ys.interval().maxValue() + rn*y, self.axisStepSize(QwtPlot.yLeft))
self.replot()
# does a point (x,y) lie inside one of the selection rectangles (polygons)
def isPointSelected(self, x,y):
for curve in self.selectionCurveList:
if curve.isInside(x,y): return 1
return 0
# return two lists of 0's and 1's whether each point in (xData, yData) is selected or not
def getSelectedPoints(self, xData, yData, validData):
import numpy
total = numpy.zeros(len(xData))
for curve in self.selectionCurveList:
total += curve.getSelectedPoints(xData, yData, validData)
unselected = numpy.equal(total, 0)
selected = 1 - unselected
return selected.tolist(), unselected.tolist()
# save graph in matplotlib python file
def saveToMatplotlib(self, fileName, size = QSize(400,400)):
f = open(fileName, "wt")
x1 = self.axisScaleDiv(QwtPlot.xBottom).interval().minValue(); x2 = self.axisScaleDiv(QwtPlot.xBottom).interval().maxValue()
y1 = self.axisScaleDiv(QwtPlot.yLeft).interval().minValue(); y2 = self.axisScaleDiv(QwtPlot.yLeft).interval().maxValue()
if self.showAxisScale == 0: edgeOffset = 0.01
else: edgeOffset = 0.08
f.write("from pylab import *\nfrom matplotlib import font_manager\n\n#possible changes in how the plot looks\n#rcParams['xtick.major.size'] = 0\n#rcParams['ytick.major.size'] = 0\n\n#constants\nx1 = %f; x2 = %f\ny1 = %f; y2 = %f\ndpi = 80\nxsize = %d\nysize = %d\nedgeOffset = %f\n\nfigure(facecolor = 'w', figsize = (xsize/float(dpi), ysize/float(dpi)), dpi = dpi)\nhold(True)\n" % (x1,x2,y1,y2,size.width(), size.height(), edgeOffset))
linestyles = ["None", "-", "-.", "--", ":", "-", "-"] # qwt line styles: NoCurve, Lines, Sticks, Steps, Dots, Spline, UserCurve
markers = ["None", "o", "s", "^", "d", "v", "^", "<", ">", "x", "+"] # curveSymbols = [None, Ellipse, Rect, Triangle, Diamond, DTriangle, UTriangle, LTriangle, RTriangle, XCross, Cross]
f.write("#add curves\n")
for c in self.itemList():
if not isinstance(c, QwtPlotCurve): continue
xData = [c.x(i) for i in range(c.dataSize())]
yData = [c.y(i) for i in range(c.dataSize())]
marker = markers[c.symbol().style()+1]
markersize = c.symbol().size().width()
markeredgecolor, foo = self._getColorFromObject(c.symbol().pen())
markerfacecolor, alphaS = self._getColorFromObject(c.symbol().brush())
colorP, alphaP = self._getColorFromObject(c.pen())
colorB, alphaB = self._getColorFromObject(c.brush())
alpha = min(alphaS, alphaP, alphaB)
linewidth = c.pen().width()
if c.__class__ == PolygonCurve and len(xData) == 4:
x0 = min(xData); x1 = max(xData); diffX = x1-x0
y0 = min(yData); y1 = max(yData); diffY = y1-y0
f.write("gca().add_patch(Rectangle((%f, %f), %f, %f, edgecolor=%s, facecolor = %s, linewidth = %d, fill = 1, alpha = %.3f))\n" % (x0,y0,diffX, diffY, colorP, colorB, linewidth, alpha))
elif c.style() < len(linestyles):
linestyle = linestyles[c.style()]
f.write("plot(%s, %s, marker = '%s', linestyle = '%s', markersize = %d, markeredgecolor = %s, markerfacecolor = %s, color = %s, linewidth = %d, alpha = %.3f)\n" % (xData, yData, marker, linestyle, markersize, markeredgecolor, markerfacecolor, colorP, linewidth, alpha))
f.write("\n# add markers\n")
for marker in self.itemList():
if not isinstance(marker, QwtPlotMarker): continue
x = marker.xValue()
y = marker.yValue()
text = str(marker.label().text())
align = marker.labelAlignment()
xalign = (align & Qt.AlignLeft and "right") or (align & Qt.AlignHCenter and "center") or (align & Qt.AlignRight and "left")
yalign = (align & Qt.AlignBottom and "top") or (align & Qt.AlignTop and "bottom") or (align & Qt.AlignVCenter and "center")
vertAlign = (yalign and ", verticalalignment = '%s'" % yalign) or ""
horAlign = (xalign and ", horizontalalignment = '%s'" % xalign) or ""
labelColor = marker.label().color()
color = (labelColor.red()/255., labelColor.green()/255., labelColor.blue()/255.)
alpha = labelColor.alpha()/255.
name = str(marker.label().font().family())
weight = marker.label().font().bold() and "bold" or "normal"
if marker.__class__ == RotatedMarker: extra = ", rotation = %f" % (marker.rotation)
else: extra = ""
f.write("text(%f, %f, '%s'%s%s, color = %s, name = '%s', weight = '%s'%s, alpha = %.3f)\n" % (x, y, text, vertAlign, horAlign, color, name, weight, extra, alpha))
# grid
f.write("# enable grid\ngrid(%s)\n\n" % (self.gridCurve.xEnabled() and self.gridCurve.yEnabled() and "True" or "False"))
# axis
if self.showAxisScale == 0:
f.write("#hide axis\naxis('off')\naxis([x1, x2, y1, y2])\ngca().set_position([edgeOffset, edgeOffset, 1 - 2*edgeOffset, 1 - 2*edgeOffset])\n")
else:
if self.axisScaleDraw(QwtPlot.yLeft).__class__ == DiscreteAxisScaleDraw:
labels = self.axisScaleDraw(QwtPlot.yLeft).labels
f.write("yticks(%s, %s)\nlabels = gca().get_yticklabels()\nsetp(labels, rotation=-%.3f) #, weight = 'bold', fontsize=10)\n\n" % (range(len(labels)), labels, self.axisScaleDraw(QwtPlot.yLeft).labelRotation()))
if self.axisScaleDraw(QwtPlot.xBottom).__class__ == DiscreteAxisScaleDraw:
labels = self.axisScaleDraw(QwtPlot.xBottom).labels
f.write("xticks(%s, %s)\nlabels = gca().get_xticklabels()\nsetp(labels, rotation=-%.3f) #, weight = 'bold', fontsize=10)\n\n" % (range(len(labels)), labels, self.axisScaleDraw(QwtPlot.xBottom).labelRotation()))
f.write("#set axis labels\nxlabel('%s', weight = 'bold')\nylabel('%s', weight = 'bold')\n\n" % (str(self.axisTitle(QwtPlot.xBottom).text()), str(self.axisTitle(QwtPlot.yLeft).text())))
f.write("\naxis([x1, x2, y1, y2])\ngca().set_position([edgeOffset, edgeOffset, 1 - 2*edgeOffset, 1 - 2*edgeOffset])\n#subplots_adjust(left = 0.08, bottom = 0.11, right = 0.98, top = 0.98)\n")
f.write("\n# possible settings to change\n#axes().set_frame_on(0) #hide the frame\n#axis('off') #hide the axes and labels on them\n\n")
if self.legend().itemCount() > 0:
legendItems = []
for widget in self.legend().legendItems():
item = self.legend().find(widget)
text = str(item.title().text()).replace("<b>", "").replace("</b>", "")
if not item.symbol():
legendItems.append((text, None, None, None, None))
else:
penC, penA = self._getColorFromObject(item.symbol().pen())
brushC, brushA = self._getColorFromObject(item.symbol().brush())
legendItems.append((text, markers[item.symbol().style()+1], penC, brushC, min(brushA, penA)))
f.write("""
#functions to show legend below the figure
def drawSomeLegendItems(x, items, itemsPerAxis = 1, yDiff = 0.0):
axes([x-0.1, .018*itemsPerAxis - yDiff, .2, .018], frameon = 0); axis('off')
lines = [plot([],[], label = text, marker = marker, markeredgecolor = edgeC, markerfacecolor = faceC, alpha = alpha) for (text, marker, edgeC, faceC, alpha) in items]
legend(lines, [item[0] for item in items], 'upper center', handlelen = 0.1, numpoints = 1, prop = font_manager.FontProperties(size=11))
gca().get_legend().draw_frame(False)
def drawLegend(items):
if not items: return
maxAttrInLine = 5
xs = [i/float(min(maxAttrInLine+1, len(items)+1)) for i in range(1, min(maxAttrInLine+1, len(items)+1))]
if items[0][1] == None: extraLabelForClass = [xs.pop(0), [items.pop(0)]]
itemsPerAxis = len(items) / len(xs) + (len(items) %% len(xs) != 0)
if "extraLabelForClass" in dir(): drawSomeLegendItems(extraLabelForClass[0], extraLabelForClass[1], itemsPerAxis, yDiff = 0.004)
for i, x in enumerate(xs):
drawSomeLegendItems(x, items[i*itemsPerAxis: min(len(items), (i+1)*itemsPerAxis)], itemsPerAxis)
items = %s
drawLegend(items)\n""" % (str(legendItems)))
f.write("\nshow()")
def _getColorFromObject(self, obj):
if isinstance(obj, QBrush) and obj.style() == Qt.NoBrush: return "'none'", 1
if isinstance(obj, QPen) and obj.style() == Qt.NoPen: return "'none'", 1
col = [obj.color().red(), obj.color().green(), obj.color().blue()];
col = tuple([v/float(255) for v in col])
return col, obj.color().alpha()/float(255)
| gpl-3.0 |
guschmue/tensorflow | tensorflow/examples/learn/text_classification_character_cnn.py | 8 | 5531 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using convolutional networks over characters for DBpedia dataset.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and serves as an alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
N_FILTERS = 10
FILTER_SHAPE1 = [20, 256]
FILTER_SHAPE2 = [20, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
MAX_LABEL = 15
CHARS_FEATURE = 'chars' # Name of the input character feature.
def char_cnn_model(features, labels, mode):
"""Character level convolutional neural network model to predict classes."""
features_onehot = tf.one_hot(features[CHARS_FEATURE], 256)
input_layer = tf.reshape(
features_onehot, [-1, MAX_DOCUMENT_LENGTH, 256, 1])
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.layers.conv2d(
input_layer,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE1,
padding='VALID',
# Add a ReLU for non linearity.
activation=tf.nn.relu)
# Max pooling across output of Convolution+Relu.
pool1 = tf.layers.max_pooling2d(
conv1,
pool_size=POOLING_WINDOW,
strides=POOLING_STRIDE,
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.layers.conv2d(
pool1,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE2,
padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.layers.dense(pool2, MAX_LABEL, activation=None)
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
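# Shape walk-through with the default constants above (batch size B):
#   one-hot input                    [B, 100, 256,  1]
#   conv1, 20x256 filters, VALID     [B,  81,   1, 10]
#   max-pool 4, stride 2, SAME       [B,  41,   1, 10]
#   transpose [0, 1, 3, 2]           [B,  41,  10,  1]
#   conv2, 20x10 filters, VALID      [B,  22,   1, 10]
#   reduce_max over axis 1 + squeeze [B,  10]
#   dense                            [B,  15]  (MAX_LABEL logits)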
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data, size='large')
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = tf.contrib.learn.preprocessing.ByteProcessor(
MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
x_train = x_train.reshape([-1, MAX_DOCUMENT_LENGTH, 1, 1])
x_test = x_test.reshape([-1, MAX_DOCUMENT_LENGTH, 1, 1])
# Build model
classifier = tf.estimator.Estimator(model_fn=char_cnn_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={CHARS_FEATURE: x_train},
y=y_train,
batch_size=128,
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={CHARS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy: {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
rishikksh20/scikit-learn | examples/mixture/plot_concentration_prior.py | 16 | 5657 | """
========================================================================
Concentration Prior Type Analysis of Variation Bayesian Gaussian Mixture
========================================================================
This example plots the ellipsoids obtained from a toy dataset (mixture of three
Gaussians) fitted by the ``BayesianGaussianMixture`` class models with a
Dirichlet distribution prior
(``weight_concentration_prior_type='dirichlet_distribution'``) and a Dirichlet
process prior (``weight_concentration_prior_type='dirichlet_process'``). On
each figure, we plot the results for three different values of the weight
concentration prior.
The ``BayesianGaussianMixture`` class can adapt its number of mixture
components automatically. The parameter ``weight_concentration_prior`` has a
direct link with the resulting number of components with non-zero weights.
Specifying a low value for the concentration prior will make the model put most
of the weight on a few components and set the remaining components' weights very close
to zero. High values of the concentration prior will allow a larger number of
components to be active in the mixture.
The Dirichlet process prior makes it possible to define an infinite number of components
and automatically selects the correct number of components: it activates a
component only if it is necessary.
In contrast, the classical finite mixture model with a Dirichlet
distribution prior will favor more uniformly weighted components and therefore
tends to divide natural clusters into unnecessary sub-components.
"""
# Author: Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from sklearn.mixture import BayesianGaussianMixture
print(__doc__)
def plot_ellipses(ax, weights, means, covars):
for n in range(means.shape[0]):
eig_vals, eig_vecs = np.linalg.eigh(covars[n])
unit_eig_vec = eig_vecs[0] / np.linalg.norm(eig_vecs[0])
angle = np.arctan2(unit_eig_vec[1], unit_eig_vec[0])
# Ellipse needs degrees
angle = 180 * angle / np.pi
        # scale the eigenvalues into ellipse axis lengths
eig_vals = 2 * np.sqrt(2) * np.sqrt(eig_vals)
ell = mpl.patches.Ellipse(means[n], eig_vals[0], eig_vals[1],
180 + angle)
ell.set_clip_box(ax.bbox)
ell.set_alpha(weights[n])
ell.set_facecolor('#56B4E9')
ax.add_artist(ell)
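# Note: the ellipse for component n is built from the eigendecomposition of its covariance
# -- the orientation comes from the leading eigenvector and each axis length is
# 2*sqrt(2)*sqrt(eigenvalue); the mixture weight is reused as the alpha channel, so
# components whose weight was driven towards zero by the prior are essentially invisible.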
def plot_results(ax1, ax2, estimator, X, y, title, plot_title=False):
ax1.set_title(title)
ax1.scatter(X[:, 0], X[:, 1], s=5, marker='o', color=colors[y], alpha=0.8)
ax1.set_xlim(-2., 2.)
ax1.set_ylim(-3., 3.)
ax1.set_xticks(())
ax1.set_yticks(())
plot_ellipses(ax1, estimator.weights_, estimator.means_,
estimator.covariances_)
ax2.get_xaxis().set_tick_params(direction='out')
ax2.yaxis.grid(True, alpha=0.7)
for k, w in enumerate(estimator.weights_):
ax2.bar(k, w, width=0.9, color='#56B4E9', zorder=3,
align='center')
ax2.text(k, w + 0.007, "%.1f%%" % (w * 100.),
horizontalalignment='center')
ax2.set_xlim(-.6, 2 * n_components - .4)
ax2.set_ylim(0., 1.1)
ax2.tick_params(axis='y', which='both', left='off',
right='off', labelleft='off')
ax2.tick_params(axis='x', which='both', top='off')
if plot_title:
ax1.set_ylabel('Estimated Mixtures')
ax2.set_ylabel('Weight of each component')
# Parameters of the dataset
random_state, n_components, n_features = 2, 3, 2
colors = np.array(['#0072B2', '#F0E442', '#D55E00'])
covars = np.array([[[.7, .0], [.0, .1]],
[[.5, .0], [.0, .1]],
[[.5, .0], [.0, .1]]])
samples = np.array([200, 500, 200])
means = np.array([[.0, -.70],
[.0, .0],
[.0, .70]])
# mean_precision_prior= 0.8 to minimize the influence of the prior
estimators = [
("Finite mixture with a Dirichlet distribution\nprior and "
r"$\gamma_0=$", BayesianGaussianMixture(
weight_concentration_prior_type="dirichlet_distribution",
n_components=2 * n_components, reg_covar=0, init_params='random',
max_iter=1500, mean_precision_prior=.8,
random_state=random_state), [0.001, 1, 1000]),
("Infinite mixture with a Dirichlet process\n prior and" r"$\gamma_0=$",
BayesianGaussianMixture(
weight_concentration_prior_type="dirichlet_process",
n_components=2 * n_components, reg_covar=0, init_params='random',
max_iter=1500, mean_precision_prior=.8,
random_state=random_state), [1, 1000, 100000])]
# Generate data
rng = np.random.RandomState(random_state)
X = np.vstack([
rng.multivariate_normal(means[j], covars[j], samples[j])
for j in range(n_components)])
y = np.concatenate([j * np.ones(samples[j], dtype=int)
for j in range(n_components)])
# Plot results in two different figures
for (title, estimator, concentrations_prior) in estimators:
plt.figure(figsize=(4.7 * 3, 8))
plt.subplots_adjust(bottom=.04, top=0.90, hspace=.05, wspace=.05,
left=.03, right=.99)
gs = gridspec.GridSpec(3, len(concentrations_prior))
for k, concentration in enumerate(concentrations_prior):
estimator.weight_concentration_prior = concentration
estimator.fit(X)
plot_results(plt.subplot(gs[0:2, k]), plt.subplot(gs[2, k]), estimator,
X, y, r"%s$%.1e$" % (title, concentration),
plot_title=k == 0)
plt.show()
| bsd-3-clause |
lucafon/ArtificialIntelligence | src/classification_sklearn/problem2_3.py | 1 | 1985 | '''
Created on Mar 5, 2017
@author: Luca Fontanili
'''
import sys
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
def main(args):
n_iter = 100
if len(args) != 3:
raise ValueError('Bad argument list')
inp = open(args[1], 'r')
learning_rates = [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 0.53]
out = open(args[2], 'w')
values = []
for line in inp:
params = line.strip().split(",")
values.append((1, float(params[0]),float(params[1]),float(params[2])))
dataset = pd.DataFrame(values,columns=['ones', 'age','weight','height'])
for feature in ['age', 'weight']:
dataset[feature] = (dataset[feature] - dataset[feature].mean())/dataset[feature].std()
X = dataset[['ones', 'age', 'weight']]
y = dataset[['height']]
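    # The nested loops below implement plain batch gradient descent for
    # linear regression: for every learning rate alpha, repeat n_iter times
    #     beta_j <- beta_j - (alpha / n) * sum_i (x_i . beta - y_i) * x_ij
    # over the three columns ['ones', 'age', 'weight'].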
for alpha in learning_rates:
beta = np.zeros(3)
# print('new alpha: ',alpha)
for i in range(n_iter):
old_beta = np.array(beta)
count = 0
for feature in ['ones', 'age', 'weight']:
beta[count] = old_beta[count] - alpha/len(y) * np.sum(((X.dot(old_beta) - y.transpose())*(X[feature].transpose())).transpose())
count += 1
# print(beta)
# print(compute_cost(X, beta, y))
out.write(str(alpha) + ',' + str(n_iter) + ',' + str(beta[0]) + ',' + str(beta[1]) + ',' + str(beta[2]) + '\n')
threedee = plt.figure().gca(projection='3d')
threedee.scatter(dataset['age'], dataset['weight'], dataset['height'])
threedee.set_xlabel('Age (years)')
threedee.set_ylabel('Weight (kilos)')
threedee.set_zlabel('Height (meters)')
# threedee.plot_surface(xx,yy,z1, color='blue')
plt.show()
def compute_cost(X, beta, y):
squared_error = np.sum(((X.dot(beta)-y.transpose())**2).transpose())
J = squared_error/(2*len(y))
return J
if __name__ == '__main__':
main(sys.argv) | mit |
parnellj/fitbit_generator | fitbit_generator/extractor.py | 1 | 8485 | from __future__ import division
import os
import fitbit
import glob
from datetime import datetime as dt, timedelta as tdelt
import numpy as np
import pandas as pd
pd.set_option('display.max_colwidth', -1)
pd.set_option('display.max_rows', 1000)
CONFIGS = os.path.join('.', 'config')
INPATH = os.path.join('.', 'inputs')
OUTPATH = os.path.join('.', 'outputs')
LOCAL_PATH = os.path.join('D:', os.sep, 'Dropbox', 'food_and_fitness', 'fitbit_data')
with open(os.path.join(CONFIGS, 'api_key.txt'), 'r') as f:
t = []
for line in f.readlines():
t.append(tuple(line[:-1].split(' = ')))
token = dict(t)
CLIENT_ID = token['CLIENT_ID']
CLIENT_SECRET = token['CLIENT_SECRET']
REDIRECT_URI = token['REDIRECT_URI']
ZERO_DAY = dt(1900, 1, 1, 0, 0, 0)
COL_PARAMS = {'steps': {'fill': 'subdivide', 'aggregate': sum},
'elevation': {'fill': 'subdivide', 'aggregate': sum},
'heart': {'fill': 'interpolate', 'aggregate':np.mean},
'sleep': {'fill': 'repeat', 'aggregate': lambda x: sum(x > 0) / 3600}}
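# How the fill/aggregate settings above are used (inferred from
# add_observation() and resample() below):
#   'subdivide'   - split a per-interval total evenly over the seconds it spans
#   'interpolate' - linearly interpolate between consecutive readings (heart rate)
#   'repeat'      - hold each reported value constant until the next one (sleep)
#   'aggregate'   - function applied per column when resampling to a coarser grid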
class Dataset:
def __init__(self, start_day=None, end_day=None, load_dir=None):
self.time_series = None
self.start_day = None
self.end_day = None
# 1. If a source directory is provided, load raw datasets from there.
# 2. If no start and end day is provided, assume the range is yesterday -> today
# 3. Otherwise, initialize a new time series.
if load_dir is not None:
self.load_raw(load_dir)
self.start_day = self.time_series.index.min()
self.end_day = self.time_series.index.max()
return
elif start_day is None and end_day is None:
self.start_day = dt.today() - tdelt(days=1)
self.end_day = dt.today()
else:
self.start_day = start_day
self.end_day = end_day
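        # Build a 1-second resolution index covering start_day through the end
        # of end_day (the extra day makes the final day's samples inclusive).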
self.time_series = pd.DataFrame(index=pd.date_range(self.start_day, self.end_day + tdelt(days=1), freq='S'))
def download_data(self, fields=None):
if fields is None:
fields = ['heart', 'steps', 'elevation', 'sleep']
# Load an existing authorization token into memory
with open(os.path.join(CONFIGS, 'auth_token.txt'), 'r') as f:
t = []
for line in f.readlines():
t.append(tuple(line[:-1].split(' = ')))
token = dict(t)
# Initiate the fitbit API client
fb = fitbit.Fitbit(client_id=CLIENT_ID, client_secret=CLIENT_SECRET,
access_token=token['access_token'],
refresh_token=token['refresh_token'],
expires_at=float(token['expires_at']))
for day in [self.end_day - tdelt(days=x) for x in range(0, (self.end_day - self.start_day).days + 1)]:
if 'heart' in fields:
heart_intraday = fb.intraday_time_series(resource='activities/heart',
base_date=day, detail_level='1sec')
heart_data = heart_intraday[u'activities-heart-intraday'][u'dataset']
if heart_data:
if heart_data[0][u'time'] != u'00:00:00':
heart_data = [{u'time': u'00:00:00', u'value': 0}] + heart_data
if heart_data[-1][u'time'] != u'23:59:59':
heart_data = heart_data + [{u'time': u'23:59:59', u'value': 0}]
self.add_observation('heart', day, heart_data, fill='interpolate')
else:
self.add_observation('heart', day, heart_data, fill='NA')
if 'steps' in fields:
steps_intraday = fb.intraday_time_series(resource='activities/steps',
base_date=day, detail_level='1min')
step_data = steps_intraday[u'activities-steps-intraday'][u'dataset']
if step_data:
if step_data[0][u'time'] != u'00:00:00':
step_data = [{u'time': u'00:00:00', u'value': 0}] + step_data
if step_data[-1][u'time'] != u'23:59:59':
step_data = step_data + [{u'time': u'23:59:59', u'value': 0}]
self.add_observation('steps', day, step_data, fill='subdivide')
else:
self.add_observation('steps', day, step_data, fill='NA')
if 'elevation' in fields:
elevation_intraday = fb.intraday_time_series(resource='activities/elevation',
base_date=day, detail_level='1min')
elevation_data = elevation_intraday[u'activities-elevation-intraday'][u'dataset']
if elevation_data:
if elevation_data[0][u'time'] != u'00:00:00':
elevation_data = [{u'time': u'00:00:00', u'value': 0}] + elevation_data
if elevation_data[-1][u'time'] != u'23:59:59':
elevation_data = elevation_data + [{u'time': u'23:59:59', u'value': 0}]
self.add_observation('elevation', day, elevation_data, fill='subdivide')
else:
self.add_observation('elevation', day, elevation_data, fill='NA')
if 'sleep' in fields:
sleep = fb.get_sleep(date=day)
try:
sleep_data = [{u'value': s[u'value'], u'time': s[u'dateTime']}
for s in sleep[u'sleep'][0][u'minuteData']]
except IndexError:
sleep_data = None
if sleep_data:
self.add_observation('sleep', day, sleep_data, fill='repeat')
else:
self.add_observation('sleep', day, sleep_data, fill='NA')
def add_observation(self, colname, start_day, observations, fill='subdivide'):
if observations is None:
return
day = start_day
for a, b in zip(observations, observations[1:]):
this_time = dt.combine(day, dt.strptime(a['time'], '%H:%M:%S').time())
next_time = dt.combine(day, (dt.strptime(b['time'], '%H:%M:%S') - tdelt(seconds=1)).time())
# In case a day boundary is crossed
if next_time.time() < this_time.time():
day = (day + tdelt(days=1))
next_time = dt.combine(day, (dt.strptime(b['time'], '%H:%M:%S') - tdelt(seconds=1)).time())
time_steps = (next_time - this_time).seconds + 1
if fill == 'subdivide': period_obs = [a['value'] / time_steps] * time_steps
elif fill == 'interpolate': period_obs = np.linspace(a['value'], b['value'], time_steps)
elif fill == 'repeat': period_obs = [float(a['value'])] * time_steps
elif fill == 'NA': period_obs = ['NA'] * time_steps
else: period_obs = [a['value']] * time_steps
try: self.time_series.loc[this_time:next_time, colname] = period_obs
except ValueError: print 'Value Error'
def resample(self, resolution='H'):
return self.time_series.resample(resolution).agg({k: col['aggregate']
for k, col in COL_PARAMS.iteritems()})
def save_raw(self):
ts_list = [group[1] for group in self.time_series.groupby(self.time_series.index.day)]
for ts in ts_list[:-1]:
ts.to_csv(os.path.join(LOCAL_PATH, '0 - raw', ts.index.min().strftime('%Y%m%d') + '.csv'))
def load_raw(self, directory):
all_files = glob.glob(os.path.join(directory, '*.csv'))
all_raw_dfs = [pd.read_csv(f, index_col=0) for f in all_files]
dt_dfs = []
for df in all_raw_dfs:
df.index = pd.to_datetime(df.index)
dt_dfs.append(df)
self.time_series = pd.concat(dt_dfs)
if __name__ == '__main__':
# ds = Dataset(load_dir=os.path.join(LOCAL_PATH, '0 - raw'))
start_day = dt(2017, 9, 24)
end_day = dt(2017, 10, 28)
ds = Dataset(start_day, end_day)
ds.download_data()
ds.save_raw()
| gpl-3.0 |
rishikksh20/scikit-learn | sklearn/utils/tests/test_seq_dataset.py | 79 | 2497 | # Author: Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from sklearn.utils.seq_dataset import ArrayDataset, CSRDataset
from sklearn.datasets import load_iris
from sklearn.utils.testing import assert_equal
iris = load_iris()
X = iris.data.astype(np.float64)
y = iris.target.astype(np.float64)
X_csr = sp.csr_matrix(X)
sample_weight = np.arange(y.size, dtype=np.float64)
def assert_csr_equal(X, Y):
X.eliminate_zeros()
Y.eliminate_zeros()
assert_equal(X.shape[0], Y.shape[0])
assert_equal(X.shape[1], Y.shape[1])
assert_array_equal(X.data, Y.data)
assert_array_equal(X.indices, Y.indices)
assert_array_equal(X.indptr, Y.indptr)
def test_seq_dataset():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
for dataset in (dataset1, dataset2):
for i in range(5):
# next sample
xi_, yi, swi, idx = dataset._next_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_csr_equal(xi, X_csr[idx])
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
# random sample
xi_, yi, swi, idx = dataset._random_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_csr_equal(xi, X_csr[idx])
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
def test_seq_dataset_shuffle():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
# not shuffled
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, i)
assert_equal(idx2, i)
for i in range(5):
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
seed = 77
dataset1._shuffle_py(seed)
dataset2._shuffle_py(seed)
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, idx2)
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
| bsd-3-clause |
abyssxsy/gnuradio | gr-filter/examples/fft_filter_ccc.py | 47 | 4363 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fft_filter_ccc(gr.top_block):
def __init__(self, N, fs, bw0, bw1, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw0 = bw0
self._bw1 = bw1
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.complex_band_pass_2(1, self._fs,
self._bw0, self._bw1,
self._tw, self._at)
print "Num. Taps: ", len(taps)
self.src = analog.noise_source_c(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_gr_complex, self._nsamps)
self.filt0 = filter.fft_filter_ccc(self._decim, taps)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_out = blocks.vector_sink_c()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-S", "--start-pass", type="eng_float", default=1000,
help="Start of Passband [default=%default]")
parser.add_option("-E", "--end-pass", type="eng_float", default=2000,
help="End of Passband [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
help="Decmation factor [default=%default]")
(options, args) = parser.parse_args ()
put = example_fft_filter_ccc(options.nsamples,
options.samplerate,
options.start_pass,
options.end_pass,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
samuelefiorini/minimal | examples/gl.py | 1 | 2452 | #!/usr/bin/env python
import numpy as np
from minimal.estimators import GroupLasso, GroupLassoClassifier
from sklearn import metrics
def main():
# np.random.seed(42)
# The number of samples is defined as:
n = 200
# The number of features per group is defined as:
d_group = 10
# The number of groups
n_group = 10
# The final dimension of the data
d = d_group * n_group
# Create covariance matrix
rho = 0.5
THETA = np.zeros((d_group, d_group))
for i in range(d_group):
for j in range(d_group):
THETA[i, j] = rho**np.abs(i-j)
# Define X randomly as simulated data with group structure
X = np.hstack([
np.random.multivariate_normal(mean=np.ones(d_group)*(4*np.random.rand(1)-2),
cov=THETA, size=(n)) for i in range(n_group)])/np.sqrt(n)
X = X - np.mean(X, axis=0)
    # Define beta_star, a sparse coefficient vector with group structure:
    # only groups g0 and g2 are non-zero (relevant)
beta_star = np.zeros(d)
beta_star[:d_group] = 1 + np.hstack([np.random.randn(1)]*d_group)
beta_star[2*d_group:3*d_group] = -1 + np.hstack([np.random.randn(1)]*d_group)
# Define y as X*beta + noise
noise = np.random.randn(n)
y = np.sign(X.dot(beta_star) + noise)
# y = X.dot(beta_star) + noise
print(X.shape)
print(beta_star.shape)
print(y.shape)
# Evaluate the chance probability
chance = 0.5 + abs(y.sum())/(2.0*n)
print("Chance: {:2.3f}".format(chance))
print('----------------------')
# Best model error
# print("Best model error: {:2.3f}".format(np.mean(abs(y - X.dot(beta_star)))))
# Define the groups variable as in
# parsimony.functions.nesterov.gl.linear_operator_from_groups
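    # Each entry of 'groups' holds the column indices of one group: the first
    # group covers columns 0..9, the second 10..19, and so on.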
groups = [map(lambda x: x+i, range(d_group)) for i in range(0, d, d_group)]
# print(groups)
print(beta_star)
# mdl = GroupLasso(alpha=0.1, groups=groups) # square
# mdl = GroupLasso(alpha=0.01, groups=groups)
mdl = GroupLassoClassifier(alpha=0.04, groups=groups, loss='square')
# mdl = GroupLassoClassifier(alpha=0.01, groups=groups, loss='logit')
mdl.fit(X, y)
print(mdl.coef_)
print("Estimated prediction accuracy = {:2.3f}".format(
metrics.accuracy_score(mdl.predict(X), y)))
# print("Estimated prediction error = {:2.3f}".format(
# metrics.mean_absolute_error(mdl.predict(X), y)))
if __name__ == '__main__':
main()
| bsd-2-clause |
dhennes/pykep | PyKEP/orbit_plots/_plots.py | 1 | 15613 | def plot_planet(plnt, t0='PyKEP.epoch(0)', N=60, units=1.0, color='k', alpha=1.0, s=40, legend=False, ax=None):
"""
ax = plot_planet(plnt, t0='PyKEP.epoch(0)', N=60, units=1.0, color='k', s=40, legend=False, ax=None)
- ax: 3D axis object created using fig.gca(projection='3d')
- plnt: PyKEP.planet object we want to plot
- t0: PyKEP.epoch object indicating when we want to plot the planet position
- units: the length unit to be used in the plot
- color: matplotlib color to use to plot the line
- s: planet size (pixel^2)
    - legend: when True, plots the legend with the planet name and the epoch
Plots the planet position and its orbit
Example::
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.gca(projection='3d')
pl = planet_ss('earth')
plot_planet(pl, ax=ax)
plt.show()
"""
from PyKEP import MU_SUN, SEC2DAY, epoch, AU
from math import pi, sqrt
import numpy as np
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
if ax is None:
fig = plt.figure()
axis = fig.gca(projection='3d')
else:
axis = ax
if t0 == 'PyKEP.epoch(0)':
t0 = epoch(0)
# orbit period at epoch
T = plnt.compute_period(t0) * SEC2DAY
# points where the orbit will be plotted
when = np.linspace(0, T, N)
# Ephemerides Calculation for the given planet
x = np.array([0.0] * N)
y = np.array([0.0] * N)
z = np.array([0.0] * N)
for i, day in enumerate(when):
r, v = plnt.eph(epoch(t0.mjd2000 + day))
x[i] = r[0] / units
y[i] = r[1] / units
z[i] = r[2] / units
# Actual plot commands
if legend:
label = plnt.name + " " + t0.__repr__()[0:11]
else:
label = None
axis.plot(x, y, z, label=label, c=color, alpha=alpha)
axis.scatter([x[0]], [y[0]], [z[0]], s=s, marker='o', alpha=0.8, c=color)
if legend:
axis.legend()
if ax is None: # show only if axis is not set
plt.show()
return axis
def plot_lambert(l, N=60, sol=0, units=1.0, color='b', legend=False, ax=None, alpha=1.):
"""
    ax = plot_lambert(l, N=60, sol=0, units=1.0, color='b', legend=False, ax=None, alpha=1.)
- ax: 3D axis object created using fig.gca(projection='3d')
- l: PyKEP.lambert_problem object
- N: number of points to be plotted along one arc
- sol: solution to the Lambert's problem we want to plot (must be in 0..Nmax*2)
where Nmax is the maximum number of revolutions for which there exist a solution.
- units: the length unit to be used in the plot
- color: matplotlib color to use to plot the line
- legend: when True it plots also the legend with info on the Lambert's solution chosen
Plots a particular solution to a Lambert's problem
Example::
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.gca(projection='3d')
t1 = epoch(0)
t2 = epoch(640)
dt = (t2.mjd2000 - t1.mjd2000) * DAY2SEC
pl = planet_ss('earth')
plot_planet(pl, t0=t1, ax=ax, color='k')
rE,vE = pl.eph(t1)
pl = planet_ss('mars')
plot_planet(pl, t0=t2, ax=ax, color='r')
rM, vM = pl.eph(t2)
l = lambert_problem(rE,rM,dt,MU_SUN)
plot_lambert(l, ax=ax, color='b')
plot_lambert(l, sol=1, ax=ax, color='g')
plot_lambert(l, sol=2, ax=ax, color='g')
plt.show()
"""
from PyKEP import propagate_lagrangian, AU
import numpy as np
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
if ax is None:
fig = plt.figure()
axis = fig.gca(projection='3d')
else:
axis = ax
if sol > l.get_Nmax() * 2:
raise ValueError("sol must be in 0 .. NMax*2 \n * Nmax is the maximum number of revolutions for which there exist a solution to the Lambert's problem \n * You can compute Nmax calling the get_Nmax() method of the lambert_problem object")
# We extract the relevant information from the Lambert's problem
r = l.get_r1()
v = l.get_v1()[sol]
T = l.get_tof()
mu = l.get_mu()
# We define the integration time ...
dt = T / (N - 1)
    # ... and allocate the cartesian components for r
x = np.array([0.0] * N)
y = np.array([0.0] * N)
z = np.array([0.0] * N)
# We calculate the spacecraft position at each dt
for i in range(N):
x[i] = r[0] / units
y[i] = r[1] / units
z[i] = r[2] / units
r, v = propagate_lagrangian(r, v, dt, mu)
# And we plot
if legend:
label = 'Lambert solution (' + str((sol + 1) / 2) + ' revs.)'
else:
label = None
axis.plot(x, y, z, c=color, label=label, alpha=alpha)
if legend:
axis.legend()
if ax is None: # show only if axis is not set
plt.show()
return axis
def plot_kepler(r, v, t, mu, N=60, units=1, color='b', legend=False, ax=None):
"""
ax = plot_kepler(r, v, t, mu, N=60, units=1, color='b', legend=False, ax=None):
- ax: 3D axis object created using fig.gca(projection='3d')
- r: initial position (cartesian coordinates)
- v: initial velocity (cartesian coordinates)
- t: propagation time
- mu: gravitational parameter
- N: number of points to be plotted along one arc
- units: the length unit to be used in the plot
- color: matplotlib color to use to plot the line
    - legend: when True, also plots the legend
Plots the result of a keplerian propagation
"""
from PyKEP import propagate_lagrangian
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
if ax is None:
fig = plt.figure()
axis = fig.gca(projection='3d')
else:
axis = ax
# We define the integration time ...
dt = t / (N - 1)
# ... and calculate the cartesian components for r
x = [0.0] * N
y = [0.0] * N
z = [0.0] * N
# We calculate the spacecraft position at each dt
for i in range(N):
x[i] = r[0] / units
y[i] = r[1] / units
z[i] = r[2] / units
r, v = propagate_lagrangian(r, v, dt, mu)
# And we plot
if legend:
label = 'ballistic arc'
else:
label = None
axis.plot(x, y, z, c=color, label=label)
if legend:
axis.legend()
if ax is None: # show only if axis is not set
plt.show()
return axis
def plot_taylor(r, v, m, u, t, mu, veff, N=60, units=1, color='b', legend=False, ax=None):
"""
ax = plot_taylor(r, v, m, u, t, mu, veff, N=60, units=1, color='b', legend=False, ax=None):
- ax: 3D axis object created using fig.gca(projection='3d')
- r: initial position (cartesian coordinates)
- v: initial velocity (cartesian coordinates)
- m: initial mass
- u: cartesian components for the constant thrust
- t: propagation time
- mu: gravitational parameter
- veff: the product Isp * g0
- N: number of points to be plotted along one arc
- units: the length unit to be used in the plot
- color: matplotlib color to use to plot the line
- legend: when True it plots also the legend
Plots the result of a taylor propagation of constant thrust
"""
from PyKEP import propagate_taylor
import matplotlib.pyplot as plt
if ax is None:
fig = plt.figure()
axis = fig.gca(projection='3d')
else:
axis = ax
# We define the integration time ...
dt = t / (N - 1)
    # ... and calculate the cartesian components for r
x = [0.0] * N
y = [0.0] * N
z = [0.0] * N
# We calculate the spacecraft position at each dt
for i in range(N):
x[i] = r[0] / units
y[i] = r[1] / units
z[i] = r[2] / units
r, v, m = propagate_taylor(r, v, m, u, dt, mu, veff, -10, -10)
# And we plot
if legend:
label = 'constant thrust arc'
else:
label = None
axis.plot(x, y, z, c=color, label=label)
if legend:
axis.legend()
if ax is None: # show only if axis is not set
plt.show()
return axis
def plot_sf_leg(leg, N=5, units=1, color='b', legend=False, plot_line=True, ax=None):
"""
    ax = plot_sf_leg(leg, N=5, units=1, color='b', legend=False, plot_line=True, ax=None):
- ax: 3D axis object created using fig.gca(projection='3d')
- leg: a PyKEP.sims_flanagan.leg
- N: number of points to be plotted along one arc
- units: the length unit to be used in the plot
- color: matplotlib color to use to plot the trajectory and the grid points
    - legend: when True, also plots the legend
- plot_line: when True plots also the trajectory (between mid-points and grid points)
Plots a Sims-Flanagan leg
Example::
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.gca(projection='3d')
t1 = epoch(0)
pl = planet_ss('earth')
rE,vE = pl.eph(t1)
plot_planet(pl,t0=t1, units=AU, ax=ax)
t2 = epoch(440)
pl = planet_ss('mars')
rM, vM = pl.eph(t2)
plot_planet(pl,t0=t2, units=AU, ax=ax)
sc = sims_flanagan.spacecraft(4500,0.5,2500)
x0 = sims_flanagan.sc_state(rE,vE,sc.mass)
xe = sims_flanagan.sc_state(rM,vM,sc.mass)
l = sims_flanagan.leg(t1,x0,[1,0,0]*5,t2,xe,sc,MU_SUN)
plot_sf_leg(l, units=AU, ax=ax)
"""
from PyKEP import propagate_lagrangian, AU, DAY2SEC, G0, propagate_taylor
import numpy as np
from scipy.linalg import norm
from math import exp
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
if ax is None:
fig = plt.figure()
axis = fig.gca(projection='3d')
else:
axis = ax
# We compute the number of segments for forward and backward propagation
n_seg = len(leg.get_throttles())
fwd_seg = (n_seg + 1) // 2
back_seg = n_seg // 2
# We extract information on the spacecraft
sc = leg.get_spacecraft()
isp = sc.isp
max_thrust = sc.thrust
# And on the leg
throttles = leg.get_throttles()
mu = leg.get_mu()
# Forward propagation
    # x,y,z contain the cartesian components of all points (grid + midpoints)
x = [0.0] * (fwd_seg * 2 + 1)
y = [0.0] * (fwd_seg * 2 + 1)
z = [0.0] * (fwd_seg * 2 + 1)
state = leg.get_xi()
# Initial conditions
r = state.r
v = state.v
m = state.m
x[0] = r[0] / units
y[0] = r[1] / units
z[0] = r[2] / units
# We compute all points by propagation
for i, t in enumerate(throttles[:fwd_seg]):
dt = (t.end.mjd - t.start.mjd) * DAY2SEC
alpha = min(norm(t.value), 1.0)
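        # alpha is the throttle magnitude (clipped at 1); below it colours the
        # arc from blue (coasting) to red (full thrust).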
# Keplerian propagation and dV application
if leg.high_fidelity is False:
dV = [max_thrust / m * dt * dumb for dumb in t.value]
if plot_line:
plot_kepler(r, v, dt / 2, mu, N=N, units=units, color=(alpha, 0, 1 - alpha), ax=axis)
r, v = propagate_lagrangian(r, v, dt / 2, mu)
x[2 * i + 1] = r[0] / units
y[2 * i + 1] = r[1] / units
z[2 * i + 1] = r[2] / units
# v= v+dV
v = [a + b for a, b in zip(v, dV)]
if plot_line:
plot_kepler(r, v, dt / 2, mu, N=N, units=units, color=(alpha, 0, 1 - alpha), ax=axis)
r, v = propagate_lagrangian(r, v, dt / 2, mu)
x[2 * i + 2] = r[0] / units
y[2 * i + 2] = r[1] / units
z[2 * i + 2] = r[2] / units
m *= exp(-norm(dV) / isp / G0)
# Taylor propagation of constant thrust u
else:
u = [max_thrust * dumb for dumb in t.value]
if plot_line:
plot_taylor(r, v, m, u, dt / 2, mu, isp * G0, N=N, units=units, color=(alpha, 0, 1 - alpha), ax=axis)
r, v, m = propagate_taylor(r, v, m, u, dt / 2, mu, isp * G0, -10, -10)
x[2 * i + 1] = r[0] / units
y[2 * i + 1] = r[1] / units
z[2 * i + 1] = r[2] / units
if plot_line:
plot_taylor(r, v, m, u, dt / 2, mu, isp * G0, N=N, units=units, color=(alpha, 0, 1 - alpha), ax=axis)
r, v, m = propagate_taylor(r, v, m, u, dt / 2, mu, isp * G0, -10, -10)
x[2 * i + 2] = r[0] / units
y[2 * i + 2] = r[1] / units
z[2 * i + 2] = r[2] / units
x_grid = x[::2]
y_grid = y[::2]
z_grid = z[::2]
x_midpoint = x[1::2]
y_midpoint = y[1::2]
z_midpoint = z[1::2]
axis.scatter(x_grid[:-1], y_grid[:-1], z_grid[:-1], label='nodes', marker='o')
axis.scatter(x_midpoint, y_midpoint, z_midpoint, label='mid-points', marker='x')
axis.scatter(x_grid[-1], y_grid[-1], z_grid[-1], marker='^', c='y', label='mismatch point')
# Backward propagation
    # x,y,z will contain the cartesian components of all points (grid + midpoints)
x = [0.0] * (back_seg * 2 + 1)
y = [0.0] * (back_seg * 2 + 1)
z = [0.0] * (back_seg * 2 + 1)
state = leg.get_xf()
# Final conditions
r = state.r
v = state.v
m = state.m
x[-1] = r[0] / units
y[-1] = r[1] / units
z[-1] = r[2] / units
for i, t in enumerate(throttles[-1:-back_seg - 1:-1]):
dt = (t.end.mjd - t.start.mjd) * DAY2SEC
alpha = min(norm(t.value), 1.0)
if leg.high_fidelity is False:
dV = [max_thrust / m * dt * dumb for dumb in t.value]
if plot_line:
plot_kepler(r, v, -dt / 2, mu, N=N, units=units, color=(alpha, 0, 1 - alpha), ax=axis)
r, v = propagate_lagrangian(r, v, -dt / 2, mu)
x[-2 * i - 2] = r[0] / units
y[-2 * i - 2] = r[1] / units
z[-2 * i - 2] = r[2] / units
# v= v+dV
v = [a - b for a, b in zip(v, dV)]
if plot_line:
plot_kepler(r, v, -dt / 2, mu, N=N, units=units, color=(alpha, 0, 1 - alpha), ax=axis)
r, v = propagate_lagrangian(r, v, -dt / 2, mu)
x[-2 * i - 3] = r[0] / units
y[-2 * i - 3] = r[1] / units
z[-2 * i - 3] = r[2] / units
m *= exp(norm(dV) / isp / G0)
else:
u = [max_thrust * dumb for dumb in t.value]
if plot_line:
plot_taylor(r, v, m, u, -dt / 2, mu, isp * G0, N=N, units=units, color=(alpha, 0, 1 - alpha), ax=axis)
r, v, m = propagate_taylor(r, v, m, u, -dt / 2, mu, isp * G0, -10, -10)
x[-2 * i - 2] = r[0] / units
y[-2 * i - 2] = r[1] / units
z[-2 * i - 2] = r[2] / units
if plot_line:
plot_taylor(r, v, m, u, -dt / 2, mu, isp * G0, N=N, units=units, color=(alpha, 0, 1 - alpha), ax=axis)
r, v, m = propagate_taylor(r, v, m, u, -dt / 2, mu, isp * G0, -10, -10)
x[-2 * i - 3] = r[0] / units
y[-2 * i - 3] = r[1] / units
z[-2 * i - 3] = r[2] / units
x_grid = x[::2]
y_grid = y[::2]
z_grid = z[::2]
x_midpoint = x[1::2]
y_midpoint = y[1::2]
z_midpoint = z[1::2]
axis.scatter(x_grid[1:], y_grid[1:], z_grid[1:], marker='o')
axis.scatter(x_midpoint, y_midpoint, z_midpoint, marker='x')
axis.scatter(x_grid[0], y_grid[0], z_grid[0], marker='^', c='y')
if legend:
axis.legend()
if ax is None: # show only if axis is not set
plt.show()
return axis
| gpl-3.0 |
davidthaler/arboretum | arboretum/datasets/load_data.py | 1 | 2875 | '''
Load functions for datasets that load and split data, ensuring correct
dtype for arboretum (ndarray of float), and splitting larger datasets
into train/test folds.
author: David Thaler
date: September 2017
'''
import numpy as np
import pandas as pd
import os
DATA_DIR = os.path.join(os.path.split(__file__)[0], 'data')
def load_iris(target=1):
'''
Loads the irises data as a binary classification problem
with one of the classes as the target.
This is a small problem suitable for smoke testing.
Args:
target: class number (0, 1 or 2) of the positive class
Returns:
data, x, of shape (150 x 4) and labels y in {0,1} shape (150,)
'''
path = os.path.join(DATA_DIR, 'iris.csv')
iris = pd.read_csv(path)
y = iris.y.values
y = (y == target).astype(float)
x = iris.values[:, 1:]
return x, y
def load_mtcars():
'''
Loads the mtcars data, a small regression problem.
Returns:
data, x, of shape (32 x 10) and targets, y, of shape (32,)
'''
path = os.path.join(DATA_DIR, 'mtcars.csv')
mtcars = pd.read_csv(path)
y = mtcars.mpg.values
x = mtcars.drop(['CarName', 'mpg'], axis=1).values
return x, y
def load_spam():
'''
Loads the spam data, a medium-sized binary classification problem.
Data is split into training (n=3065) and test (n=1536) folds.
Returns:
xtrain (3065x57), ytrain (3065,), xtest (1565x57), ytest(1565,)
'''
path = os.path.join(DATA_DIR, 'spam.csv.gz')
spam = pd.read_csv(path)
xtr = spam[~spam.testid].values.astype(float)[:, 2:]
xte = spam[spam.testid].values.astype(float)[:, 2:]
ytr = spam.spam[~spam.testid].values.astype(float)
yte = spam.spam[spam.testid].values.astype(float)
return xtr, ytr, xte, yte
def load_als():
'''
Loads the als (Lou Gehrig's) data, a medium-sized regression problem.
Data is split into training (n=1197) and test (n=625) folds.
Returns:
xtrain (1197x369), ytrain (1197,), xtest (625x369), ytest(625,)
'''
path = os.path.join(DATA_DIR, 'als.csv.gz')
als = pd.read_csv(path)
xtr = als[~als.testset].values.astype(float)[:, 2:]
xte = als[als.testset].values.astype(float)[:, 2:]
ytr = als.dFRS[~als.testset].values.astype(float)
yte = als.dFRS[als.testset].values.astype(float)
return xtr, ytr, xte, yte
def load_diabetes():
'''
Loads the diabetes dataset and splits it into train and test sets,
with every 3rd row in test.
Returns:
xtrain, ytrain, xtest, ytest
'''
path = os.path.join(DATA_DIR, 'diabetes.csv.gz')
data = pd.read_csv(path)
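    # idx is a repeating True, True, False mask: two of every three rows go to
    # the training fold and the remaining one to the test fold.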
idx = np.tile([True, True, False], 150)[:len(data)]
xtr = data.values[idx, :-1]
ytr = data.prog.values[idx]
xte = data.values[~idx, :-1]
yte = data.prog.values[~idx]
return xtr, ytr, xte, yte
| mit |
znes/renpass_gis | renpass/examples/scripting/investment.py | 1 | 1793 | # -*- coding: utf-8 -*-
""" This module is designed to contain classes that act as simplified / reduced
energy specific interfaces (facades) for solph components to simplify its
application and work with the oemof datapackage - reader functionality
SPDX-License-Identifier: GPL-3.0-or-later
"""
from renpass import facades as fc
from oemof.solph import EnergySystem, Model
import pandas as pd
# initialise oemof energy system object
es = EnergySystem(timeindex=pd.date_range('2018', freq='H', periods=3))
# buses
el1 = fc.Bus('electricity1')
el2 = fc.Bus('electricity2')
heat = fc.Bus('heat')
biomass = fc.Bus('biomass', balanced=False)
gas = fc.Bus('gas', balanced=False)
st = fc.Dispatchable('st', bus=el1, carrier='biogas', tech='st', capacity=10,
marginal_cost=[0.1, 5, 100],
edge_parameters={
'flow': 10,
'positive_gradient': {
'ub': 0.1,
'costs': 0.2}}, commitable=False)
wind = fc.Volatile('wt', bus=el1, carrier='wind', tech='wind',
capacity_cost=20, profile=[0.1, 0.2, 0],
edge_parameters={'summed_max':10})
sto = fc.Storage('sto', bus=el2, carrier='electricity', tech='battery',
commitable=False, capacity_cost=4, capacity_ratio=0.5)
conv = fc.Conversion('conv', from_bus=el2, to_bus=heat, efficiency=0.95,
capacity=2)
load = fc.Load('load', bus=el1, amount=1000, profile=[0.005, 0.00034, 0.0434])
conn = fc.Connection('conn', from_bus=el1, to_bus=el2, loss=0.07, capacity=100)
es.add(el1, el2, heat, biomass, st, wind, sto, conv, load, conn, gas)
m = Model(es)
m.pprint()
m.write('model.lp', io_options={'symbolic_solver_labels': True})
| gpl-3.0 |
larsmans/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 249 | 2211 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso makes it possible to fit multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements, each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same features. The multi-task lasso imposes that
features that are selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
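# coef_lasso_ stacks one independent Lasso fit per task (shape: n_tasks x
# n_features), while coef_multi_task_lasso_ comes from a single joint fit whose
# mixed L1/L2 penalty forces the non-zero pattern to be shared across tasks.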
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
stscieisenhamer/glue | glue/plugins/tools/spectrum_tool/qt/spectrum_tool.py | 1 | 31628 | from __future__ import absolute_import, division, print_function
import os
import logging
import platform
import traceback
import numpy as np
from qtpy import QtCore, QtGui, QtWidgets, compat
from qtpy.QtCore import Qt
from glue.external.six.moves import range as xrange
from glue.core.exceptions import IncompatibleAttribute
from glue.core import Subset
from glue.core.callback_property import add_callback, ignore_callback
from glue.config import fit_plugin, viewer_tool
from glue.viewers.matplotlib.qt.toolbar import MatplotlibViewerToolbar
from glue.core.qt.mime import LAYERS_MIME_TYPE
from glue.viewers.common.qt.mouse_mode import RoiMode
from glue.utils.qt import load_ui, get_qapp
from glue.core.qt.simpleforms import build_form_item
from glue.utils.qt.widget_properties import CurrentComboProperty
from glue.app.qt.mdi_area import GlueMdiSubWindow
from glue.viewers.matplotlib.qt.widget import MplWidget
from glue.utils import nonpartial, Pointer
from glue.utils.qt import Worker, messagebox_on_error
from glue.core.subset import RoiSubsetState
from glue.core.qt import roi as qt_roi
from .profile_viewer import ProfileViewer
from glue.viewers.image.state import AggregateSlice
from glue.core.aggregate import mom1, mom2
class Extractor(object):
# Warning:
# Coordinate conversion is not well-defined if pix2world is not
# monotonic!
@staticmethod
def abcissa(data, axis):
slc = [0 for _ in data.shape]
slc[axis] = slice(None, None)
att = data.get_world_component_id(axis)
return data[att, tuple(slc)].ravel()
@staticmethod
def spectrum(data, attribute, roi, slc, zaxis):
# Find the integer index of the x and y axes, which are the axes for
# which the image is shown (the ROI is drawn along these attributes)
xaxis = slc.index('x')
yaxis = slc.index('y')
# Get the actual component IDs corresponding to these axes
xatt = data.get_pixel_component_id(xaxis)
yatt = data.get_pixel_component_id(yaxis)
# Set up a view that does not reduce the dimensionality of the array but
# extracts 1-element slices along dimensions that are not relevant.
view = []
for idim, dim in enumerate(slc):
if idim in (xaxis, yaxis, zaxis):
view.append(slice(None))
else:
view.append(slice(dim, dim + 1))
view = tuple(view)
# We now delegate to RoiSubsetState to compute the mask based on the ROI
subset_state = RoiSubsetState(xatt=xatt, yatt=yatt, roi=roi)
mask = subset_state.to_mask(data, view=view)
# We now extract the values that fall inside the ROI. Unfortunately,
# this returns a flat 1-d array, so we need to then reshape it to get
# an array with shape (n_spec, n_pix), where n_pix is the number of
# pixels inside the ROI
values = data[attribute, view]
if zaxis != 0:
values = values.swapaxes(zaxis, 0)
mask = mask.swapaxes(zaxis, 0)
values = values[mask].reshape(data.shape[zaxis], -1)
# We then average along the spatial dimension
spectrum = np.nanmean(values, axis=1)
# Get the world coordinates of the spectral axis
x = Extractor.abcissa(data, zaxis)
return x, spectrum
@staticmethod
def world2pixel(data, axis, value):
x = Extractor.abcissa(data, axis)
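        # np.searchsorted expects an ascending array, so a decreasing world
        # axis is flipped and the resulting index mirrored back below.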
if x.size > 1 and (x[1] < x[0]):
x = x[::-1]
result = x.size - np.searchsorted(x, value) - 2
else:
result = np.searchsorted(x, value) - 1
return np.clip(result, 0, x.size - 1)
@staticmethod
def pixel2world(data, axis, value):
x = Extractor.abcissa(data, axis)
return x[np.clip(value, 0, x.size - 1)]
@staticmethod
def subset_spectrum(subset, attribute, slc, zaxis):
"""
Extract a spectrum from a subset.
This makes a mask of the subset in the **current slice**,
and extracts a tube of this shape over all slices along ``zaxis``.
In other words, the variation of the subset along ``zaxis`` is ignored,
and only the interaction of the subset and the slice is relevant.
:param subset: A :class:`~glue.core.subset.Subset`
:param attribute: The :class:`~glue.core.data.ComponentID` to extract
:param slc: A tuple describing the slice
:param zaxis: Which axis to integrate over
"""
data = subset.data
x = Extractor.abcissa(data, zaxis)
view = [slice(s, s + 1)
if s not in ['x', 'y'] else slice(None)
for s in slc]
mask = np.squeeze(subset.to_mask(view))
if slc.index('x') < slc.index('y'):
mask = mask.T
w = np.where(mask)
view[slc.index('x')] = w[1]
view[slc.index('y')] = w[0]
result = np.empty(x.size)
# treat each channel separately, to reduce memory storage
for i in xrange(data.shape[zaxis]):
view[zaxis] = i
val = data[attribute, view]
result[i] = np.nansum(val) / np.isfinite(val).sum()
y = result
return x, y
class SpectrumContext(object):
"""
Base class for different interaction contexts
"""
viewer_state = Pointer('main.viewer_state')
data = Pointer('main.data')
profile_axis = Pointer('main.profile_axis')
canvas = Pointer('main.canvas')
profile = Pointer('main.profile')
def __init__(self, main):
self.main = main
self.grip = None
self.panel = None
self.widget = None
self._setup_grip()
self._setup_widget()
self._connect()
def _setup_grip(self):
""" Create a :class:`~glue.plugins.tools.spectrum_tool.profile_viewer.Grip` object
to interact with the plot. Assign to self.grip
"""
raise NotImplementedError()
def _setup_widget(self):
"""
Create a context-specific widget
"""
# this is the widget that is displayed to the right of the
# spectrum
raise NotImplementedError()
def _connect(self):
"""
Attach event handlers
"""
pass
def set_enabled(self, enabled):
self.enable() if enabled else self.disable()
def enable(self):
if self.grip is not None:
self.grip.enable()
def disable(self):
if self.grip is not None:
self.grip.disable()
def recenter(self, lim):
"""Re-center the grip to the given x axlis limit tuple"""
if self.grip is None:
return
if hasattr(self.grip, 'value'):
self.grip.value = sum(lim) / 2.
return
# Range grip
cen = sum(lim) / 2
wid = max(lim) - min(lim)
self.grip.range = cen - wid / 4, cen + wid / 4
class NavContext(SpectrumContext):
"""
Mode to set the 2D slice in the parent image widget by dragging
a handle in the spectrum
"""
def _setup_grip(self):
def _set_state_from_grip(value):
"""Update state.slices given grip value"""
if not self.main.enabled:
return
slc = list(self.viewer_state.slices)
# state.slices stored in pixel coords
value = Extractor.world2pixel(
self.data,
self.profile_axis, value)
slc[self.profile_axis] = value
# prevent callback bouncing. Fixes #298
self.viewer_state.slices = tuple(slc)
def _set_grip_from_state(slc):
"""Update grip.value given state.slices"""
if not self.main.enabled:
return
# grip.value is stored in world coordinates
val = slc[self.profile_axis]
if isinstance(val, AggregateSlice):
val = val.center
val = Extractor.pixel2world(self.data, self.profile_axis, val)
# If pix2world not monotonic, this can trigger infinite recursion.
# Avoid by disabling callback loop
# XXX better to specifically ignore _set_state_from_grip
with ignore_callback(self.grip, 'value'):
self.grip.value = val
self.grip = self.main.profile.new_value_grip()
add_callback(self.viewer_state, 'slices', _set_grip_from_state)
add_callback(self.grip, 'value', _set_state_from_grip)
def _connect(self):
pass
def _setup_widget(self):
self.widget = QtWidgets.QTextEdit()
self.widget.setHtml("To <b> slide </b> through the cube, "
"drag the handle or double-click<br><br><br>"
"To make a <b> new profile </b>, "
"click-drag a new box in the image, or drag "
"a subset onto the plot to the left")
self.widget.setTextInteractionFlags(Qt.NoTextInteraction)
class CollapseContext(SpectrumContext):
"""
Mode to collapse a section of a cube into a 2D image.
Supports several aggregations: mean, median, max, mom1, mom2
"""
def _setup_grip(self):
self.grip = self.main.profile.new_range_grip()
def _setup_widget(self):
w = QtWidgets.QWidget()
l = QtWidgets.QFormLayout()
w.setLayout(l)
combo = QtWidgets.QComboBox()
combo.addItem("Mean", userData=np.mean)
combo.addItem("Median", userData=np.median)
combo.addItem("Max", userData=np.max)
combo.addItem("Centroid", userData=mom1)
combo.addItem("Linewidth", userData=mom2)
run = QtWidgets.QPushButton("Collapse")
save = QtWidgets.QPushButton("Save as FITS file")
buttons = QtWidgets.QHBoxLayout()
buttons.addWidget(run)
buttons.addWidget(save)
self._save = save
self._run = run
l.addRow("", combo)
l.addRow("", buttons)
self.widget = w
self._combo = combo
self._collapsed_viewer = None
def _connect(self):
self._run.clicked.connect(nonpartial(self._aggregate))
self._save.clicked.connect(nonpartial(self._choose_save))
@property
def aggregator(self):
return self._combo.itemData(self._combo.currentIndex())
@property
def aggregator_label(self):
return self._combo.currentText()
def _aggregate(self):
func = self.aggregator
rng = list(self.grip.range)
rng = Extractor.world2pixel(self.data,
self.profile_axis,
rng)
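        # world2pixel returns inclusive pixel indices; the +1 below makes the
        # upper bound inclusive once it is turned into a slice.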
rng[1] += 1
slices = list(self.viewer_state.slices)
current_slice = slices[self.profile_axis]
if isinstance(current_slice, AggregateSlice):
current_slice = current_slice.center
slices[self.profile_axis] = AggregateSlice(slice(*rng),
current_slice,
func)
self.viewer_state.slices = tuple(slices)
# Save a local copy of the collapsed array
for layer_state in self.viewer_state.layers:
if layer_state.layer is self.viewer_state.reference_data:
break
else:
raise Exception("Couldn't find layer corresponding to reference data")
self._agg = layer_state.get_sliced_data()
@messagebox_on_error("Failed to export projection")
def _choose_save(self):
self._aggregate()
out, _ = compat.getsavefilename(filters='FITS Files (*.fits)')
if not out:
return
self.save_to(out)
def save_to(self, pth):
"""
Write the projection to a file
Parameters
----------
pth : str
Path to write to
"""
from astropy.io import fits
data = self.viewer_state.reference_data
if data is None:
raise RuntimeError("Cannot save projection -- no data to visualize")
self._aggregate()
# try to project wcs to 2D
wcs = getattr(data.coords, 'wcs', None)
if wcs:
try:
wcs.dropaxis(data.ndim - 1 - self.main.profile_axis)
header = wcs.to_header(True)
except Exception as e:
msg = "Could not extract 2D wcs for this data: %s" % e
logging.getLogger(__name__).warn(msg)
header = fits.Header()
else:
header = fits.Header()
lo, hi = self.grip.range
history = ('Created by Glue. %s projection over channels %i-%i of axis %i. Slice=%s' %
(self.aggregator_label, lo, hi, self.main.profile_axis, self.viewer_state.slices))
header.add_history(history)
try:
fits.writeto(pth, self._agg, header, overwrite=True)
except TypeError:
fits.writeto(pth, self._agg, header, clobber=True)
class ConstraintsWidget(QtWidgets.QWidget):
"""
A widget to display and tweak the constraints of a :class:`~glue.core.fitters.BaseFitter1D`
"""
def __init__(self, constraints, parent=None):
"""
Parameters
----------
constraints : dict
            The `constraints` property of a :class:`~glue.core.fitters.BaseFitter1D`
object
parent : QtWidgets.QWidget (optional)
The parent of this widget
"""
super(ConstraintsWidget, self).__init__(parent)
self.constraints = constraints
self.layout = QtWidgets.QGridLayout()
self.layout.setContentsMargins(2, 2, 2, 2)
self.layout.setSpacing(4)
self.setLayout(self.layout)
self.layout.addWidget(QtWidgets.QLabel("Estimate"), 0, 1)
self.layout.addWidget(QtWidgets.QLabel("Fixed"), 0, 2)
self.layout.addWidget(QtWidgets.QLabel("Bounded"), 0, 3)
self.layout.addWidget(QtWidgets.QLabel("Lower Bound"), 0, 4)
self.layout.addWidget(QtWidgets.QLabel("Upper Bound"), 0, 5)
self._widgets = {}
names = sorted(list(self.constraints.keys()))
for k in names:
row = []
w = QtWidgets.QLabel(k)
row.append(w)
v = QtGui.QDoubleValidator()
e = QtWidgets.QLineEdit()
e.setValidator(v)
e.setText(str(constraints[k]['value'] or ''))
row.append(e)
w = QtWidgets.QCheckBox()
w.setChecked(constraints[k]['fixed'])
fix = w
row.append(w)
w = QtWidgets.QCheckBox()
limits = constraints[k]['limits']
w.setChecked(limits is not None)
bound = w
row.append(w)
e = QtWidgets.QLineEdit()
e.setValidator(v)
if limits is not None:
e.setText(str(limits[0]))
row.append(e)
e = QtWidgets.QLineEdit()
e.setValidator(v)
if limits is not None:
e.setText(str(limits[1]))
row.append(e)
def unset(w):
def result(active):
if active:
w.setChecked(False)
return result
fix.toggled.connect(unset(bound))
bound.toggled.connect(unset(fix))
self._widgets[k] = row
for i, row in enumerate(names, 1):
for j, widget in enumerate(self._widgets[row]):
self.layout.addWidget(widget, i, j)
def settings(self, name):
""" Return the constraints for a single model parameter """
row = self._widgets[name]
name, value, fixed, limited, lo, hi = row
value = float(value.text()) if value.text() else None
fixed = fixed.isChecked()
limited = limited.isChecked()
lo = lo.text()
hi = hi.text()
limited = limited and not ((not lo) or (not hi))
limits = None if not limited else [float(lo), float(hi)]
return dict(value=value, fixed=fixed, limits=limits)
def update_constraints(self, fitter):
""" Update the constraints in a :class:`~glue.core.fitters.BaseFitter1D`
based on the settings in this widget
"""
for name in self._widgets:
s = self.settings(name)
fitter.set_constraint(name, **s)
class FitSettingsWidget(QtWidgets.QDialog):
def __init__(self, fitter, parent=None):
super(FitSettingsWidget, self).__init__(parent)
self.fitter = fitter
self._build_form()
self._connect()
self.setModal(True)
def _build_form(self):
fitter = self.fitter
l = QtWidgets.QFormLayout()
options = fitter.options
self.widgets = {}
self.forms = {}
for k in sorted(options):
item = build_form_item(fitter, k)
l.addRow(item.label, item.widget)
self.widgets[k] = item.widget
self.forms[k] = item # need to prevent garbage collection
constraints = fitter.constraints
if constraints:
self.constraints = ConstraintsWidget(constraints)
l.addRow(self.constraints)
else:
self.constraints = None
self.okcancel = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok |
QtWidgets.QDialogButtonBox.Cancel)
l.addRow(self.okcancel)
self.setLayout(l)
def _connect(self):
self.okcancel.accepted.connect(self.accept)
self.okcancel.rejected.connect(self.reject)
self.accepted.connect(self.update_fitter_from_settings)
def update_fitter_from_settings(self):
for k, v in self.widgets.items():
setattr(self.fitter, k, v.value())
if self.constraints is not None:
self.constraints.update_constraints(self.fitter)
class FitContext(SpectrumContext):
"""
Mode to fit a range of a spectrum with a model fitter.
Fitters are taken from user-defined fit plugins, or
:class:`~glue.core.fitters.BaseFitter1D` subclasses
"""
error = CurrentComboProperty('ui.uncertainty_combo')
fitter = CurrentComboProperty('ui.profile_combo')
def _setup_grip(self):
self.grip = self.main.profile.new_range_grip()
def _setup_widget(self):
self.ui = load_ui('spectrum_fit_panel.ui', None,
directory=os.path.dirname(__file__))
self.ui.uncertainty_combo.hide()
self.ui.uncertainty_label.hide()
font = QtGui.QFont("Courier")
font.setStyleHint(font.Monospace)
self.ui.results_box.document().setDefaultFont(font)
self.ui.results_box.setLineWrapMode(self.ui.results_box.NoWrap)
self.widget = self.ui
for fitter in list(fit_plugin):
self.ui.profile_combo.addItem(fitter.label,
userData=fitter())
def _edit_model_options(self):
d = FitSettingsWidget(self.fitter)
d.exec_()
def _connect(self):
self.ui.fit_button.clicked.connect(nonpartial(self.fit))
self.ui.clear_button.clicked.connect(nonpartial(self.clear))
self.ui.settings_button.clicked.connect(
nonpartial(self._edit_model_options))
def fit(self):
"""
Fit a model to the data
The fitting happens on a dedicated thread, to keep the UI
responsive
"""
xlim = self.grip.range
fitter = self.fitter
def on_success(result):
fit_result, _, _, _ = result
self._report_fit(fitter.summarize(*result))
self.main.profile.plot_fit(fitter, fit_result)
def on_fail(exc_info):
exc = '\n'.join(traceback.format_exception(*exc_info))
self._report_fit("Error during fitting:\n%s" % exc)
def on_done():
self.ui.fit_button.setText("Fit")
self.ui.fit_button.setEnabled(True)
self.canvas.draw()
self.ui.fit_button.setText("Running...")
self.ui.fit_button.setEnabled(False)
w = Worker(self.main.profile.fit, fitter, xlim=xlim)
w.result.connect(on_success)
w.error.connect(on_fail)
w.finished.connect(on_done)
self._fit_worker = w # hold onto a reference
w.start()
def _report_fit(self, report):
self.ui.results_box.document().setPlainText(report)
def clear(self):
self.ui.results_box.document().setPlainText('')
self.main.profile.clear_fit()
self.canvas.draw()
class SpectrumMainWindow(QtWidgets.QMainWindow):
"""
The main window that the spectrum viewer is embedded in.
Defines two signals to trigger when a subset is dropped into the window,
and when the window is closed.
"""
subset_dropped = QtCore.Signal(object)
window_closed = QtCore.Signal()
def __init__(self, parent=None):
super(SpectrumMainWindow, self).__init__(parent=parent)
self.setAcceptDrops(True)
def closeEvent(self, event):
self.window_closed.emit()
return super(SpectrumMainWindow, self).closeEvent(event)
def dragEnterEvent(self, event):
if event.mimeData().hasFormat(LAYERS_MIME_TYPE):
event.accept()
else:
event.ignore()
def dropEvent(self, event):
layer = event.mimeData().data(LAYERS_MIME_TYPE)[0]
if isinstance(layer, Subset):
self.subset_dropped.emit(layer)
def set_status(self, message):
sb = self.statusBar()
sb.showMessage(message)
@viewer_tool
class SpectrumExtractorMode(RoiMode):
"""
Lets the user select a region in an image and, when connected to a
SpectrumExtractorTool, uses this to display spectra extracted from that
position
"""
persistent = True
icon = 'glue_spectrum'
tool_id = 'spectrum'
action_text = 'Spectrum'
tool_tip = 'Extract a spectrum from the selection'
shortcut = 'S'
def __init__(self, viewer, **kwargs):
super(SpectrumExtractorMode, self).__init__(viewer, **kwargs)
self._roi_tool = qt_roi.QtRectangularROI(self._axes) # default
self._tool = SpectrumTool(self.viewer, self)
self._release_callback = self._tool._update_profile
self._move_callback = self._tool._move_profile
self._roi_callback = None
self.viewer.state.add_callback('reference_data', self._on_reference_data_change)
def _on_reference_data_change(self, reference_data):
if reference_data is not None:
self.enabled = reference_data.ndim == 3
def menu_actions(self):
result = []
a = QtWidgets.QAction('Rectangle', None)
a.triggered.connect(nonpartial(self.set_roi_tool, 'Rectangle'))
result.append(a)
a = QtWidgets.QAction('Circle', None)
a.triggered.connect(nonpartial(self.set_roi_tool, 'Circle'))
result.append(a)
a = QtWidgets.QAction('Polygon', None)
a.triggered.connect(nonpartial(self.set_roi_tool, 'Polygon'))
result.append(a)
for r in result:
if self._move_callback is not None:
r.triggered.connect(nonpartial(self._move_callback, self))
return result
def set_roi_tool(self, mode):
        if mode == 'Rectangle':
            self._roi_tool = qt_roi.QtRectangularROI(self._axes)
        elif mode == 'Circle':
            self._roi_tool = qt_roi.QtCircularROI(self._axes)
        elif mode == 'Polygon':
            self._roi_tool = qt_roi.QtPolygonalROI(self._axes)
self._roi_tool.plot_opts.update(edgecolor='#c51b7d',
facecolor=None,
edgewidth=3,
alpha=1.0)
def close(self):
self._tool.close()
return super(SpectrumExtractorMode, self).close()
# TODO: refactor this so that we don't have a separate tool and mode
class SpectrumTool(object):
"""
Main widget for interacting with spectra extracted from an image.
Provides different contexts for interacting with the spectrum:
*navigation context* lets the user set the slice in the parent image
by dragging a bar on the spectrum
*fit context* lets the user fit models to a portion of the spectrum
*collapse context* lets the user collapse a section of a cube to a 2D image
"""
def __init__(self, image_viewer, mouse_mode):
self._relim_requested = True
self.image_viewer = image_viewer
self.viewer_state = self.image_viewer.state
self.image_viewer.window_closed.connect(self.close)
self._build_main_widget()
self.profile = ProfileViewer(self.canvas.fig)
self.axes = self.profile.axes
self.mouse_mode = mouse_mode
self._setup_toolbar()
self._setup_ctxbar()
self._connect()
w = self.image_viewer.session.application.add_widget(self,
label='Profile')
w.close()
def close(self):
if hasattr(self, '_mdi_wrapper'):
self._mdi_wrapper.close()
else:
self.widget.close()
self.image_viewer = None
@property
def enabled(self):
"""Return whether the window is visible and active"""
return self.widget.isVisible()
def mdi_wrap(self):
sub = GlueMdiSubWindow()
sub.setWidget(self.widget)
self.widget.destroyed.connect(sub.close)
sub.resize(self.widget.size())
self._mdi_wrapper = sub
return sub
def _build_main_widget(self):
self.widget = SpectrumMainWindow()
self.widget.window_closed.connect(self.reset)
w = QtWidgets.QWidget()
l = QtWidgets.QHBoxLayout()
l.setSpacing(2)
l.setContentsMargins(2, 2, 2, 2)
w.setLayout(l)
mpl = MplWidget()
self.canvas = mpl.canvas
l.addWidget(mpl)
l.setStretchFactor(mpl, 5)
self.widget.setCentralWidget(w)
# TODO: fix hacks
w.canvas = self.canvas
self.widget.central_widget = w
def _setup_ctxbar(self):
l = self.widget.centralWidget().layout()
self._contexts = [NavContext(self),
FitContext(self),
CollapseContext(self)]
tabs = QtWidgets.QTabWidget(parent=self.widget)
# The following is needed because of a bug in Qt which means that
# tab titles don't get scaled right.
if platform.system() == 'Darwin':
app = get_qapp()
app_font = app.font()
tabs.setStyleSheet('font-size: {0}px'.format(app_font.pointSize()))
tabs.addTab(self._contexts[0].widget, 'Navigate')
tabs.addTab(self._contexts[1].widget, 'Fit')
tabs.addTab(self._contexts[2].widget, 'Collapse')
self._tabs = tabs
self._tabs.setVisible(False)
l.addWidget(tabs)
l.setStretchFactor(tabs, 0)
def _connect(self):
add_callback(self.viewer_state, 'x_att',
self.reset)
add_callback(self.viewer_state, 'y_att',
self.reset)
def _on_tab_change(index):
for i, ctx in enumerate(self._contexts):
ctx.set_enabled(i == index)
if i == index:
self.profile.active_grip = ctx.grip
self._tabs.currentChanged.connect(_on_tab_change)
_on_tab_change(self._tabs.currentIndex())
self.widget.subset_dropped.connect(self._extract_subset_profile)
def _setup_toolbar(self):
tb = MatplotlibViewerToolbar(self.widget)
# disable ProfileViewer mouse processing during mouse modes
tb.tool_activated.connect(self.profile.disconnect)
tb.tool_deactivated.connect(self.profile.connect)
self._menu_toggle_action = QtWidgets.QAction("Options", tb)
self._menu_toggle_action.setCheckable(True)
self._menu_toggle_action.toggled.connect(self._toggle_menu)
tb.addAction(self._menu_toggle_action)
self.widget.addToolBar(tb)
return tb
def _toggle_menu(self, active):
self._tabs.setVisible(active)
def reset(self, *args):
self.hide()
self.mouse_mode.clear()
self._relim_requested = True
@property
def data(self):
return self.viewer_state.reference_data
@property
def profile_axis(self):
# XXX make this settable
# defaults to the non-xy axis with the most channels
try:
slc = self.viewer_state.wcsaxes_slice[::-1]
except AttributeError:
return None
candidates = [i for i, s in enumerate(slc) if s not in ['x', 'y']]
return max(candidates, key=lambda i: self.data.shape[i])
def _recenter_grips(self):
for ctx in self._contexts:
ctx.recenter(self.axes.get_xlim())
def _extract_subset_profile(self, subset):
slc = self.viewer_state.slices
try:
x, y = Extractor.subset_spectrum(subset,
self.viewer_state.display_attribute,
slc,
self.profile_axis)
except IncompatibleAttribute:
return
self._set_profile(x, y)
def _update_from_roi(self, roi):
data = self.data
att = self.viewer_state.layers[0].attribute
slc = self.viewer_state.wcsaxes_slice[::-1]
if data is None or att is None:
return
zax = self.profile_axis
x, y = Extractor.spectrum(data, att, roi, slc, zax)
self._set_profile(x, y)
def _update_profile(self, *args):
roi = self.mouse_mode.roi()
return self._update_from_roi(roi)
def _move_profile(self, *args):
if self.mouse_mode._roi_tool._scrubbing:
self._update_profile(args)
def _set_profile(self, x, y):
data = self.data
xid = data.get_world_component_id(self.profile_axis)
units = data.get_component(xid).units
xlabel = str(xid) if units is None else '%s [%s]' % (xid, units)
xlim = self.axes.get_xlim()
self.profile.set_xlabel(xlabel)
self.profile.set_profile(x, y, color='k')
# relim x range if requested
if self._relim_requested:
self._relim_requested = False
self.axes.set_xlim(np.nanmin(x), np.nanmax(x))
# relim y range to data within the view window
self.profile.autoscale_ylim()
if self.axes.get_xlim() != xlim:
self._recenter_grips()
self.axes.figure.canvas.draw()
self.show()
def _move_below_image_viewer(self):
rect = self.image_viewer.frameGeometry()
pos = rect.bottomLeft()
self._mdi_wrapper.setGeometry(pos.x(), pos.y(),
rect.width(), 300)
def show(self):
if self.widget.isVisible():
return
self._move_below_image_viewer()
self.widget.show()
def hide(self):
if hasattr(self, '_mdi_wrapper'):
self._mdi_wrapper.close()
else:
self.widget.close()
def _get_modes(self, axes):
return [self.mouse_mode]
| bsd-3-clause |
pprett/sparklingpandas | sparklingpandas/test/pcontexttests.py | 2 | 1279 | """
Test methods in pcontext
"""
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sparklingpandas.test.sparklingpandastestcase import \
SparklingPandasTestCase
class PContextTests(SparklingPandasTestCase):
def test_dataframe_construction(self):
input = [("tea", "happy"), ("water", "sad"), ("coffee", "happiest")]
prdd = self.psc.DataFrame(input, columns=['magic', 'thing'])
elements = prdd.collect()
assert len(elements) == 3
assert sorted(elements['magic']) == ['coffee', 'tea', 'water']
| apache-2.0 |
alekz112/statsmodels | statsmodels/sandbox/regression/penalized.py | 31 | 12127 | # -*- coding: utf-8 -*-
"""linear model with Theil prior probabilistic restrictions, generalized Ridge
Created on Tue Dec 20 00:10:10 2011
Author: Josef Perktold
License: BSD-3
open issues
* selection of smoothing factor, strength of prior, cross validation
* GLS, does this really work this way
* None of inherited results have been checked yet,
I'm not sure if any need to be adjusted or if only interpretation changes
One question is which results are based on likelihood (residuals) and which
are based on "posterior" as for example bse and cov_params
* helper functions to construct priors?
* increasing penalization for ordered regressors, e.g. polynomials
* compare with random/mixed effects/coefficient, like estimated priors
there is something fishy with the result instance, some things, e.g.
normalized_cov_params, don't look like they update correctly as we
search over lambda -> some stale state again ?
I added df_model to result class using the hatmatrix, but df_model is defined
in model instance not in result instance. -> not clear where refactoring should
occur. df_resid doesn't get updated correctly.
problem with definition of df_model, it has 1 subtracted for constant
"""
from __future__ import print_function
from statsmodels.compat.python import lrange
import numpy as np
import statsmodels.base.model as base
from statsmodels.regression.linear_model import OLS, GLS, RegressionResults
def atleast_2dcols(x):
x = np.asarray(x)
if x.ndim == 1:
x = x[:,None]
return x
class TheilGLS(GLS):
'''GLS with probabilistic restrictions
essentially Bayes with informative prior
note: I'm making up the GLS part, might work only for OLS
'''
def __init__(self, endog, exog, r_matrix, q_matrix=None, sigma_prior=None, sigma=None):
self.r_matrix = np.asarray(r_matrix)
self.q_matrix = atleast_2dcols(q_matrix)
if np.size(sigma_prior) == 1:
sigma_prior = sigma_prior * np.eye(self.r_matrix.shape[0]) #no numerical shortcuts
self.sigma_prior = sigma_prior
self.sigma_prior_inv = np.linalg.pinv(sigma_prior) #or inv
super(self.__class__, self).__init__(endog, exog, sigma=sigma)
def fit(self, lambd=1.):
#this does duplicate transformation, but I need resid not wresid
res_gls = GLS(self.endog, self.exog, sigma=self.sigma).fit()
self.res_gls = res_gls
sigma2_e = res_gls.mse_resid
r_matrix = self.r_matrix
q_matrix = self.q_matrix
sigma_prior_inv = self.sigma_prior_inv
x = self.wexog
y = self.wendog[:,None]
#why are sigma2_e * lambd multiplied, not ratio?
#larger lambd -> stronger prior (it's not the variance)
#print('lambd inside fit', lambd)
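#closed form computed below (mixed estimation / generalized ridge):
# params = (X'X + sigma2_e*lambd * Sp_inv_quad)^{-1} (X'y + sigma2_e*lambd * R' sigma_prior^{-1} q)
# with Sp_inv_quad = R' sigma_prior^{-1} R, so sigma2_e*lambd scales how strongly
# the restrictions R b = q are enforced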
xpx = np.dot(x.T, x) + \
sigma2_e * lambd * np.dot(r_matrix.T, np.dot(sigma_prior_inv, r_matrix))
xpy = np.dot(x.T, y) + \
sigma2_e * lambd * np.dot(r_matrix.T, np.dot(sigma_prior_inv, q_matrix))
#xpy = xpy[:,None]
xpxi = np.linalg.pinv(xpx)
params = np.dot(xpxi, xpy) #or solve
params = np.squeeze(params)
self.normalized_cov_params = xpxi #why attach it to self, i.e. model?
lfit = TheilRegressionResults(self, params,
normalized_cov_params=xpxi)
lfit.penalization_factor = lambd
return lfit
def fit_minic(self):
#this doesn't make sense, since number of parameters stays unchanged
#need leave-one-out, gcv; or some penalization for weak priors
#added extra penalization for lambd
def get_bic(lambd):
#return self.fit(lambd).bic #+lambd #+ 1./lambd #added 1/lambd for checking
#return self.fit(lambd).gcv()
#return self.fit(lambd).cv()
return self.fit(lambd).aicc()
from scipy import optimize
lambd = optimize.fmin(get_bic, 1.)
return lambd
#TODO:
#I need the hatmatrix in the model if I want to do iterative fitting, e.g. GCV
#move to model or use it from a results instance inside the model,
# each call to fit returns results instance
class TheilRegressionResults(RegressionResults):
#cache
def hatmatrix_diag(self):
'''
diag(X xpxi X')
where xpxi = (X'X + sigma2_e * lambd * R' sigma_prior^{-1} R)^{-1}
Notes
-----
uses wexog, so this includes weights or sigma - check this case
not clear whether I need to multiply by sigmahalf, i.e.
(W^{-0.5} X) (X' W X)^{-1} (W^{-0.5} X)' or
(W X) (X' W X)^{-1} (W X)'
projection y_hat = H y or in terms of transformed variables (W^{-0.5} y)
might be wrong for WLS and GLS case
'''
xpxi = self.model.normalized_cov_params
#something fishy with self.normalized_cov_params in result, doesn't update
#print(self.model.wexog.shape, np.dot(xpxi, self.model.wexog.T).shape
return (self.model.wexog * np.dot(xpxi, self.model.wexog.T).T).sum(1)
def hatmatrix_trace(self):
return self.hatmatrix_diag().sum()
#this doesn't update df_resid
@property #needs to be property or attribute (no call)
def df_model(self):
return self.hatmatrix_trace()
#Note: mse_resid uses df_resid not nobs-k_vars, which might differ if df_model, tr(H), is used
#in paper for gcv ess/nobs is used instead of mse_resid
def gcv(self):
return self.mse_resid / (1. - self.hatmatrix_trace() / self.nobs)**2
def cv(self):
return ((self.resid / (1. - self.hatmatrix_diag()))**2).sum() / self.nobs
def aicc(self):
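#AIC-type criterion that uses tr(H) as the effective number of parameters
#(a corrected AIC for linear smoothers)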
aic = np.log(self.mse_resid) + 1
aic += 2 * (1. + self.hatmatrix_trace()) / (self.nobs - self.hatmatrix_trace() -2)
return aic
#contrast/restriction matrices, temporary location
def coef_restriction_meandiff(n_coeffs, n_vars=None, position=0):
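'''restriction matrix R = I - 1/n_coeffs: each row penalizes the deviation of one
coefficient from the common mean; optionally embedded in a (n_coeffs, n_vars)
zero matrix starting at column `position`'''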
reduced = np.eye(n_coeffs) - 1./n_coeffs
if n_vars is None:
return reduced
else:
full = np.zeros((n_coeffs, n_vars))
full[:, position:position+n_coeffs] = reduced
return full
def coef_restriction_diffbase(n_coeffs, n_vars=None, position=0, base_idx=0):
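'''restriction matrix contrasting each coefficient with the one at `base_idx`
(each kept row encodes beta[base_idx] - beta[j]); the row for the base coefficient
itself is dropped; optionally embedded in a (n_coeffs-1, n_vars) zero matrix'''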
reduced = -np.eye(n_coeffs) #make all rows, drop one row later
reduced[:, base_idx] = 1
keep = lrange(n_coeffs)
del keep[base_idx]
reduced = np.take(reduced, keep, axis=0)
if n_vars is None:
return reduced
else:
full = np.zeros((n_coeffs-1, n_vars))
full[:, position:position+n_coeffs] = reduced
return full
def next_odd(d):
return d + (1 - d % 2)
def coef_restriction_diffseq(n_coeffs, degree=1, n_vars=None, position=0, base_idx=0):
#check boundaries, returns "valid" ?
if degree == 1:
diff_coeffs = [-1, 1]
n_points = 2
elif degree > 1:
from scipy import misc
n_points = next_odd(degree + 1) #next odd integer after degree+1
diff_coeffs = misc.central_diff_weights(n_points, ndiv=degree)
dff = np.concatenate((diff_coeffs, np.zeros(n_coeffs - len(diff_coeffs))))
from scipy import linalg
reduced = linalg.toeplitz(dff, np.zeros(n_coeffs - len(diff_coeffs) + 1)).T
#reduced = np.kron(np.eye(n_coeffs-n_points), diff_coeffs)
if n_vars is None:
return reduced
else:
full = np.zeros((n_coeffs-1, n_vars))
full[:, position:position+n_coeffs] = reduced
return full
##
## R = np.c_[np.zeros((n_groups, k_vars-1)), np.eye(n_groups)]
## r = np.zeros(n_groups)
## R = np.c_[np.zeros((n_groups-1, k_vars)),
## np.eye(n_groups-1)-1./n_groups * np.ones((n_groups-1, n_groups-1))]
if __name__ == '__main__':
import numpy as np
import statsmodels.api as sm
examples = [2]
np.random.seed(765367)
np.random.seed(97653679)
nsample = 100
x = np.linspace(0,10, nsample)
X = sm.add_constant(np.column_stack((x, x**2, (x/5.)**3)), prepend=True)
beta = np.array([10, 1, 0.1, 0.5])
y = np.dot(X, beta) + np.random.normal(size=nsample)
res_ols = sm.OLS(y, X).fit()
R = [[0, 0, 0 , 1]]
r = [0] #, 0, 0 , 0]
lambd = 1 #1e-4
mod = TheilGLS(y, X, r_matrix=R, q_matrix=r, sigma_prior=lambd)
res = mod.fit()
print(res_ols.params)
print(res.params)
#example 2
#I need more flexible penalization in example, the penalization should
#get stronger for higher order terms
#np.random.seed(1)
nobs = 200
k_vars = 10
k_true = 6
sig_e = 0.25 #0.5
x = np.linspace(-2,2, nobs)
#X = sm.add_constant(np.column_stack((x, x**2, (x/5.)**3)), prepend=True)
X = (x/x.max())[:,None]**np.arange(k_vars)
beta = np.zeros(k_vars)
beta[:k_true] = np.array([1, -2, 0.5, 1.5, -0.1, 0.1])[:k_true]
y_true = np.dot(X, beta)
y = y_true + sig_e * np.random.normal(size=nobs)
res_ols = sm.OLS(y, X).fit()
#R = np.c_[np.zeros((k_vars-4, 4)), np.eye(k_vars-4)] # has two large true coefficients penalized
not_penalized = 4
R = np.c_[np.zeros((k_vars-not_penalized, not_penalized)), np.eye(k_vars-not_penalized)]
#increasingly strong penalization
R = np.c_[np.zeros((k_vars-not_penalized, not_penalized)), np.diag((1+2*np.arange(k_vars-not_penalized)))]
r = np.zeros(k_vars-not_penalized)
## R = -coef_restriction_diffseq(6, 1, n_vars=10, position=4) #doesn't make sense for polynomial
## R = np.vstack((R, np.zeros(R.shape[1])))
## R[-1,-1] = 1
r = np.zeros(R.shape[0])
lambd = 2 #1e-4
mod = TheilGLS(y, X, r_matrix=R, q_matrix=r, sigma_prior=lambd)
res = mod.fit()
print(res_ols.params)
print(res.params)
res_bic = mod.fit_minic() #this will just return zero
res = mod.fit(res_bic)
print(res_bic)
for lambd in np.linspace(0, 80, 21):
res_l = mod.fit(lambd)
#print(lambd, res_l.params[-2:], res_l.bic, res_l.bic + 1./lambd, res.df_model
print((lambd, res_l.params[-2:], res_l.bic, res.df_model, np.trace(res.normalized_cov_params)))
import matplotlib.pyplot as plt
plt.figure()
plt.plot(beta, 'k-o', label='true')
plt.plot(res_ols.params, '-o', label='ols')
plt.plot(res.params, '-o', label='theil')
plt.legend()
plt.title('Polynomial fitting: estimated coefficients')
plt.figure()
plt.plot(y, 'o')
plt.plot(y_true, 'k-', label='true')
plt.plot(res_ols.fittedvalues, '-', label='ols')
plt.plot(res.fittedvalues, '-', label='theil')
plt.legend()
plt.title('Polynomial fitting: fitted values')
#plt.show()
if 3 in examples:
#example 3
nobs = 600
nobs_i = 20
n_groups = nobs // nobs_i
k_vars = 3
from statsmodels.sandbox.panel.random_panel import PanelSample
dgp = PanelSample(nobs, k_vars, n_groups)
dgp.group_means = 2 + np.random.randn(n_groups) #add random intercept
print('seed', dgp.seed)
y = dgp.generate_panel()
X = np.column_stack((dgp.exog[:,1:],
dgp.groups[:,None] == np.arange(n_groups)))
res_ols = sm.OLS(y, X).fit()
R = np.c_[np.zeros((n_groups, k_vars-1)), np.eye(n_groups)]
r = np.zeros(n_groups)
R = np.c_[np.zeros((n_groups-1, k_vars)),
np.eye(n_groups-1)-1./n_groups * np.ones((n_groups-1, n_groups-1))]
r = np.zeros(n_groups-1)
R[:, k_vars-1] = -1
lambd = 1 #1e-4
mod = TheilGLS(y, X, r_matrix=R, q_matrix=r, sigma_prior=lambd)
res = mod.fit()
print(res.params)
params_l = []
for lambd in np.linspace(0, 20, 21):
params_l.append(mod.fit(5.*lambd).params)
params_l = np.array(params_l)
plt.figure()
plt.plot(params_l.T)
plt.title('Panel Data with random intercept: shrinkage to being equal')
plt.xlabel('parameter index')
plt.figure()
plt.plot(params_l[:,k_vars:])
plt.title('Panel Data with random intercept: shrinkage to being equal')
plt.xlabel('strength of prior')
#plt.show()
| bsd-3-clause |
JanNash/sms-tools | lectures/03-Fourier-properties/plots-code/shift.py | 26 | 1223 | import matplotlib.pyplot as plt
import numpy as np
import sys
from scipy.signal import sawtooth
sys.path.append('../../../software/models/')
import dftModel as DF
N = 128
x1 = sawtooth(2*np.pi*np.arange(-N/2,N/2)/float(N))
x2 = sawtooth(2*np.pi*np.arange(-N/2-2,N/2-2)/float(N))
mX1, pX1 = DF.dftAnal(x1, np.ones(N), N)
mX2, pX2 = DF.dftAnal(x2, np.ones(N), N)
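# x2 is x1 delayed by 2 samples; the sawtooth is periodic over the N-sample frame,
# so this is a circular shift: by the DFT shift theorem mX2 equals mX1, while
# pX2 differs from pX1 only by the linear phase term -2*pi*k*2/N (up to unwrapping)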
plt.figure(1, figsize=(9.5, 7))
plt.subplot(321)
plt.title('x1=x[n]')
plt.plot(np.arange(-N/2, N/2, 1.0), x1, lw=1.5)
plt.axis([-N/2, N/2, -1, 1])
plt.subplot(322)
plt.title('x2=x[n-2]')
plt.plot(np.arange(-N/2, N/2, 1.0), x2, lw=1.5)
plt.axis([-N/2, N/2, -1, 1])
plt.subplot(323)
plt.title('mX1')
plt.plot(np.arange(0, mX1.size, 1.0), mX1, 'r', lw=1.5)
plt.axis([0,mX1.size,min(mX1),max(mX1)])
plt.subplot(324)
plt.title('mX2')
plt.plot(np.arange(0, mX2.size, 1.0), mX2, 'r', lw=1.5)
plt.axis([0,mX2.size,min(mX2),max(mX2)])
plt.subplot(325)
plt.title('pX1')
plt.plot(np.arange(0, pX1.size, 1.0), pX1, 'c', lw=1.5)
plt.axis([0,pX1.size,min(pX1),max(pX1)])
plt.subplot(326)
plt.title('pX2')
plt.plot(np.arange(0, pX2.size, 1.0), pX2, 'c', lw=1.5)
plt.axis([0,pX2.size,min(pX2),max(pX2)])
plt.tight_layout()
plt.savefig('shift.png')
plt.show()
| agpl-3.0 |
nhejazi/scikit-learn | sklearn/datasets/mldata.py | 32 | 8031 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home
from ..utils import Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename.
Parameters
----------
dataname : str
Name of dataset
Returns
-------
fname : str
The converted dataname.
"""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
If the file does not exist yet, it is downloaded from mldata.org .
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
3) the data array is stored as `n_features x n_samples`, and thus needs
to be transposed to match the `sklearn` standard
Keyword arguments allow to adapt these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname : str
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
The raw name is automatically converted to a mldata.org URL .
target_name : optional, default: 'label'
Name or index of the column containing the target values.
data_name : optional, default: 'data'
Name or index of the column containing the data.
transpose_data : optional, default: True
If True, transpose the downloaded data array.
data_home : optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
to respects the scikit-learn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform then into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to scikit-learn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by test runners to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
| bsd-3-clause |
zhreshold/mxnet | python/mxnet/symbol/numpy/_symbol.py | 1 | 284016 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-lines, unused-argument
"""numpy namespace for operators used in Gluon APIs dispatched by F=symbol module."""
import ctypes
import numpy as _np
from . import _op as _mx_np_op
from ...base import _LIB, SymbolHandle, numeric_types, mx_uint, integer_types, string_types
from ...base import c_str
from ...base import py_str
from ...util import check_call, set_module, _sanity_check_params
from ...util import wrap_np_unary_func, wrap_np_binary_func
from ...util import is_np_default_dtype
from ...context import current_context
from ..symbol import Symbol, Group
from .._internal import _set_np_symbol_class
from . import _internal as _npi
try:
from __builtin__ import slice as py_slice
except ImportError:
from builtins import slice as py_slice
__all__ = ['zeros', 'zeros_like', 'ones', 'ones_like', 'full', 'full_like', 'empty_like', 'bitwise_not', 'invert',
'delete', 'add', 'broadcast_to', 'subtract', 'multiply', 'divide', 'mod', 'remainder', 'fmod',
'power', 'arctan2', 'trace', 'transpose',
'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'sqrt', 'cbrt', 'abs', 'absolute', 'fabs', 'exp',
'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log', 'degrees', 'log2', 'log1p', 'matmul', 'median',
'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor', 'histogram', 'insert',
'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'argsort', 'sort', 'tensordot', 'eye', 'linspace',
'logspace', 'expand_dims', 'tile', 'arange', 'array_split', 'split', 'hsplit', 'vsplit', 'dsplit',
'concatenate', 'append', 'stack', 'vstack', 'row_stack', 'column_stack', 'hstack', 'dstack',
'average', 'mean', 'maximum', 'fmax', 'minimum', 'fmin', 'any', 'all', 'around', 'round', 'round_',
'flatnonzero', 'tril_indices', 'amax', 'amin', 'max', 'min', 'logical_and', 'logical_or', 'logical_xor',
'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'unravel_index',
'diag_indices_from', 'hanning', 'hamming', 'blackman', 'flip', 'flipud', 'fliplr',
'hypot', 'bitwise_and', 'bitwise_xor', 'bitwise_or', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'interp',
'tril', 'triu', 'tri', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'cross', 'kron',
'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', 'roll', 'rot90', 'einsum',
'true_divide', 'quantile', 'percentile', 'shares_memory', 'may_share_memory', 'diff', 'ediff1d',
'resize', 'polyval', 'nan_to_num', 'isnan', 'isinf', 'isposinf', 'isneginf', 'isfinite',
'atleast_1d', 'atleast_2d', 'atleast_3d', 'squeeze',
'where', 'bincount', 'rollaxis', 'diagflat', 'repeat', 'prod', 'pad', 'cumsum', 'sum', 'diag', 'diagonal']
@set_module('mxnet.symbol.numpy')
class _Symbol(Symbol):
def __getitem__(self, key): # pylint: disable = too-many-return-statements, inconsistent-return-statements
"""Return self[key].
If the symbol is a symbol list, it returns the i-th symbol or a list of symbols
selected by key.
Otherwise, it outputs a symbol that slices the input by the given key. Currently, this
function supports the following types of key:
- integer types, e.g., int, long, np.int32, np.int64
- slice containing integer constants, e.g., slice(0, None, None)
- tuple containing the above elements, which is used for multidimensional indexing
Parameters
----------
key : int, slice, or tuple of all previous types
Indexing key.
"""
num_outputs = self.num_outputs
if num_outputs > 1:
num_outputs = self.num_outputs
if isinstance(key, integer_types):
key = int(key)
if key < -num_outputs or key >= num_outputs:
raise IndexError('list index out of range')
if key < 0:
key += num_outputs
ret_handle = SymbolHandle()
check_call(_LIB.MXSymbolGetOutput(self.handle, mx_uint(key),
ctypes.byref(ret_handle)))
return _Symbol(handle=ret_handle)
elif isinstance(key, py_slice):
start, stop, step = key.indices(num_outputs)
return Group([self[i] for i in range(start, stop, step)], _Symbol)
else:
raise TypeError('indices of symbol group must be integers or slices, not {}'
.format(type(key)))
else:
if isinstance(key, integer_types):
if key == -1:
sliced = _npi.slice(self, [key], [None])
else:
sliced = _npi.slice(self, [key], [key+1])
return _npi.reshape(sliced, (-3, -4))
elif isinstance(key, py_slice):
if key.step is None or key.step != 0:
start = [None] if key.start is None else key.start
stop = [None] if key.stop is None else key.stop
return _npi.slice(self, start, stop, key.step)
else:
raise ValueError("slice step cannot be zero")
elif isinstance(key, tuple):
begin = []
end = []
step = []
new_shape = ()
if len(key) == 0:
return self
for index in key:
if isinstance(index, py_slice):
if index.step is not None and index.step == 0:
raise ValueError("slice step cannot be zero")
begin.append(index.start)
end.append(index.stop)
step.append(index.step)
new_shape += (-2,)
elif isinstance(index, integer_types):
if index >= 0:
begin.append(index)
end.append(index+1)
step.append(1)
else:
begin.append(index)
end.append(index - 1)
step.append(-1)
new_shape += (-3,)
else:
raise IndexError('Only integer, slice, or tuple of these types'
' are supported! Received key={}'.format(key))
new_shape += (-4,)
sliced = _npi.slice(self, begin, end, step)
return _npi.reshape(sliced, new_shape)
else:
raise IndexError('Only integer, slice, or tuple of these types are supported! '
'Received key={}'.format(key))
def __setitem__(self, key, value):
raise NotImplementedError
def __repr__(self):
"""Gets a string representation of the symbol."""
if self.num_outputs > 1:
name = ', '.join([str(ele_sym) for ele_sym in self])
return '<%s group [%s]>' % (self.__class__.__name__, name)
else:
return '<%s %s>' % (self.__class__.__name__, self.name)
@property
def name(self):
"""Gets name string from the symbol, this function only works for symbols
that are not a list (grouped symbols).
Returns
-------
value : str
The name of this symbol, returns ``None`` for list symbol.
"""
if self.num_outputs > 1:
raise AttributeError('This is a Group Symbol that contains {} elements and'
' does not have a name. Use str(sym) to print the name of '
'all the elements instead.'.format(self.num_outputs))
ret = ctypes.c_char_p()
success = ctypes.c_int()
check_call(_LIB.MXSymbolGetName(
self.handle, ctypes.byref(ret), ctypes.byref(success)))
assert success.value != 0,\
'Fail to infer the name of a symbol that is not a list!'
return py_str(ret.value)
def __iter__(self):
if self.num_outputs == 1:
raise TypeError("'{}' is not iterable.".format(self))
return iter((self[i] for i in range(self.num_outputs)))
def __add__(self, other):
"""x.__add__(y) <=> x + y"""
return add(self, other)
def __invert__(self):
"""x.__invert__() <=> ~x"""
return invert(self)
def __and__(self, other):
"""x.__and__(y) <=> x & y"""
return bitwise_and(self, other)
def __or__(self, other):
"""x.__or__(y) <=> x | y"""
return bitwise_or(self, other)
def __xor__(self, other):
"""x.__xor__(y) <=> x ^ y"""
return bitwise_xor(self, other)
def __round__(self, n=0):
"""x.__round__(n)"""
return round(self, decimals=n)
def __abs__(self):
"""x.__abs__()"""
return absolute(self)
def __ceil__(self):
"""x.__ceil__()"""
return ceil(self)
def __floor__(self):
"""x.__floor__()"""
return floor(self)
def __trunc__(self):
"""x.__trunc__()"""
return trunc(self)
def __sub__(self, other):
"""x.__sub__(y) <=> x - y"""
return subtract(self, other)
def __rsub__(self, other):
"""x.__rsub__(y) <=> y - x"""
return subtract(other, self)
def __mul__(self, other):
"""x.__mul__(y) <=> x * y"""
return multiply(self, other)
def __rmul__(self, other):
"""x.__rmul__(y) <=> y * x"""
return multiply(other, self)
def __div__(self, other):
"""x.__truediv__(y) <=> x / y"""
return divide(self, other)
def __rdiv__(self, other):
"""x.__rdiv__(y) <=> y / x"""
return divide(other, self)
def __mod__(self, other):
"""x.__mod__(y) <=> x % y"""
return mod(self, other)
def __rmod__(self, other):
"""x.__rmod__(y) <=> y % x"""
return mod(other, self)
def __idiv__(self, other):
raise NotImplementedError
def __truediv__(self, other):
"""x.__truediv__(y) <=> x / y"""
return divide(self, other)
def __rtruediv__(self, other):
"""x.__rtruediv__(y) <=> y / x"""
return divide(other, self)
def __itruediv__(self, other):
raise NotImplementedError
def __pow__(self, other):
"""x.__pow__(y) <=> x ** y"""
return power(self, other)
def __rpow__(self, other):
return power(other, self)
def __neg__(self):
"""x.__neg__() <=> - x"""
return negative(self)
def __deepcopy__(self, _):
return super(_Symbol, self).as_np_ndarray()
def __eq__(self, other):
"""x.__eq__(y) <=> x == y"""
return equal(self, other)
def __ne__(self, other):
"""x.__ne__(y) <=> x != y"""
return not_equal(self, other)
def __gt__(self, other):
"""x.__gt__(y) <=> x > y"""
return greater(self, other)
def __ge__(self, other):
"""x.__ge__(y) <=> x >= y"""
return greater_equal(self, other)
def __lt__(self, other):
"""x.__lt__(y) <=> x < y"""
return less(self, other)
def __le__(self, other):
"""x.__le__(y) <=> x <= y"""
return less_equal(self, other)
def __len__(self):
if self.num_outputs == 1:
raise TypeError('{} is not a list and does not support len().'.format(self))
return self.num_outputs
@property
def num_outputs(self):
"""The number of outputs of a symbol. If the symbol is not a symbollist, it returns 1.
Otherwise, it returns the number of elements of the list."""
output_count = mx_uint()
check_call(_LIB.MXSymbolGetNumOutputs(self.handle, ctypes.byref(output_count)))
return output_count.value
def as_nd_ndarray(self):
"""Convert _Symbol to mxnet.symbol.Symbol to use its convenience fluent methods."""
hdl = SymbolHandle()
check_call(_LIB.MXShallowCopySymbol(self.handle, ctypes.byref(hdl)))
return Symbol(handle=hdl)
def as_np_ndarray(self):
"""For the convenience of conversion between legacy and np symbols."""
return self
@property
# pylint: disable= invalid-name, undefined-variable
def T(self):
"""Same as self.transpose()."""
return self.transpose()
# pylint: enable= invalid-name, undefined-variable
def astype(self, dtype, order='K', casting='unsafe', subok=True, copy=True): # pylint: disable=arguments-differ,unused-argument,too-many-arguments
"""
Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result.
'C' means C order, 'F' means Fortran order, 'A'
means 'F' order if all the arrays are Fortran contiguous,
'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through (default), otherwise
the returned array will be forced to be a base-class array.
copy : bool, optional
Default `True`. By default, astype always returns a newly
allocated ndarray on the same context. If this is set to
`False`, and the dtype requested is the same as the ndarray's
dtype, the ndarray is returned instead of a copy.
Returns
-------
arr_t : ndarray
Unless `copy` is False and the other conditions for returning the input
array are satisfied (see description for `copy` input parameter), `arr_t`
is a new array of the same shape as the input array with `dtype`.
Notes
-----
This function differs from the official `ndarray`'s ``astype`` function in the following
aspects:
- `order` only supports 'C' and 'K'.
- `casting` only supports 'unsafe'.
- `subok` only supports ``True``.
"""
if order is not None and order != 'K' and order != 'C':
raise ValueError('order must be either \'K\' or \'C\'')
if casting != 'unsafe':
raise ValueError('casting must be equal to \'unsafe\'')
if not subok:
raise ValueError('subok must be equal to True')
return _npi.cast(self, dtype=dtype)
def dot(self, b, out=None):
"""Dot product of two arrays.
Refer to ``numpy.dot`` for full documentation."""
return _mx_np_op.dot(self, b, out=out)
def reshape(self, *args, **kwargs): # pylint: disable=arguments-differ
"""Returns a copy of the array with a new shape.
Notes
-----
Unlike the free function `mxnet.numpy.reshape`, this method on `ndarray` allows
the elements of the shape parameter to be passed in as separate arguments.
For example, ``a.reshape(10, 11)`` is equivalent to
``a.reshape((10, 11))``.
"""
order = 'C'
if len(kwargs) > 1:
raise TypeError('function takes at most 1 keyword argument')
if len(kwargs) == 1:
if 'order' not in kwargs:
raise TypeError('{} is an invalid keyword argument for this function'
.format(list(kwargs.keys())[0]))
order = kwargs.pop('order', 'C')
if order != 'C':
raise NotImplementedError('only supports C-order,'
' while received {}'.format(order))
if len(args) == 0:
raise TypeError('reshape() takes exactly 1 argument (0 given)')
if len(args) == 1 and isinstance(args[0], tuple):
return _mx_np_op.reshape(self, newshape=args[0], order=order)
else:
return _mx_np_op.reshape(self, newshape=args, order=order)
def argmax(self, axis=None, out=None): # pylint: disable=arguments-differ
"""Return indices of the maximum values along the given axis.
Refer to `mxnet.numpy.argmax` for full documentation."""
return argmax(self, axis, out)
def reshape_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`reshape_like`.
The arguments are the same as for :py:func:`reshape_like`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute reshape_like')
def zeros_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`zeros_like`.
The arguments are the same as for :py:func:`zeros_like`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute zeros_like')
def ones_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`ones_like`.
The arguments are the same as for :py:func:`ones_like`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute ones_like')
def broadcast_axes(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`broadcast_axes`.
The arguments are the same as for :py:func:`broadcast_axes`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute broadcast_axes')
def repeat(self, repeats, axis=None): # pylint: disable=arguments-differ
"""Repeat elements of an array."""
return repeat(self, repeats=repeats, axis=axis)
def pad(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`pad`.
The arguments are the same as for :py:func:`pad`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute pad')
def swapaxes(self, axis1, axis2): # pylint: disable=arguments-differ
"""Return a copy of the array with axis1 and axis2 interchanged.
Refer to `mxnet.numpy.swapaxes` for full documentation.
"""
return swapaxes(self, axis1, axis2)
def split(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`split`.
The arguments are the same as for :py:func:`split`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute split')
def split_v2(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`split_v2`.
The arguments are the same as for :py:func:`split_v2`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute split_v2')
def slice(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`slice`.
The arguments are the same as for :py:func:`slice`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute slice')
def slice_axis(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`slice_axis`.
The arguments are the same as for :py:func:`slice_axis`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute slice_axis')
def slice_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`slice_like`.
The arguments are the same as for :py:func:`slice_like`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute slice_like')
def take(self, indices, axis=None, mode='raise'): # pylint: disable=arguments-differ, redefined-outer-name
"""Convenience fluent method for :py:func:`take`.
The arguments are the same as for :py:func:`take`, with
this array as data.
"""
return take(self, indices, axis, mode=mode)
def one_hot(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`one_hot`.
The arguments are the same as for :py:func:`one_hot`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute one_hot')
def pick(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`pick`.
The arguments are the same as for :py:func:`pick`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute pick')
def sort(self, axis=-1, kind=None, order=None): # pylint: disable=arguments-differ
"""Convenience fluent method for :py:func:`sort`.
The arguments are the same as for :py:func:`sort`, with
this array as data.
"""
return sort(self, axis=axis, kind=kind, order=order)
def topk(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`topk`.
The arguments are the same as for :py:func:`topk`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute topk')
def argsort(self, axis=-1, kind=None, order=None): # pylint: disable=arguments-differ
"""Convenience fluent method for :py:func:`argsort`.
The arguments are the same as for :py:func:`argsort`, with
this array as data.
"""
return argsort(self, axis=axis, kind=kind, order=order)
def argmax_channel(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`argmax_channel`.
The arguments are the same as for :py:func:`argmax_channel`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute argmax_channel')
def argmin(self, axis=None, out=None): # pylint: disable=arguments-differ
"""Return indices of the minimum values along the given axis.
Refer to `mxnet.numpy.argmax` for full documentation."""
return argmin(self, axis, out)
def clip(self, min=None, max=None, out=None): # pylint: disable=arguments-differ, redefined-outer-name
"""Return an array whose values are limited to [min, max].
One of max or min must be given.
"""
return clip(self, min, max, out=out)
def abs(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`abs`.
The arguments are the same as for :py:func:`abs`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute abs')
def sign(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sign`.
The arguments are the same as for :py:func:`sign`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute sign')
def flatten(self, order='C'): # pylint: disable=arguments-differ
"""Return a copy of the array collapsed into one dimension."""
return self.reshape(-1, order=order)
def shape_array(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`shape_array`.
The arguments are the same as for :py:func:`shape_array`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute shape_array')
def size_array(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`size_array`.
The arguments are the same as for :py:func:`size_array`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute size_array')
def expand_dims(self, *args, **kwargs): # pylint: disable=arguments-differ,unused-argument
"""Convenience fluent method for :py:func:`expand_dims`.
The arguments are the same as for :py:func:`expand_dims`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute expand_dims')
def tile(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`tile`.
The arguments are the same as for :py:func:`tile`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute tile')
def transpose(self, *axes): # pylint: disable=arguments-differ
"""The arguments are the same as for :py:func:`transpose`, with
this array as data.
"""
if len(axes) == 0:
axes = None
elif len(axes) == 1:
if isinstance(axes[0], (tuple, list)):
axes = axes[0]
elif axes[0] is None:
axes = None
return transpose(self, axes=axes)
def flip(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`flip`.
The arguments are the same as for :py:func:`flip`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute flip')
def depth_to_space(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`depth_to_space`.
The arguments are the same as for :py:func:`depth_to_space`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute depth_to_space')
def space_to_depth(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`space_to_depth`.
The arguments are the same as for :py:func:`space_to_depth`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute space_to_depth')
def diag(self, k=0, **kwargs):
"""Convenience fluent method for :py:func:`diag`.
The arguments are the same as for :py:func:`diag`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute diag')
def sum(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Return the sum of the array elements over the given axis."""
return _npi.sum(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def nansum(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`nansum`.
The arguments are the same as for :py:func:`nansum`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute nansum')
def prod(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Return the product of the array elements over the given axis."""
return _mx_np_op.prod(self, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
def nanprod(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`nanprod`.
The arguments are the same as for :py:func:`nanprod`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute nanprod')
def mean(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Returns the average of the array elements along given axis."""
return mean(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=arguments-differ,too-many-arguments
"""Returns the standard deviation of the array elements along given axis."""
return std(self, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=arguments-differ,too-many-arguments
"""Returns the variance of the array elements, along given axis."""
return var(self, axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims)
def cumsum(self, axis=None, dtype=None, out=None):
"""Return the cumulative sum of the elements along the given axis."""
return _npi.cumsum(self, axis=axis, dtype=dtype, out=out)
def max(self, axis=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Return the maximum along a given axis."""
return _npi.max(self, axis=axis, keepdims=keepdims, out=out)
def min(self, axis=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Return the minimum along a given axis."""
return _npi.min(self, axis=axis, keepdims=keepdims, out=out)
def norm(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`norm`.
The arguments are the same as for :py:func:`norm`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute norm')
def round(self, decimals=0, out=None, **kwargs): # pylint: disable=arguments-differ
"""Convenience fluent method for :py:func:`round`.
The arguments are the same as for :py:func:`round`, with
this array as data.
"""
return round(self, decimals=decimals, out=out, **kwargs)
def rint(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`rint`.
The arguments are the same as for :py:func:`rint`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute rint')
def fix(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`fix`.
The arguments are the same as for :py:func:`fix`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute fix')
def floor(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`floor`.
The arguments are the same as for :py:func:`floor`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute floor')
def ceil(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`ceil`.
The arguments are the same as for :py:func:`ceil`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute ceil')
def trunc(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`trunc`.
The arguments are the same as for :py:func:`trunc`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute trunc')
def sin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sin`.
The arguments are the same as for :py:func:`sin`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute sin')
def cos(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cos`.
The arguments are the same as for :py:func:`cos`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute cos')
def tan(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`tan`.
The arguments are the same as for :py:func:`tan`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute tan')
def arcsin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arcsin`.
The arguments are the same as for :py:func:`arcsin`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute arcsin')
def arccos(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arccos`.
The arguments are the same as for :py:func:`arccos`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute arccos')
def arctan(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arctan`.
The arguments are the same as for :py:func:`arctan`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute arctan')
def degrees(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`degrees`.
The arguments are the same as for :py:func:`degrees`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute degrees')
def radians(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`radians`.
The arguments are the same as for :py:func:`radians`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute radians')
def sinh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sinh`.
The arguments are the same as for :py:func:`sinh`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute sinh')
def cosh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cosh`.
The arguments are the same as for :py:func:`cosh`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute cosh')
def tanh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`tanh`.
The arguments are the same as for :py:func:`tanh`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute tanh')
def arcsinh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arcsinh`.
The arguments are the same as for :py:func:`arcsinh`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute arcsinh')
def arccosh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arccosh`.
The arguments are the same as for :py:func:`arccosh`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute arccosh')
def arctanh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arctanh`.
The arguments are the same as for :py:func:`arctanh`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute arctanh')
def exp(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`exp`.
The arguments are the same as for :py:func:`exp`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute exp')
def expm1(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`expm1`.
The arguments are the same as for :py:func:`expm1`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute expm1')
def log(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log`.
The arguments are the same as for :py:func:`log`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute log')
def log10(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log10`.
The arguments are the same as for :py:func:`log10`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute log10')
def log2(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log2`.
The arguments are the same as for :py:func:`log2`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute log2')
def log1p(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log1p`.
The arguments are the same as for :py:func:`log1p`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute log1p')
def sqrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sqrt`.
The arguments are the same as for :py:func:`sqrt`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute sqrt')
def rsqrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`rsqrt`.
The arguments are the same as for :py:func:`rsqrt`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute rsqrt')
def cbrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cbrt`.
The arguments are the same as for :py:func:`cbrt`, with
this array as data.
"""
        raise AttributeError('_Symbol object has no attribute cbrt')
def rcbrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`rcbrt`.
The arguments are the same as for :py:func:`rcbrt`, with
this array as data.
"""
        raise AttributeError('_Symbol object has no attribute rcbrt')
def square(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`square`.
The arguments are the same as for :py:func:`square`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute square')
def reciprocal(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`reciprocal`.
The arguments are the same as for :py:func:`reciprocal`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute reciprocal')
def relu(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`relu`.
The arguments are the same as for :py:func:`relu`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute relu')
def sigmoid(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sigmoid`.
The arguments are the same as for :py:func:`sigmoid`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute sigmoid')
def softmax(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`softmax`.
The arguments are the same as for :py:func:`softmax`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute softmax')
def log_softmax(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log_softmax`.
The arguments are the same as for :py:func:`log_softmax`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute log_softmax')
def softmin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`softmin`.
The arguments are the same as for :py:func:`softmin`, with
this array as data.
"""
raise AttributeError('_Symbol object has no attribute softmin')
def squeeze(self, axis=None): # pylint: disable=arguments-differ
"""Remove single-dimensional entries from the shape of a."""
return squeeze(self, axis=axis)
def broadcast_to(self, *args, **kwargs):
raise AttributeError('_Symbol object has no attribute broadcast_to')
def broadcast_like(self, *args, **kwargs):
raise AttributeError('_Symbol object has no attribute broadcast_like')
@set_module('mxnet.symbol.numpy')
def zeros(shape, dtype=float, order='C', ctx=None):
"""Return a new array of given shape and type, filled with zeros.
This function currently only supports storing multi-dimensional data
in row-major (C-style).
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
dtype : str or numpy.dtype, optional
        An optional value type.
        When npx.is_np_default_dtype() returns False, the default dtype is float32;
        when npx.is_np_default_dtype() returns True, the default dtype is float64.
        Note that this differs from NumPy's `zeros`, where `float64` is always the
        default: here the default can be switched between 'float32' and 'float64',
        because `float32` is the customary data type in deep learning.
order : {'C'}, optional, default: 'C'
How to store multi-dimensional data in memory, currently only row-major
(C-style) is supported.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : Symbol
Array of zeros with the given shape, dtype, and ctx.
"""
if order != 'C':
raise NotImplementedError
if ctx is None:
ctx = current_context()
if dtype is None or dtype is float:
dtype = _np.float64 if is_np_default_dtype() else _np.float32
return _npi.zeros(shape=shape, ctx=ctx, dtype=dtype)
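# Illustrative sketch (not part of the original module): the dtype fallback used by
# ``zeros`` above (and by ``eye`` further below), collected in one place. The helper
# name is hypothetical; ``_np`` and ``is_np_default_dtype`` are the same names this
# file already uses.
def _sketch_default_dtype_resolution(dtype):
    """Return the concrete dtype that ``zeros`` would pass to the backend."""
    if dtype is None or dtype is float:
        return _np.float64 if is_np_default_dtype() else _np.float32
    return dtype
# e.g. _sketch_default_dtype_resolution(None) resolves to float32 unless the float64
# default has been enabled, while an explicit dtype passes through unchanged.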
@set_module('mxnet.symbol.numpy')
def ones(shape, dtype=None, order='C', ctx=None):
"""Return a new array of given shape and type, filled with ones.
This function currently only supports storing multi-dimensional data
in row-major (C-style).
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
dtype : str or numpy.dtype, optional
An optional value type.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
Note that this behavior is different from NumPy's `ones` function where
`float64` is the default value.
order : {'C'}, optional, default: 'C'
How to store multi-dimensional data in memory, currently only row-major
(C-style) is supported.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : _Symbol
Array of ones with the given shape, dtype, and ctx.
"""
if order != 'C':
raise NotImplementedError
if ctx is None:
ctx = current_context()
return _npi.ones(shape=shape, ctx=ctx, dtype=dtype)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def invert(x, out=None, **kwargs):
r"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
Parameters
----------
x : array_like
Only integer and boolean types are handled.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
out : ndarray or scalar
Result.
This is a scalar if `x` is a scalar.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> x = np.invert(np.array(13, dtype=np.uint8))
>>> x
242
>>> np.binary_repr(x, width=8)
'11110010'
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
"""
return _unary_func_helper(x, _npi.bitwise_not, _np.bitwise_not, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def bitwise_not(x, out=None, **kwargs):
r"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
Parameters
----------
x : array_like
Only integer and boolean types are handled.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
out : ndarray or scalar
Result.
This is a scalar if `x` is a scalar.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> x = np.invert(np.array(13, dtype=np.uint8))
>>> x
242
>>> np.binary_repr(x, width=8)
'11110010'
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
"""
return _unary_func_helper(x, _npi.bitwise_not, _np.bitwise_not, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
def broadcast_to(array, shape):
"""
Broadcast an array to a new shape.
Parameters
----------
array : _Symbol or scalar
The array to broadcast.
shape : tuple
The shape of the desired array.
Returns
-------
broadcast : array
A readonly view on the original array with the given shape. It is
typically not contiguous. Furthermore, more than one element of a
broadcasted array may refer to a single memory location.
Raises
------
MXNetError
If the array is not compatible with the new shape according to NumPy's
broadcasting rules.
"""
if _np.isscalar(array):
return full(shape, array)
return _npi.broadcast_to(array, shape)
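# Illustrative sketch (not part of the original module): the broadcasting semantics
# delegated to ``_npi.broadcast_to`` mirror plain NumPy, which is what this helper
# demonstrates. ``_np`` is the NumPy module imported by this file; the helper name
# is hypothetical.
def _sketch_broadcast_to_semantics():
    row = _np.array([1.0, 2.0, 3.0])           # shape (3,)
    tiled = _np.broadcast_to(row, (2, 3))      # shape (2, 3), a read-only view
    assert tiled.shape == (2, 3)
    assert (tiled == _np.array([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]])).all()
    # a scalar input takes the ``full`` branch above: every element equals the scalar
    assert (_np.broadcast_to(5.0, (2, 2)) == _np.full((2, 2), 5.0)).all()
    return tiled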
@set_module('mxnet.symbol.numpy')
def full(shape, fill_value, dtype=None, order='C', ctx=None, out=None): # pylint: disable=too-many-arguments
"""
Return a new array of given shape and type, filled with `fill_value`.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
fill_value : scalar or _Symbol
Fill value.
dtype : data-type, optional
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
The desired data-type for the array. The default, `None`, means
`np.array(fill_value).dtype`.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
    ctx : Context, optional
        An optional device context, e.g. a specific GPU (default is the current default context).
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Array of `fill_value` with the given shape, dtype, and order.
Notes
-----
This function differs from the original `numpy.full
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.full.html>`_ in
the following way(s):
- Have an additional `ctx` argument to specify the device
- Have an additional `out` argument
- Currently does not support `order` selection
See Also
--------
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Examples
--------
>>> np.full((2, 2), 10)
array([[10., 10.],
[10., 10.]])
>>> np.full((2, 2), 2, dtype=np.int32, ctx=mx.cpu(0))
array([[2, 2],
[2, 2]], dtype=int32)
"""
if order != 'C':
raise NotImplementedError
if ctx is None:
ctx = current_context()
if isinstance(fill_value, Symbol):
if dtype is None:
ret = broadcast_to(fill_value, shape)
else:
ret = broadcast_to(fill_value, shape).astype(dtype)
return ret
if isinstance(fill_value, bool):
fill_value = int(fill_value)
dtype = _np.bool if dtype is None else dtype
return _npi.full(shape=shape, value=fill_value, ctx=ctx, dtype=dtype, out=out)
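# Illustrative sketch (not part of the original module): a plain-NumPy view of the
# ``full`` semantics documented above, including the boolean ``fill_value`` case the
# code special-cases. ``_np`` is the NumPy module imported by this file; the helper
# name is hypothetical.
def _sketch_full_semantics():
    a = _np.full((2, 2), 10.0)                  # float fill keeps a float dtype
    assert a.dtype == _np.float64 and (a == 10.0).all()
    b = _np.full((2, 2), 2, dtype=_np.int32)    # an explicit dtype always wins
    assert b.dtype == _np.int32
    c = _np.full((2,), True)                    # bool fill implies a boolean result
    assert c.dtype == _np.bool_ and c.all()
    return a, b, c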
@set_module('mxnet.symbol.numpy')
def full_like(a, fill_value, dtype=None, order='C', ctx=None, out=None): # pylint: disable=too-many-arguments
"""
Return a full array with the same shape and type as a given array.
Parameters
----------
a : _Symbol
The shape and data-type of `a` define these same attributes of
the returned array.
fill_value : scalar
Fill value.
dtype : data-type, optional
Overrides the data type of the result.
        Boolean type is temporarily not supported.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
    ctx : Context, optional
        An optional device context, e.g. a specific GPU (default is the current default context).
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : _Symbol
Array `fill_value` with the same shape and type as `a`.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full : Return a new array of given shape filled with value.
"""
if order != 'C':
raise NotImplementedError
if ctx is None:
ctx = current_context()
if isinstance(fill_value, bool):
fill_value = int(fill_value)
return _npi.full_like(a, fill_value=fill_value, ctx=ctx, dtype=dtype, out=out)
@set_module('mxnet.symbol.numpy')
def zeros_like(a, dtype=None, order='C', ctx=None, out=None): # pylint: disable=too-many-arguments
"""
Return an array of zeros with the same shape and type as a given array.
Parameters
----------
a : _Symbol
The shape and data-type of `a` define these same attributes of
the returned array.
    dtype : data-type, optional
        Overrides the data type of the result.
        Boolean type is temporarily not supported.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
    ctx : Context, optional
        An optional device context, e.g. a specific GPU (default is the current default context).
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : _Symbol
Array of zeros with the same shape and type as `a`.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
    full_like : Return a new array with shape of input filled with value.
zeros : Return a new array of given shape filled with zeros.
"""
if order != 'C':
raise NotImplementedError
if ctx is None:
ctx = current_context()
return _npi.full_like(a, fill_value=0, ctx=ctx, dtype=dtype, out=out)
@set_module('mxnet.symbol.numpy')
def ones_like(a, dtype=None, order='C', ctx=None, out=None): # pylint: disable=too-many-arguments
"""
Return an array of ones with the same shape and type as a given array.
Parameters
----------
a : _Symbol
The shape and data-type of `a` define these same attributes of
the returned array.
    dtype : data-type, optional
        Overrides the data type of the result.
        Boolean type is temporarily not supported.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
    ctx : Context, optional
        An optional device context, e.g. a specific GPU (default is the current default context).
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : _Symbol
Array of ones with the same shape and type as `a`.
See Also
--------
empty_like : Return an empty array with shape and type of input.
    full_like : Return a new array with shape of input filled with value.
    zeros_like : Return an array of zeros with shape and type of input.
    ones : Return a new array setting values to one.
"""
if order != 'C':
raise NotImplementedError
if ctx is None:
ctx = current_context()
return _npi.full_like(a, fill_value=1, ctx=ctx, dtype=dtype, out=out)
@set_module('mxnet.symbol.numpy')
def identity(n, dtype=None, ctx=None):
"""
Return the identity array.
The identity array is a square array with ones on
the main diagonal.
Parameters
----------
n : int
Number of rows (and columns) in `n` x `n` output.
dtype : data-type, optional
Data-type of the output.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : _Symbol
`n` x `n` array with its main diagonal set to one,
and all other elements 0.
"""
if not isinstance(n, int):
raise TypeError("Input 'n' should be an integer")
if n < 0:
raise ValueError("Input 'n' cannot be negative")
if ctx is None:
ctx = current_context()
return _npi.identity(shape=(n, n), ctx=ctx, dtype=dtype)
# pylint: disable=redefined-outer-name
@set_module('mxnet.symbol.numpy')
def take(a, indices, axis=None, mode='raise', out=None):
r"""
Take elements from an array along an axis.
When axis is not None, this function does the same thing as "fancy"
indexing (indexing arrays using arrays); however, it can be easier to use
if you need elements along a given axis. A call such as
``np.take(arr, indices, axis=3)`` is equivalent to
``arr[:,:,:,indices,...]``.
Explained without fancy indexing, this is equivalent to the following use
of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of
indices::
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
Nj = indices.shape
for ii in ndindex(Ni):
for jj in ndindex(Nj):
for kk in ndindex(Nk):
out[ii + jj + kk] = a[ii + (indices[jj],) + kk]
Parameters
----------
a : _Symbol
The source array.
indices : _Symbol
The indices of the values to extract. Also allow scalars for indices.
axis : int, optional
The axis over which to select values. By default, the flattened
input array is used.
out : _Symbol or None, optional
Dummy parameter to keep the consistency with the ndarray counterpart.
mode : {'clip', 'wrap'}, optional
Specifies how out-of-bounds indices will behave.
* 'clip' -- clip to the range (default)
* 'wrap' -- wrap around
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
Returns
-------
out : _Symbol
The returned array has the same type as `a`.
Notes
-----
This function differs from the original `numpy.take
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.take.html>`_ in
the following way(s):
- Only ndarray or scalar ndarray is accepted as valid input.
"""
if mode not in ('wrap', 'clip', 'raise'):
raise NotImplementedError(
"function take does not support mode '{}'".format(mode))
if axis is None:
return _npi.take(_npi.reshape(a, -1), indices, 0, mode, out)
else:
return _npi.take(a, indices, axis, mode, out)
# pylint: enable=redefined-outer-name
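# Illustrative sketch (not part of the original module): the ``ndindex`` loop spelled
# out in the docstring of ``take`` above, written with plain NumPy and checked against
# ``_np.take``. ``_np`` is the NumPy module imported by this file; the helper name is
# hypothetical and expects in-range indices.
def _sketch_take_as_ndindex_loop(a, indices, axis):
    a = _np.asarray(a)
    indices = _np.asarray(indices)
    Ni, Nk = a.shape[:axis], a.shape[axis + 1:]
    Nj = indices.shape
    out = _np.empty(Ni + Nj + Nk, dtype=a.dtype)
    for ii in _np.ndindex(Ni):
        for jj in _np.ndindex(Nj):
            for kk in _np.ndindex(Nk):
                out[ii + jj + kk] = a[ii + (indices[jj],) + kk]
    assert (out == _np.take(a, indices, axis=axis)).all()
    return out
# e.g. _sketch_take_as_ndindex_loop(_np.arange(24).reshape(2, 3, 4), _np.array([2, 0]), 1)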
#pylint: disable= too-many-arguments, no-member, protected-access
def _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None, out=None):
""" Helper function for element-wise operation.
The function will perform numpy-like broadcasting if needed and call different functions.
Parameters
--------
lhs : Symbol or numeric value
Left-hand side operand.
rhs : Symbol or numeric value
        Right-hand side operand.
fn_array : function
Function to be called if both lhs and rhs are of ``Symbol`` type.
fn_scalar : function
Function to be called if both lhs and rhs are numeric values.
lfn_scalar : function
Function to be called if lhs is ``Symbol`` while rhs is numeric value
rfn_scalar : function
Function to be called if lhs is numeric value while rhs is ``Symbol``;
if none is provided, then the function is commutative, so rfn_scalar is equal to lfn_scalar
Returns
--------
    _Symbol or scalar
        Result symbol or scalar.
"""
if isinstance(lhs, numeric_types):
if isinstance(rhs, numeric_types):
return fn_scalar(lhs, rhs, out=out)
else:
            is_int = isinstance(lhs, integer_types)
if rfn_scalar is None:
# commutative function
return lfn_scalar(rhs, scalar=float(lhs), is_int=is_int, out=out)
else:
return rfn_scalar(rhs, scalar=float(lhs), is_int=is_int, out=out)
elif isinstance(rhs, numeric_types):
is_int = isinstance(rhs, integer_types)
return lfn_scalar(lhs, scalar=float(rhs), is_int=is_int, out=out)
elif isinstance(rhs, Symbol):
return fn_array(lhs, rhs, out=out)
else:
raise TypeError('type %s not supported' % str(type(rhs)))
#pylint: enable= too-many-arguments, no-member, protected-access
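# Illustrative sketch (not part of the original module): the branch structure of
# ``_ufunc_helper`` above, reduced to a function that only reports which path a given
# (lhs, rhs) pair would take. The helper name is hypothetical; ``numeric_types`` is
# the same import the helper itself relies on.
def _sketch_ufunc_dispatch(lhs, rhs, commutative=True):
    """Name the ``_ufunc_helper`` branch chosen for (lhs, rhs)."""
    if isinstance(lhs, numeric_types):
        if isinstance(rhs, numeric_types):
            return 'fn_scalar'                       # plain Python arithmetic
        # scalar on the left, symbol on the right
        return 'lfn_scalar (operands swapped)' if commutative else 'rfn_scalar'
    if isinstance(rhs, numeric_types):
        return 'lfn_scalar'                          # symbol op scalar
    return 'fn_array'                                # symbol op symbol
# e.g. _sketch_ufunc_dispatch(2.0, 3) -> 'fn_scalar', while a symbol paired with a
# scalar lands on one of the *_scalar operators.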
@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def add(x1, x2, out=None, **kwargs):
return _ufunc_helper(x1, x2, _npi.add, _np.add, _npi.add_scalar, None, out)
@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def subtract(x1, x2, out=None, **kwargs):
return _ufunc_helper(x1, x2, _npi.subtract, _np.subtract, _npi.subtract_scalar,
_npi.rsubtract_scalar, out)
@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def multiply(x1, x2, out=None, **kwargs):
return _ufunc_helper(x1, x2, _npi.multiply, _np.multiply, _npi.multiply_scalar, None, out)
@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def divide(x1, x2, out=None, **kwargs):
return _ufunc_helper(x1, x2, _npi.true_divide, _np.divide, _npi.true_divide_scalar,
_npi.rtrue_divide_scalar, out)
@set_module('mxnet.symbol.numpy')
def true_divide(x1, x2, out=None):
return _ufunc_helper(x1, x2, _npi.true_divide, _np.divide, _npi.true_divide_scalar,
_npi.rtrue_divide_scalar, out)
@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def mod(x1, x2, out=None, **kwargs):
return _ufunc_helper(x1, x2, _npi.mod, _np.mod, _npi.mod_scalar, _npi.rmod_scalar, out)
@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def fmod(x1, x2, out=None, **kwargs):
return _ufunc_helper(x1, x2, _npi.fmod, _np.fmod, _npi.fmod_scalar, _npi.rfmod_scalar, out)
@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def remainder(x1, x2, out=None, **kwargs):
return _ufunc_helper(x1, x2, _npi.mod, _np.mod, _npi.mod_scalar, _npi.rmod_scalar, out)
@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def power(x1, x2, out=None, **kwargs):
return _ufunc_helper(x1, x2, _npi.power, _np.power, _npi.power_scalar, _npi.rpower_scalar, out)
@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def matmul(a, b, out=None, **kwargs):
"""
Matrix product of two arrays.
Parameters
----------
a, b : _Symbol.
out : _Symbol, optional
A location into which the result is stored.
If provided, it must have a shape that matches the signature (n,k),(k,m)->(n,m).
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : _Symbol
The matrix product of the inputs.
        This is a scalar only when both `a` and `b` are 1-d vectors.
Raises
------
MXNetError
If the last dimension of a is not the same size as the second-to-last dimension of b.
If a scalar value is passed in.
See Also
--------
tensordot :
Sum products over arbitrary axes.
dot :
alternative matrix product with different broadcasting rules.
einsum :
Einstein summation convention.
Notes
-----
The behavior depends on the arguments in the following way.
- If both arguments are 2-D they are multiplied like conventional matrices.
- If either argument is N-D, N > 2, it is treated as a stack of matrices
residing in the last two indexes and broadcast accordingly.
- If the first argument is 1-D, it is promoted to a matrix by prepending
a 1 to its dimensions. After matrix multiplication the prepended 1 is removed.
- If the second argument is 1-D, it is promoted to a matrix by appending a 1
to its dimensions. After matrix multiplication the appended 1 is removed.
matmul differs from dot in two important ways:
- Multiplication by scalars is not allowed, use multiply instead.
- Stacks of matrices are broadcast together as if the matrices were elements,
respecting the signature (n,k),(k,m)->(n,m).
"""
return _npi.matmul(a, b, out=out)
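# Illustrative sketch (not part of the original module): the promotion and stacking
# rules listed in the ``matmul`` notes above, demonstrated with plain NumPy. ``_np``
# is the NumPy module imported by this file; the helper name is hypothetical.
def _sketch_matmul_rules():
    A = _np.arange(6.0).reshape(2, 3)
    B = _np.arange(12.0).reshape(3, 4)
    assert _np.matmul(A, B).shape == (2, 4)          # conventional 2-D product
    v = _np.ones(3)
    assert _np.matmul(A, v).shape == (2,)            # appended 1 is removed
    assert _np.matmul(v, B).shape == (4,)            # prepended 1 is removed
    S = _np.ones((5, 2, 3))
    T = _np.ones((5, 3, 4))
    assert _np.matmul(S, T).shape == (5, 2, 4)       # stacks broadcast as elements
    return True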
@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def lcm(x1, x2, out=None, **kwargs):
"""
Returns the lowest common multiple of ``|x1|`` and ``|x2|``
Parameters
----------
x1, x2 : _Symbols or scalar values
The arrays for computing lowest common multiple. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which may be the shape of
one or the other).
out : _Symbol or None, optional
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
y : _Symbol or scalar
The lowest common multiple of the absolute value of the inputs
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
gcd : The greatest common divisor
"""
return _ufunc_helper(x1, x2, _npi.lcm, _np.lcm, _npi.lcm_scalar, None, out)
@set_module('mxnet.symbol.numpy')
def argsort(a, axis=-1, kind=None, order=None):
"""
Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : _Symbol
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : string, optional
This argument can take any string, but it does not have any effect on the
final result.
order : str or list of str, optional
Not supported yet, will raise NotImplementedError if not None.
Returns
-------
index_array : _Symbol, int
Array of indices that sort `a` along the specified `axis`.
If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
More generally, ``np.take_along_axis(a, index_array, axis=axis)``
always yields the sorted `a`, irrespective of dimensionality.
Notes
-----
This operator does not support different sorting algorithms.
"""
if order is not None:
raise NotImplementedError("order is not supported yet...")
return _npi.argsort(data=a, axis=axis, is_ascend=True, dtype='int64')
@set_module('mxnet.symbol.numpy')
def sort(a, axis=-1, kind=None, order=None):
"""
Return a sorted copy of an array.
Parameters
----------
a : _Symbol
Array to be sorted.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : string, optional
This argument can take any string, but it does not have any effect on the
final result.
order : str or list of str, optional
Not supported yet, will raise NotImplementedError if not None.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
Notes
-----
This operator does not support different sorting algorithms.
"""
if order is not None:
raise NotImplementedError("order is not supported yet...")
return _npi.sort(data=a, axis=axis, is_ascend=True)
@set_module('mxnet.symbol.numpy')
def tensordot(a, b, axes=2):
r"""
tensordot(a, b, axes=2)
Compute tensor dot product along specified axes for arrays >= 1-D.
Given two tensors (arrays of dimension greater than or equal to one),
`a` and `b`, and an ndarray object containing two ndarray
objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s
elements (components) over the axes specified by ``a_axes`` and
``b_axes``. The third argument can be a single non-negative
integer_like scalar, ``N``; if it is such, then the last ``N``
dimensions of `a` and the first ``N`` dimensions of `b` are summed
over.
Parameters
----------
a, b : _Symbol
Tensors to "dot".
axes : int or (2,) ndarray
* integer_like
If an int N, sum over the last N axes of `a` and the first N axes
of `b` in order. The sizes of the corresponding axes must match.
* (2,) array_like
Or, a list of axes to be summed over, first sequence applying to `a`,
second to `b`. Both elements array_like must be of the same length.
Notes
-----
Three common use cases are:
* ``axes = 0`` : tensor product :math:`a\otimes b`
* ``axes = 1`` : tensor dot product :math:`a\cdot b`
* ``axes = 2`` : (default) tensor double contraction :math:`a:b`
When `axes` is integer_like, the sequence for evaluation will be: first
the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and
Nth axis in `b` last.
When there is more than one axis to sum over - and they are not the last
(first) axes of `a` (`b`) - the argument `axes` should consist of
two sequences of the same length, with the first axis to sum over given
first in both sequences, the second axis second, and so forth.
"""
if _np.isscalar(axes):
return _npi.tensordot_int_axes(a, b, axes)
if len(axes) != 2:
raise ValueError('Axes must consist of two arrays.')
a_axes_summed, b_axes_summed = axes
if _np.isscalar(a_axes_summed):
a_axes_summed = (a_axes_summed,)
if _np.isscalar(b_axes_summed):
b_axes_summed = (b_axes_summed,)
if len(a_axes_summed) != len(b_axes_summed):
raise ValueError('Axes length mismatch')
return _npi.tensordot(a, b, a_axes_summed, b_axes_summed)
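# Illustrative sketch (not part of the original module): the two common ``axes``
# forms described in the ``tensordot`` docstring above, demonstrated with plain
# NumPy. ``_np`` is the NumPy module imported by this file; the helper name is
# hypothetical.
def _sketch_tensordot_contractions():
    a = _np.arange(24.0).reshape(2, 3, 4)
    b = _np.arange(24.0).reshape(3, 4, 2)
    # axes=2: contract the last two axes of ``a`` with the first two axes of ``b``
    double = _np.tensordot(a, b, axes=2)
    assert _np.allclose(double, _np.einsum('ijk,jkl->il', a, b))
    # explicit axis pairs: sum axis 1 of ``a`` against axis 0 of ``b``
    single = _np.tensordot(a, b, axes=([1], [0]))
    assert single.shape == (2, 4, 4, 2)
    return double.shape, single.shape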
@set_module('mxnet.symbol.numpy')
def histogram(a, bins=10, range=None, normed=None, weights=None, density=None): # pylint: disable= too-many-arguments
"""
Compute the histogram of a set of data.
Parameters
----------
a : Symbol
Input data. The histogram is computed over the flattened array.
bins : int or Symbol
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines a monotonically increasing array of bin edges,
including the rightmost edge, allowing for non-uniform bin widths.
.. versionadded:: 1.11.0
If `bins` is a string, it defines the method used to calculate the
optimal bin width, as defined by `histogram_bin_edges`.
range : (float, float)
The lower and upper range of the bins. Required when `bins` is an integer.
Values outside the range are ignored. The first element of the range must
be less than or equal to the second.
normed : bool, optional
Not supported yet, coming soon.
weights : array_like, optional
Not supported yet, coming soon.
density : bool, optional
Not supported yet, coming soon.
"""
if normed is True:
raise NotImplementedError("normed is not supported yet...")
if weights is not None:
raise NotImplementedError("weights is not supported yet...")
if density is True:
raise NotImplementedError("density is not supported yet...")
if isinstance(bins, numeric_types):
if range is None:
            raise NotImplementedError("automatic range is not available yet...")
return _npi.histogram(a, bin_cnt=bins, range=range)
if isinstance(bins, (list, tuple)):
raise NotImplementedError("array_like bins is not supported yet...")
if isinstance(bins, str):
raise NotImplementedError("string bins is not supported yet...")
if isinstance(bins, Symbol):
return _npi.histogram(a, bins)
raise ValueError("histogram fails with", locals())
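# Illustrative sketch (not part of the original module): the integer-``bins`` path
# above requires an explicit ``range``; plain NumPy shows the resulting bin edges and
# counts. ``_np`` is the NumPy module imported by this file; the helper name is
# hypothetical.
def _sketch_histogram_with_range():
    data = _np.array([0.5, 1.5, 1.7, 2.2, 3.9])
    counts, edges = _np.histogram(data, bins=4, range=(0.0, 4.0))
    assert edges.shape == (5,)                 # bin count + 1 edges
    assert counts.tolist() == [1, 2, 1, 1]     # bins [0,1), [1,2), [2,3), [3,4]
    return counts, edges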
@set_module('mxnet.symbol.numpy')
def eye(N, M=None, k=0, dtype=float, **kwargs):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to N.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal,
and a negative value to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
Returns
-------
I : _Symbol of shape (N,M)
An array where all elements are equal to zero,
except for the k-th diagonal, whose values are equal to one.
"""
_sanity_check_params('eye', ['order'], kwargs)
ctx = kwargs.pop('ctx', current_context())
if ctx is None:
ctx = current_context()
if dtype is None or dtype is float:
dtype = _np.float64 if is_np_default_dtype() else _np.float32
return _npi.eye(N, M, k, ctx, dtype)
@set_module('mxnet.symbol.numpy')
def empty_like(prototype, dtype=None, order='C', subok=False, shape=None): # pylint: disable=W0621
"""
Return a new array with the same shape and type as a given array.
Parameters
----------
prototype : _Symbol
The shape and data-type of `prototype` define these same attributes
of the returned array.
dtype : data-type, optional
Overrides the data type of the result.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to False.
(Only support False at this moment)
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
(This parameter is not supported at this moment)
Returns
-------
out : _Symbol
Array of uninitialized (arbitrary) data with the same
shape and type as `prototype`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
Notes
-----
This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than
the functions that do set the array values.
"""
dtype_list = {None:'None', _np.int8:'int8', _np.uint8:'uint8', _np.int32:'int32',
_np.int64:'int64', _np.float16:'float16', _np.float32:'float32',
_np.float64:'float64', _np.bool_:'bool_', bool:'bool', int:'int64', float:'float64'}
if order != 'C':
raise NotImplementedError("Only support C order at this moment")
if subok:
raise NotImplementedError("Creating array by using sub-class is not supported at this moment")
if shape is not None:
raise NotImplementedError("Parameter 'shape' is not supported at this moment")
try:
dtype = dtype if isinstance(dtype, str) else dtype_list[dtype]
    except KeyError:
raise NotImplementedError("Do not support this dtype at this moment")
return _npi.empty_like_fallback(prototype, dtype=dtype, order=order, subok=subok, shape=shape)
@set_module('mxnet.symbol.numpy')
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0, ctx=None): # pylint: disable=too-many-arguments
r"""
Return evenly spaced numbers over a specified interval.
Returns num evenly spaced samples, calculated over the interval [start, stop].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : real number
The starting value of the sequence.
stop : real number
The end value of the sequence, unless endpoint is set to False. In
that case, the sequence consists of all but the last of num + 1
evenly spaced samples, so that stop is excluded. Note that the step
size changes when endpoint is False.
num : int, optional
Number of samples to generate. Default is 50. Must be non-negative.
endpoint : bool, optional
If True, stop is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (samples, step), where step is the spacing between samples.
dtype : dtype, optional
The type of the output array. If dtype is not given, infer the data
type from the other input arguments.
axis : int, optional
The axis in the result to store the samples. Relevant only if start or
stop are array-like. By default (0), the samples will be along a new
axis inserted at the beginning. Use -1 to get an axis at the end.
Returns
-------
samples : _Symbol
There are num equally spaced samples in the closed interval
`[start, stop]` or the half-open interval `[start, stop)`
(depending on whether endpoint is True or False).
step : float, optional
Only returned if retstep is True
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
Notes
-----
This function differs from the original `numpy.linspace
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html>`_ in
the following aspects:
- `start` and `stop` do not support list, numpy ndarray and mxnet ndarray
- axis could only be 0
- There could be an additional `ctx` argument to specify the device, e.g. the i-th
GPU.
"""
if isinstance(start, (list, _np.ndarray)) or isinstance(stop, (list, _np.ndarray)):
raise NotImplementedError('start and stop only support int')
if axis != 0:
raise NotImplementedError("the function only support axis 0")
if ctx is None:
ctx = current_context()
if retstep:
        step = (stop - start) / (num - 1) if endpoint else (stop - start) / num
return _npi.linspace(start=start, stop=stop, num=num, endpoint=endpoint, ctx=ctx, dtype=dtype), step
else:
return _npi.linspace(start=start, stop=stop, num=num, endpoint=endpoint, ctx=ctx, dtype=dtype)
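# Illustrative sketch (not part of the original module): the ``retstep`` spacing rule
# used above, checked against plain NumPy. ``_np`` is the NumPy module imported by
# this file; the helper name is hypothetical.
def _sketch_linspace_step():
    _, step = _np.linspace(0.0, 1.0, num=5, retstep=True)
    assert step == 0.25                # (stop - start) / (num - 1) with endpoint=True
    _, step = _np.linspace(0.0, 1.0, num=5, endpoint=False, retstep=True)
    assert abs(step - 0.2) < 1e-12     # (stop - start) / num when the endpoint is excluded
    return step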
@set_module('mxnet.symbol.numpy')
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0, ctx=None): # pylint: disable=too-many-arguments
r"""Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
    Non-scalar `start` and `stop` are not supported yet.
Parameters
----------
start : scalar
``base ** start`` is the starting value of the sequence.
stop : scalar
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length `num`) are returned.
num : scalar, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : scalar, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
axis : scalar, optional
The axis in the result to store the samples. Relevant only if start
or stop are array-like. By default (0), the samples will be along a
new axis inserted at the beginning. Now, axis only support axis = 0.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
samples : _Symbol
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
...
>>> power(base, y).astype(dtype)
...
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.44347, 464.15887, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([100. , 177.82794, 316.22775, 562.3413 ])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([4. , 5.0396843, 6.349604 , 8. ])
>>> np.logspace(2.0, 3.0, num=4, base=2.0, dtype=np.int32)
array([4, 5, 6, 8], dtype=int32)
>>> np.logspace(2.0, 3.0, num=4, ctx=npx.gpu(0))
array([ 100. , 215.44347, 464.15887, 1000. ], ctx=gpu(0))
"""
if isinstance(start, (list, _np.ndarray)) or \
isinstance(stop, (list, _np.ndarray)):
raise NotImplementedError('start and stop only support int')
if axis != 0:
raise NotImplementedError("the function only support axis 0")
if ctx is None:
ctx = current_context()
return _npi.logspace(start=start, stop=stop, num=num, endpoint=endpoint, base=base, ctx=ctx, dtype=dtype)
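# Illustrative sketch (not part of the original module): the identity quoted in the
# ``logspace`` notes above (logspace == base ** linspace), checked with plain NumPy.
# ``_np`` is the NumPy module imported by this file; the helper name is hypothetical.
def _sketch_logspace_identity():
    y = _np.linspace(2.0, 3.0, num=4)
    assert _np.allclose(_np.logspace(2.0, 3.0, num=4), _np.power(10.0, y))
    # changing ``base`` only changes which power curve the samples lie on
    assert _np.allclose(_np.logspace(2.0, 3.0, num=4, base=2.0), _np.power(2.0, y))
    return y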
@set_module('mxnet.symbol.numpy')
def expand_dims(a, axis):
"""Expand the shape of an array.
    Insert a new axis that will appear at the `axis` position in the expanded
    array shape.
Parameters
----------
a : _Symbol
Input array.
axis : int
Position in the expanded axes where the new axis is placed.
Returns
-------
res : _Symbol
Output array. The number of dimensions is one greater than that of
the input array.
"""
return _npi.expand_dims(a, axis)
@set_module('mxnet.symbol.numpy')
def tril(m, k=0):
r"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : _Symbol, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : _Symbol, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
"""
return _npi.tril(m, k)
@set_module('mxnet.symbol.numpy')
def triu(m, k=0):
r"""
Upper triangle of an array.
    Return a copy of an array with the elements below the `k`-th diagonal zeroed.
Parameters
----------
m : _Symbol, shape (M, N)
Input array.
k : int, optional
        Diagonal below which to zero elements. `k = 0` (the default) is the
        main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
triu : _Symbol, shape (M, N)
Upper triangle of `m`, of same shape and data-type as `m`.
See Also
--------
tril : same thing, only for the lower triangle
"""
return _npi.triu(m, k)
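# Illustrative sketch (not part of the original module): how ``tril`` and ``triu``
# partition a matrix, demonstrated with plain NumPy. ``_np`` is the NumPy module
# imported by this file; the helper name is hypothetical.
def _sketch_tril_triu_relation():
    m = _np.arange(16.0).reshape(4, 4)
    lower = _np.tril(m)          # zeros strictly above the main diagonal
    upper = _np.triu(m, k=1)     # zeros on and below the main diagonal
    # every element lands in exactly one of the two parts
    assert _np.allclose(lower + upper, m)
    return lower, upper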
@set_module('mxnet.symbol.numpy')
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of _Symbol
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
if m is None:
m = n
return _npi.tril_indices(n, k, m)
@set_module('mxnet.symbol.numpy')
def trace(a, offset=0, axis1=0, axis2=1, out=None):
"""
Return the sum along diagonals of the array.
If `a` is 2-D, the sum along its diagonal with the given offset
is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
If `a` has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-arrays whose traces are returned.
The shape of the resulting array is the same as that of `a` with `axis1`
and `axis2` removed.
Parameters
----------
a : _Symbol
Input array, from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to 0.
axis1, axis2 : int, optional
Axes to be used as the first and second axis of the 2-D sub-arrays
from which the diagonals should be taken. Defaults are the first two
axes of `a`.
out : _Symbol
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
sum_along_diagonals : _Symbol
If `a` is 2-D, the sum along the diagonal is returned. If `a` has
larger dimensions, then an array of sums along diagonals is returned.
"""
return _npi.trace(a, offset=offset, axis1=axis1, axis2=axis2, out=out)
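# Illustrative sketch (not part of the original module): the ``offset`` and
# ``axis1``/``axis2`` behaviour described in the ``trace`` docstring above, shown
# with plain NumPy. ``_np`` is the NumPy module imported by this file; the helper
# name is hypothetical.
def _sketch_trace_offset():
    a = _np.arange(9.0).reshape(3, 3)
    assert _np.trace(a) == a[0, 0] + a[1, 1] + a[2, 2]       # main diagonal
    assert _np.trace(a, offset=1) == a[0, 1] + a[1, 2]       # one diagonal above
    # for higher-dimensional input, axis1/axis2 pick the 2-D sub-arrays to reduce
    b = _np.arange(24.0).reshape(2, 3, 4)
    assert _np.trace(b, axis1=1, axis2=2).shape == (2,)
    return True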
@set_module('mxnet.symbol.numpy')
def transpose(a, axes=None):
"""
Permute the dimensions of an array.
Parameters
----------
a : _Symbol
Input array.
axes : list of ints, optional
By default, reverse the dimensions,
otherwise permute the axes according to the values given.
Returns
-------
p : _Symbol
a with its axes permuted.
"""
return _npi.transpose(a, axes=axes)
@set_module('mxnet.symbol.numpy')
def tri(N, M=None, k=0, dtype=None, ctx=None):
r"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : Symbol of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
"""
if dtype is None:
dtype = 'float32'
if M is None:
M = N
if ctx is None:
ctx = current_context()
return _npi.tri(N, M, k, dtype, ctx)
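# Illustrative sketch (not part of the original module): the element-wise definition
# given in the ``tri`` docstring above (``T[i, j] == 1`` exactly when ``j <= i + k``),
# verified with plain NumPy. ``_np`` is the NumPy module imported by this file; the
# helper name is hypothetical.
def _sketch_tri_definition():
    N, M, k = 4, 5, 1
    t = _np.tri(N, M, k)
    i = _np.arange(N).reshape(-1, 1)
    j = _np.arange(M).reshape(1, -1)
    assert _np.array_equal(t, (j <= i + k).astype(t.dtype))
    return t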
@set_module('mxnet.symbol.numpy')
def repeat(a, repeats, axis=None):
"""
Repeat elements of an array.
Parameters
----------
a : array_like
Input array.
repeats : int
The number of repetitions for each element.
axis : int, optional
The axis along which to repeat values. By default, use the
flattened input array, and return a flat output array.
Returns
-------
repeated_array : ndarray
Output array which has the same shape as `a`, except along
the given axis.
See Also
--------
tile : Tile an array.
Examples
--------
>>> np.repeat(3, 4)
array([3, 3, 3, 3])
>>> x = np.array([[1,2],[3,4]])
>>> np.repeat(x, 2)
array([1, 1, 2, 2, 3, 3, 4, 4])
>>> np.repeat(x, 3, axis=1)
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> np.repeat(x, [1, 2], axis=0)
array([[1, 2],
[3, 4],
[3, 4]])
"""
return _npi.repeat(a, repeats=repeats, axis=axis)
def _unary_func_helper(x, fn_array, fn_scalar, out=None, **kwargs):
"""Helper function for unary operators.
Parameters
----------
x : _Symbol or scalar
Input of the unary operator.
fn_array : function
Function to be called if x is of ``_Symbol`` type.
fn_scalar : function
Function to be called if x is a Python scalar.
out : _Symbol
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
out : _Symbol or scalar
Result _Symbol or scalar.
"""
if isinstance(x, numeric_types):
return fn_scalar(x, **kwargs)
elif isinstance(x, _Symbol):
return fn_array(x, out=out, **kwargs)
else:
raise TypeError('type {} not supported'.format(str(type(x))))
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def sin(x, out=None, **kwargs):
r"""
Trigonometric sine, element-wise.
Parameters
----------
x : _Symbol or scalar
Angle, in radians (:math:`2 \pi` rad equals 360 degrees).
out : _Symbol or None
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
y : _Symbol
The sine of each element of x.
This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
"""
return _unary_func_helper(x, _npi.sin, _np.sin, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def cos(x, out=None, **kwargs):
r"""
Cosine, element-wise.
Parameters
----------
x : _Symbol or scalar
Angle, in radians (:math:`2 \pi` rad equals 360 degrees).
out : _Symbol or None
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
y : _Symbol
The corresponding cosine values. This is a scalar if x is a scalar.
Notes
----
This function only supports input type of float.
"""
return _unary_func_helper(x, _npi.cos, _np.cos, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def sinh(x, out=None, **kwargs):
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or ``-1j * np.sin(1j*x)``.
Parameters
----------
x : _Symbol or scalar
Input array or scalar.
out : _Symbol or None
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
y : _Symbol or scalar
The corresponding hyperbolic sine values. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
"""
return _unary_func_helper(x, _npi.sinh, _np.sinh, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def cosh(x, out=None, **kwargs):
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : _Symbol or scalar
Input array or scalar.
out : _Symbol or None
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
y : _Symbol or scalar
The corresponding hyperbolic cosine values. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
"""
return _unary_func_helper(x, _npi.cosh, _np.cosh, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def tanh(x, out=None, **kwargs):
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)``.
Parameters
----------
x : _Symbol
Input array.
out : _Symbol or None
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
y : _Symbol
The corresponding hyperbolic tangent values.
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
- input x does not support complex computation (like imaginary number)
>>> np.tanh(np.pi*1j)
TypeError: type <type 'complex'> not supported
Examples
--------
    >>> np.tanh(np.array([0, np.pi]))
array([0. , 0.9962721])
>>> np.tanh(np.pi)
0.99627207622075
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array(1)
>>> out2 = np.tanh(np.array(0.1), out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
mxnet.base.MXNetError:
[07:17:36] ../src/ndarray/./../operator/tensor/../elemwise_op_common.h:135:
Check failed: assign(&dattr, vec.at(i)): Incompatible attr in node
at 0-th output: expected [3,3], got [2,2]
"""
return _unary_func_helper(x, _npi.tanh, _np.tanh, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def log10(x, out=None, **kwargs):
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : _Symbol or scalar
Input array or scalar.
out : _Symbol or None
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
y : _Symbol or scalar
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
"""
return _unary_func_helper(x, _npi.log10, _np.log10, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def sqrt(x, out=None, **kwargs):
"""
Return the non-negative square-root of an array, element-wise.
Parameters
----------
x : _Symbol or scalar
The values whose square-roots are required.
out : _Symbol, or None, optional
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
y : _Symbol or scalar
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
"""
return _unary_func_helper(x, _npi.sqrt, _np.sqrt, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def cbrt(x, out=None, **kwargs):
r"""
Return the cube-root of an array, element-wise.
Parameters
----------
x : _Symbol
The values whose cube-roots are required.
out : _Symbol or None
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
----------
y : _Symbol
        An array of the same shape as x, containing the cube-root of each element in x.
If out was provided, y is a reference to it. This is a scalar if x is a scalar.
"""
return _unary_func_helper(x, _npi.cbrt, _np.cbrt, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def abs(x, out=None, **kwargs):
r"""
Calculate the absolute value element-wise.
Parameters
----------
x : _Symbol or scalar
Input array.
out : _Symbol or None
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
absolute : _Symbol
An ndarray containing the absolute value of
each element in `x`. This is a scalar if `x` is a scalar.
"""
return _unary_func_helper(x, _npi.abs, _np.abs, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def fabs(x, out=None, **kwargs):
r"""
Calculate the absolute value element-wise.
This function returns the absolute values (positive magnitude) of the
data in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : _Symbol or scalar
Input array.
out : _Symbol or None
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
absolute : _Symbol
An ndarray containing the absolute value of
each element in `x`. This is a scalar if `x` is a scalar.
"""
return _unary_func_helper(x, _npi.abs, _np.abs, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def absolute(x, out=None, **kwargs):
r"""
Calculate the absolute value element-wise.
np.abs is a shorthand for this function.
Parameters
----------
x : _Symbol
Input array.
out : _Symbol or None
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
----------
absolute : _Symbol
An ndarray containing the absolute value of each element in x.
"""
return _unary_func_helper(x, _npi.absolute, _np.absolute, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def sign(x, out=None, **kwargs):
r"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. Only supports real number.
Parameters
----------
x : _Symbol or a scalar
Input values.
out : _Symbol or None, optional
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
y : _Symbol
The sign of `x`.
This is a scalar if `x` is a scalar.
Note
-------
- Only supports real number as input elements.
- Input type does not support Python native iterables(list, tuple, ...)
- ``out`` param: cannot perform auto broadcasting. ``out`` symbol's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` symbol's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
"""
return _unary_func_helper(x, _npi.sign, _np.sign, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def exp(x, out=None, **kwargs):
r"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : _Symbol or scalar
Input values.
out : _Symbol or None
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
out : _Symbol
Output array, element-wise exponential of `x`.
This is a scalar if `x` is a scalar.
"""
return _unary_func_helper(x, _npi.exp, _np.exp, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def expm1(x, out=None, **kwargs):
r"""
Calculate `exp(x) - 1` for all elements in the array.
Parameters
----------
x : _Symbol or scalar
Input values.
out : _Symbol or None
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
out : _Symbol
        Output array, element-wise ``exp(x) - 1``.
This is a scalar if `x` is a scalar.
"""
return _unary_func_helper(x, _npi.expm1, _np.expm1, out=out, **kwargs)
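# Illustrative sketch (not part of the original module): why ``expm1`` exists at all.
# Near zero, ``exp(x) - 1`` loses almost all significant digits to cancellation,
# while ``expm1`` stays accurate. ``_np`` is the NumPy module imported by this file;
# the helper name is hypothetical.
def _sketch_expm1_precision():
    x = 1e-10
    naive = _np.exp(x) - 1.0     # catastrophic cancellation near zero
    stable = _np.expm1(x)        # accurate for tiny x
    assert abs(stable - x) < abs(naive - x)
    return naive, stable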
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def arcsin(x, out=None, **kwargs):
r"""
Inverse sine, element-wise.
Parameters
----------
x : _Symbol or scalar
        `y`-coordinate on the unit circle; real values in the closed interval ``[-1, 1]``.
out : _Symbol, or None, optional
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
angle : _Symbol or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
The inverse sine is also known as `asin` or sin^{-1}.
The output `symbol` has the same `ctx` as the input `symbol`.
This function differs from the original `numpy.arcsin
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.arcsin.html>`_ in
the following aspects:
- Only support _Symbol or scalar now.
- `where` argument is not supported.
- Complex input is not supported.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
"""
return _unary_func_helper(x, _npi.arcsin, _np.arcsin, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def arccos(x, out=None, **kwargs):
r"""
Trigonometric inverse cosine, element-wise.
The inverse of cos so that, if y = cos(x), then x = arccos(y).
Parameters
----------
x : _Symbol
x-coordinate on the unit circle. For real arguments, the domain is [-1, 1].
out : _Symbol or None
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
----------
angle : _Symbol
The angle of the ray intersecting the unit circle at the given x-coordinate in radians [0, pi].
This is a scalar if x is a scalar.
See also
----------
cos, arctan, arcsin
Notes
----------
arccos is a multivalued function: for each x there are infinitely many numbers z such that
cos(z) = x. The convention is to return the angle z whose real part lies in [0, pi].
For real-valued input data types, arccos always returns real output.
For each value that cannot be expressed as a real number or infinity, it yields nan and sets
the invalid floating point error flag.
The inverse cos is also known as acos or cos^-1.
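Examples
--------
Illustrative examples; scalar inputs follow the official NumPy behaviour,
as with the other examples in this module.
>>> np.arccos(1)
0.0
>>> np.arccos(-1)
3.141592653589793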
"""
return _unary_func_helper(x, _npi.arccos, _np.arccos, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def arctan(x, out=None, **kwargs):
r"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : _Symbol or scalar
Input values.
out : _Symbol or None
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
out : _Symbol
Out has the same shape as `x`. Its values lie in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
This is a scalar if `x` is a scalar.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, we do not have support for them yet.
The inverse tangent is also known as `atan` or tan^{-1}.
"""
return _unary_func_helper(x, _npi.arctan, _np.arctan, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def log(x, out=None, **kwargs):
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base
`e`.
Parameters
----------
x : _Symbol
Input value. Elements must be of real value.
out : _Symbol or None, optional
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
y : _Symbol
The natural logarithm of `x`, element-wise.
This is a scalar if `x` is a scalar.
Notes
-----
Currently only supports data of real values and ``inf`` as input. Returns data of real value, ``inf``, ``-inf`` and
``nan`` according to the input.
This function differs from the original `numpy.log
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.log.html>`_ in
the following aspects:
- Does not support complex number for now
- Input type does not support Python native iterables(list, tuple, ...). Only ndarray is supported.
- ``out`` param: cannot perform auto broadcasting. ``out`` symbol's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` symbol's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
"""
return _unary_func_helper(x, _npi.log, _np.log, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def degrees(x, out=None, **kwargs):
"""
Convert angles from radians to degrees.
Parameters
----------
x : _Symbol
Input value. Elements must be of real value.
out : _Symbol or None, optional
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
y : _Symbol of floats
The corresponding degree values; if `out` was supplied this is a
reference to it.
This is a scalar if `x` is a scalar.
Notes
-------
This function differs from the original `numpy.degrees
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.degrees.html>`_ in
the following aspects:
- Input type does not support Python native iterables(list, tuple, ...). Only ndarray is supported.
- ``out`` param: cannot perform auto broadcasting. ``out`` symbol's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` symbol's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
"""
return _unary_func_helper(x, _npi.degrees, _np.degrees, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def rad2deg(x, out=None, **kwargs):
r"""
Convert angles from radians to degrees.
Parameters
----------
x : _Symbol or scalar
Angles in radians.
out : _Symbol or None, optional
A location into which the result is stored.
Returns
-------
y : _Symbol or scalar
The corresponding angle in degrees.
This is a scalar if `x` is a scalar.
Notes
-----
"rad2deg(x)" is "x * 180 / pi".
This function differs from the original `numpy.rad2deg` in the following aspects:
- Only support float32 and float64.
- `out` must be of the same size as the input.
"""
return _unary_func_helper(x, _npi.rad2deg, _np.rad2deg, out=out)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def rint(x, out=None, **kwargs):
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : _Symbol or scalar
Input array.
out : _Symbol or None
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
out : _Symbol or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
Notes
-----
This function differs from the original `numpy.rint
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.rint.html>`_ in
the following way(s):
- only _Symbol or scalar is accepted as valid input, tuple of _Symbol is not supported
- broadcasting to `out` of different shape is currently not supported
- when input is plain python numerics, the result will not be stored in the `out` param
"""
return _unary_func_helper(x, _npi.rint, _np.rint, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def log2(x, out=None, **kwargs):
"""
Base-2 logarithm of x.
Parameters
----------
x : _Symbol
Input values.
out : _Symbol or None
A location into which the result is stored.
If provided, it must have the same shape and type as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : _Symbol
The logarithm base two of `x`, element-wise.
This is a scalar if `x` is a scalar.
Notes
-----
This function differs from the original `numpy.log2
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.log2.html>`_ in
the following way(s):
- only _Symbol or scalar is accepted as valid input, tuple of _Symbol is not supported
- broadcasting to `out` of different shape is currently not supported
- when input is plain python numerics, the result will not be stored in the `out` param
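Examples
--------
Illustrative example mirroring the ndarray counterpart of ``log2``.
>>> np.log2(np.array([1., 2., 8.]))
array([0., 1., 3.])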
"""
return _unary_func_helper(x, _npi.log2, _np.log2, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def log1p(x, out=None, **kwargs):
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : _Symbol or scalar
Input array.
out : _Symbol or None
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
y : _Symbol or scalar
Natural logarithm of 1 + x, element-wise. This is a scalar
if x is a scalar.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
Complex-valued input is not supported.
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> a = np.array([3, 4, 5])
>>> np.log1p(a)
array([1.3862944, 1.609438 , 1.7917595])
"""
return _unary_func_helper(x, _npi.log1p, _np.log1p, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def radians(x, out=None, **kwargs):
"""
Convert angles from degrees to radians.
Parameters
----------
x : _Symbol or scalar
Input array in degrees.
out : _Symbol or None
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
y : _Symbol
The corresponding radian values. This is a scalar if x is a scalar.
Notes
-----
This function differs from the original `numpy.radians
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.radians.html>`_ in
the following way(s):
- only _Symbol or scalar is accepted as valid input, tuple of _Symbol is not supported
- broadcasting to `out` of different shape is currently not supported
- when input is plain python numerics, the result will not be stored in the `out` param
Examples
--------
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([0. , 0.5235988, 1.0471976, 1.5707964, 2.0943952, 2.6179938,
3.1415927, 3.6651914, 4.1887903, 4.712389 , 5.2359877, 5.7595863],
dtype=float32)
"""
return _unary_func_helper(x, _npi.radians, _np.radians, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def deg2rad(x, out=None, **kwargs):
r"""
deg2rad(x, out=None)
Convert angles from degrees to radians.
Parameters
----------
x : _Symbol or scalar
Angles in degrees.
out : _Symbol or None, optional
A location into which the result is stored.
Returns
-------
y : _Symbol or scalar
The corresponding angle in radians.
This is a scalar if `x` is a scalar.
Notes
-----
"deg2rad(x)" is "x * pi / 180".
This function differs from the original `numpy.deg2rad` in the following aspects:
- Only support float32 and float64.
- `out` must be of the same size as the input.
"""
return _unary_func_helper(x, _npi.deg2rad, _np.deg2rad, out=out)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def reciprocal(x, out=None, **kwargs):
r"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : _Symbol or scalar
The values whose reciprocals are required.
out : _Symbol, or None, optional
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
y : _Symbol or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division. For
integer zero the result is an overflow.
The output `symbol` has the same `ctx` as the input `symbol`.
This function differs from the original `numpy.reciprocal
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.reciprocal.html>`_ in
the following aspects:
- Only support _Symbol and scalar now.
- `where` argument is not supported.
"""
return _unary_func_helper(x, _npi.reciprocal, _np.reciprocal, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def square(x, out=None, **kwargs):
r"""
Return the element-wise square of the input.
Parameters
----------
x : _Symbol or scalar
The values whose squares are required.
out : _Symbol, or None, optional
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
y : _Symbol or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
Notes
-----
The output `symbol` has the same `ctx` as the input `symbol`.
This function differs from the original `numpy.square
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.square.html>`_ in
the following aspects:
- Only support _Symbol and scalar now.
- `where` argument is not supported.
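Examples
--------
Illustrative example mirroring the ndarray counterpart of ``square``.
>>> np.square(np.array([-2., 3.]))
array([4., 9.])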
"""
return _unary_func_helper(x, _npi.square, _np.square, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def negative(x, out=None, **kwargs):
r"""
Numerical negative, element-wise.
Parameters
----------
x : _Symbol or scalar
Input array.
out : _Symbol or None, optional
A location into which the result is stored.
If provided, it must have a shape that the inputs broadcast to.
If not provided or None, a freshly-allocated array is returned.
A tuple (possible only as a keyword argument) must have length
equal to the number of outputs.
Returns
-------
y : _Symbol or scalar
Returned array or scalar: y = -x. This is a scalar if x is a scalar.
Examples
--------
>>> np.negative(1)
-1
"""
return _unary_func_helper(x, _npi.negative, _np.negative, out=out)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def fix(x, out=None, **kwargs):
"""
Round to nearest integer towards zero.
Round an array of floats element-wise to nearest integer towards zero. The rounded values are returned as floats.
Parameters
----------
x : _Symbol or scalar
An array of floats to be rounded
out : _Symbol or scalar, optional
Output array
Returns
-------
y : _Symbol or scalar
Examples
--------
>>> np.fix(3.14)
3.0
"""
return _unary_func_helper(x, _npi.fix, _np.fix, out=out)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def tan(x, out=None, **kwargs):
r"""
Compute tangent element-wise.
Equivalent to np.sin(x)/np.cos(x) element-wise.
Parameters
----------
x : _Symbol or scalar
Input array.
out : _Symbol or scalar or None.
A location into which the result is stored. If provided,
it must have a shape that the inputs broadcast to. If not provided or None,
a freshly-allocated array is returned. A tuple (possible only as a keyword argument)
must have length equal to the number of outputs.
Returns
-------
y : _Symbol or scalar
The corresponding tangent values. This is a scalar if x is a scalar.
"""
return _unary_func_helper(x, _npi.tan, _np.tan, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def ceil(x, out=None, **kwargs):
r"""
Return the ceiling of the input, element-wise.
The ceil of the ndarray `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\lceil x \rceil`.
Parameters
----------
x : _Symbol or scalar
Input array.
out : _Symbol or None
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
y : _Symbol or scalar
The ceiling of each element in `x`, with `float` dtype.
This is a scalar if `x` is a scalar.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
>>> #if you use parameter out, x and out must be ndarray. if not, you will get an error!
>>> a = np.array(1)
>>> np.ceil(np.array(3.5), a)
array(4.)
>>> a
array(4.)
"""
return _unary_func_helper(x, _npi.ceil, _np.ceil, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : _Symbol
Input array.
obj : int, slice or ndarray of int64
Object that defines the index or indices before which `values` is
inserted.
Support for multiple insertions when `obj` is a single scalar or a
sequence with one element (only int32 and int64 elements are supported).
values : _Symbol
Values to insert into `arr`.
If the type of values is different from that of arr, values is converted
to the type of arr.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : _Symbol
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
Notes
-----
- Note that for higher dimensional inserts `obj=0` behaves very different
from `obj=[0]` just like `arr[:,0,:] = values` is different from
`arr[:,[0],:] = values`.
- If `obj` is an ndarray, its dtype only supports int64.
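Examples
--------
Illustrative example mirroring the ndarray counterpart of ``insert``.
>>> np.insert(np.array([1., 2., 3.]), 1, 5.)
array([1., 5., 2., 3.])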
"""
if isinstance(values, numeric_types):
if isinstance(obj, slice):
start = obj.start
stop = obj.stop
step = 1 if obj.step is None else obj.step
return _npi.insert_slice(arr, val=values, start=start, stop=stop, step=step, axis=axis)
elif isinstance(obj, integer_types):
return _npi.insert_scalar(arr, val=values, int_ind=obj, axis=axis)
elif isinstance(obj, Symbol):
return _npi.insert_tensor(arr, obj, val=values, axis=axis)
if not isinstance(arr, Symbol): # pylint: disable= undefined-variable
raise TypeError("'arr' can not support type {}".format(str(type(arr))))
if not isinstance(values, Symbol): # pylint: disable= undefined-variable
raise TypeError("'values' can not support type {}".format(str(type(values))))
if isinstance(obj, slice):
start = obj.start
stop = obj.stop
step = 1 if obj.step is None else obj.step
return _npi.insert_slice(arr, values, start=start, stop=stop, step=step, axis=axis)
elif isinstance(obj, integer_types):
return _npi.insert_scalar(arr, values, int_ind=obj, axis=axis)
elif isinstance(obj, Symbol):
return _npi.insert_tensor(arr, values, obj, axis=axis)
else:
raise TypeError("'obj' can not support type {}".format(str(type(obj))))
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def floor(x, out=None, **kwargs):
r"""
Return the floor of the input, element-wise.
The floor of the ndarray `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\lfloor x \rfloor`.
Parameters
----------
x : _Symbol or scalar
Input array.
out : _Symbol or None
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
y : _Symbol or scalar
The floor of each element in `x`, with `float` dtype.
This is a scalar if `x` is a scalar.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
>>> # if you use parameter out, x and out must be ndarray. if not, you will get an error!
>>> a = np.array(1)
>>> np.floor(np.array(3.5), a)
array(3.)
>>> a
array(3.)
"""
return _unary_func_helper(x, _npi.floor, _np.floor, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def trunc(x, out=None, **kwargs):
r"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : _Symbol or scalar
Input data.
out : _Symbol or None, optional
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
y : _Symbol or scalar
The truncated value of each element in `x`.
This is a scalar if `x` is a scalar.
Notes
-----
This function differs from the original numpy.trunc in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Cannot cast type automatically. Dtype of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
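Examples
--------
Illustrative example mirroring the ndarray counterpart of ``trunc``.
>>> np.trunc(np.array([-1.7, 1.5, 2.9]))
array([-1.,  1.,  2.])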
"""
return _unary_func_helper(x, _npi.trunc, _np.trunc, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def logical_not(x, out=None, **kwargs):
r"""
Compute the truth value of NOT x element-wise.
Parameters
----------
x : _Symbol or scalar
Logical NOT is applied to the elements of `x`.
out : _Symbol or None, optional
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
y : bool or _Symbol
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
This is a scalar if `x` is a scalar.
Notes
-----
This function differs from the original numpy.logical_not in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Cannot cast type automatically. Dtype of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
"""
return _unary_func_helper(x, _npi.logical_not, _np.logical_not, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def arcsinh(x, out=None, **kwargs):
r"""
Inverse hyperbolic sine, element-wise.
Parameters
----------
x : _Symbol or scalar
Input array.
out : _Symbol or None, optional
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
arcsinh : _Symbol
Array of the same shape as `x`.
This is a scalar if `x` is a scalar.
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
This function differs from the original numpy.arcsinh in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Do not support complex-valued input.
- Cannot cast type automatically. DType of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
"""
return _unary_func_helper(x, _npi.arcsinh, _np.arcsinh, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def arccosh(x, out=None, **kwargs):
r"""
Inverse hyperbolic cosine, element-wise.
Parameters
----------
x : _Symbol or scalar
Input array.
out : _Symbol or None, optional
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
arccosh : _Symbol
Array of the same shape as `x`.
This is a scalar if `x` is a scalar.
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
This function differs from the original numpy.arccosh in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Do not support complex-valued input.
- Cannot cast type automatically. Dtype of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
"""
return _unary_func_helper(x, _npi.arccosh, _np.arccosh, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def arctanh(x, out=None, **kwargs):
r"""
Inverse hyperbolic tangent, element-wise.
Parameters
----------
x : _Symbol or scalar
Input array.
out : _Symbol or None, optional
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
arctanh : _Symbol
Array of the same shape as `x`.
This is a scalar if `x` is a scalar.
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
This function differs from the original numpy.arctanh in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Do not support complex-valued input.
- Cannot cast type automatically. Dtype of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
"""
return _unary_func_helper(x, _npi.arctanh, _np.arctanh, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
def tile(A, reps):
r"""
Construct an array by repeating A the number of times given by reps.
If `reps` has length ``d``, the result will have dimension of
``max(d, A.ndim)``.
If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
or shape (1, 1, 3) for 3-D replication. If this is not the desired
behavior, promote `A` to d-dimensions manually before calling this
function.
If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
(1, 1, 2, 2).
Parameters
----------
A : _Symbol or scalar
An input array or a scalar to repeat.
reps : a single integer or tuple of integers
The number of repetitions of `A` along each axis.
Returns
-------
c : _Symbol
The tiled output array.
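Examples
--------
Illustrative example mirroring the ndarray counterpart of ``tile``.
>>> np.tile(np.array([0., 1., 2.]), 2)
array([0., 1., 2., 0., 1., 2.])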
"""
return _unary_func_helper(A, _npi.tile, _np.tile, reps=reps)
@set_module('mxnet.symbol.numpy')
def arange(start, stop=None, step=1, dtype=None, ctx=None):
"""Return evenly spaced values within a given interval.
Values are generated within the half-open interval ``[start, stop)``
(in other words, the interval including `start` but excluding `stop`).
For integer arguments the function is equivalent to the Python built-in
`range` function, but returns an ndarray rather than a list.
Parameters
----------
start : number, optional
Start of interval. The interval includes this value. The default
start value is 0.
stop : number
End of interval. The interval does not include this value, except
in some cases where `step` is not an integer and floating point
round-off affects the length of `out`.
step : number, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
step size is 1. If `step` is specified as a positional argument,
`start` must also be given.
dtype : dtype
The type of the output array.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
Returns
-------
arange : _Symbol
Array of evenly spaced values.
For floating point arguments, the length of the result is
``ceil((stop - start)/step)``. Because of floating point overflow,
this rule may result in the last element of `out` being greater
than `stop`.
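Examples
--------
Illustrative examples mirroring the ndarray counterpart of ``arange``
(the default dtype is float32 unless npx.is_np_default_dtype() returns True).
>>> np.arange(5)
array([0., 1., 2., 3., 4.])
>>> np.arange(1, 7, 2)
array([1., 3., 5.])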
"""
if ctx is None:
ctx = current_context()
if stop is None:
stop = start
start = 0
if step is None:
step = 1
if start is None and stop is None:
raise ValueError('start and stop cannot be both None')
if step == 0:
raise ZeroDivisionError('step cannot be 0')
return _npi.arange(start=start, stop=stop, step=step, dtype=dtype, ctx=ctx)
@set_module('mxnet.symbol.numpy')
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
dimensional array, this returns those entries not returned by
`arr[obj]`.
Parameters
----------
arr : _Symbol
Input array.
obj : slice, scalar or _Symbol of ints
Indicate indices of sub-arrays to remove along the specified axis.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : _Symbol
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
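Examples
--------
Illustrative example mirroring the ndarray counterpart of ``delete``.
>>> np.delete(np.array([1., 2., 3., 4.]), 1)
array([1., 3., 4.])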
"""
if not isinstance(arr, Symbol):
raise TypeError("'arr' can not support type {}".format(str(type(arr))))
if isinstance(obj, slice):
start = obj.start
stop = obj.stop
step = 1 if obj.step is None else obj.step
return _npi.delete(arr, start=start, stop=stop, step=step, axis=axis)
elif isinstance(obj, integer_types):
return _npi.delete(arr, int_ind=obj, axis=axis)
elif isinstance(obj, Symbol):
return _npi.delete(arr, obj, axis=axis)
else:
raise TypeError("'obj' can not support type {}".format(str(type(obj))))
# pylint: disable=redefined-outer-name
@set_module('mxnet.symbol.numpy')
def split(ary, indices_or_sections, axis=0):
"""Split an array into multiple sub-arrays.
Parameters
----------
ary : _Symbol
Array to be divided into sub-arrays.
indices_or_sections : int or 1-D python tuple, list or set.
If `indices_or_sections` is an integer, N, the array will be divided
into N equal arrays along `axis`. If such a split is not possible,
an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries
indicate where along `axis` the array is split. For example,
``[2, 3]`` would, for ``axis=0``, result in
- ary[:2]
- ary[2:3]
- ary[3:]
If an index exceeds the dimension of the array along `axis`,
an empty sub-array is returned correspondingly.
axis : int, optional
The axis along which to split, default is 0.
Returns
-------
sub-arrays : _Symbol
A list of sub-arrays.
Raises
------
ValueError
If `indices_or_sections` is given as an integer, but
a split does not result in equal division."""
indices = []
sections = 0
if isinstance(indices_or_sections, int):
sections = indices_or_sections
elif isinstance(indices_or_sections, (list, set, tuple)):
indices = [0] + list(indices_or_sections)
else:
raise ValueError('indices_or_sections must be either int or tuple / list / set of ints')
return _npi.split(ary, indices, axis, False, sections)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.symbol.numpy')
def array_split(ary, indices_or_sections, axis=0):
"""Split an array into multiple sub-arrays.
If `indices_or_sections` is an integer, N, the array will be divided
into N equal arrays along `axis`. If such an equal split is not possible,
then for an array of length l that is to be split into n sections, it returns
l % n sub-arrays of size l//n + 1 and the rest of size l//n.
If `indices_or_sections` is a 1-D array of sorted integers, the entries
indicate where along `axis` the array is split. For example,
``[2, 3]`` would, for ``axis=0``, result in
- ary[:2]
- ary[2:3]
- ary[3:]
If an index exceeds the dimension of the array along `axis`,
an empty sub-array is returned correspondingly.
Parameters
----------
ary : _Symbol
Array to be divided into sub-arrays.
indices_or_sections : int or 1-D Python tuple, list or set.
Param used to determine the number and size of the subarray.
axis : int, optional
The axis along which to split, default is 0.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
"""
indices = []
sections = 0
if isinstance(indices_or_sections, int):
sections = indices_or_sections
elif isinstance(indices_or_sections, (list, set, tuple)):
indices = [0] + list(indices_or_sections)
else:
raise ValueError('indices_or_sections must be either int or tuple / list / set of ints')
ret = _npi.array_split(ary, indices, axis, False, sections)
if not isinstance(ret, list):
return [ret]
return ret
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.symbol.numpy')
def hsplit(ary, indices_or_sections):
"""Split an array into multiple sub-arrays horizontally (column-wise).
This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one
dimension, and with ``axis=1`` otherwise.
Parameters
----------
ary : _Symbol
Array to be divided into sub-arrays.
indices_or_sections : int, list of ints or tuple of ints.
If `indices_or_sections` is an integer, N, the array will be divided
into N equal arrays along `axis`. If such a split is not possible,
an error is raised.
If `indices_or_sections` is a list of sorted integers, the entries
indicate where along `axis` the array is split.
If an index exceeds the dimension of the array along `axis`,
an error is raised, so each index must be less than or equal to
the dimension of the array along that axis.
Returns
-------
sub-arrays : _Symbol
A list of sub-arrays.
Notes
------
- If `indices_or_sections` is given as an integer but a split
does not result in equal division, a ValueError is raised.
- If `indices_or_sections` is an integer equal to 1, an error is raised,
because a single output from split is not supported yet.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
>>> np.hsplit(x, 2)
[array([[ 0., 1.],
[ 4., 5.],
[ 8., 9.],
[12., 13.]]),
array([[ 2., 3.],
[ 6., 7.],
[10., 11.],
[14., 15.]])]
>>> np.hsplit(x, [3, 6])
[array([[ 0., 1., 2.],
[ 4., 5., 6.],
[ 8., 9., 10.],
[12., 13., 14.]]),
array([[ 3.],
[ 7.],
[11.],
[15.]]),
array([], shape=(4, 0), dtype=float32)]
With a higher dimensional array the split is still along the second axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> np.hsplit(x, 2)
[array([[[ 0., 1.]],
[[ 4., 5.]]]),
array([[[ 2., 3.]],
[[ 6., 7.]]])]
If ``ary`` has one dimension, 'axis' = 0.
>>> x = np.arange(4)
array([0., 1., 2., 3.])
>>> np.hsplit(x, 2)
[array([0., 1.]), array([2., 3.])]
If you want to produce an empty sub-array, you can repeat an index:
>>> np.hsplit(x, [2, 2])
[array([0., 1.]), array([], dtype=float32), array([2., 3.])]
"""
indices = []
sections = 0
if isinstance(indices_or_sections, int):
sections = indices_or_sections
elif isinstance(indices_or_sections, (list, set, tuple)):
indices = [0] + list(indices_or_sections)
else:
raise ValueError('indices_or_sections must be either int or tuple of ints')
return _npi.hsplit(ary, indices, 1, False, sections)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.symbol.numpy')
def vsplit(ary, indices_or_sections):
r"""
vsplit(ary, indices_or_sections)
Split an array into multiple sub-arrays vertically (row-wise).
``vsplit`` is equivalent to ``split`` with `axis=0` (default): the array is always split
along the first axis regardless of the array dimension.
Parameters
----------
ary : _Symbol
Array to be divided into sub-arrays.
indices_or_sections : int or 1-D Python tuple, list or set.
If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays
along axis 0. If such a split is not possible, an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where
along axis 0 the array is split. For example, ``[2, 3]`` would result in
- ary[:2]
- ary[2:3]
- ary[3:]
If an index exceeds the dimension of the array along axis 0, an error will be thrown.
Returns
-------
sub-arrays : list of _Symbols
A list of sub-arrays.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Notes
-------
This function differs from the original `numpy.vsplit
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.vsplit.html>`_ in
the following aspects:
- Currently parameter ``indices_or_sections`` does not support ndarray, but supports scalar,
tuple and list
- In ``indices_or_sections``, if an index exceeds the dimension of the array along axis 0,
an error will be thrown.
"""
indices = []
sections = 0
if isinstance(indices_or_sections, int):
sections = indices_or_sections
elif isinstance(indices_or_sections, (list, set, tuple)):
indices = [0] + list(indices_or_sections)
else:
raise ValueError('indices_or_sections must be either int or tuple of ints')
return _npi.split(ary, indices, 0, False, sections)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.symbol.numpy')
def dsplit(ary, indices_or_sections):
"""
Split array into multiple sub-arrays along the 3rd axis (depth).
Please refer to the `split` documentation. `dsplit` is equivalent
to `split` with ``axis=2``, the array is always split along the third
axis provided the array dimension is greater than or equal to 3.
Parameters
----------
ary : _Symbol
Array to be divided into sub-arrays.
indices_or_sections : int or 1-D Python tuple, list or set.
If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays
along axis 2. If such a split is not possible, an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where
along axis 2 the array is split. For example, ``[2, 3]`` would result in
- ary[:, :, :2]
- ary[:, :, 2:3]
- ary[:, :, 3:]
If an index exceeds the dimension of the array along axis 2, an error will be thrown.
"""
indices = []
sections = 0
if isinstance(indices_or_sections, int):
sections = indices_or_sections
elif isinstance(indices_or_sections, (list, set, tuple)):
indices = [0] + list(indices_or_sections)
else:
raise ValueError('indices_or_sections must be either int or tuple of ints')
return _npi.dsplit(ary, indices, 2, False, sections)
# pylint: enable=redefined-outer-name
@set_module('mxnet.symbol.numpy')
def concatenate(seq, axis=0, out=None):
"""Join a sequence of arrays along an existing axis.
Parameters
----------
a1, a2, ... : sequence of _Symbols
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. If axis is None,
arrays are flattened before use. Default is 0.
out : ndarray, optional
If provided, the destination to place the result. The shape must be
correct, matching that of what concatenate would have returned if no
out argument were specified.
Returns
-------
res : _Symbol
The concatenated array.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> b = np.array([[5, 6]])
>>> np.concatenate((a, b), axis=0)
array([[1., 2.],
[3., 4.],
[5., 6.]])
>>> np.concatenate((a, b), axis=None)
array([1., 2., 3., 4., 5., 6.])
>>> np.concatenate((a, b.T), axis=1)
array([[1., 2., 5.],
[3., 4., 6.]])
"""
return _npi.concatenate(*seq, axis=axis, out=out)
@set_module('mxnet.symbol.numpy')
def append(arr, values, axis=None): # pylint: disable=redefined-outer-name
"""
Append values to the end of an array.
Parameters
----------
arr : _Symbol
Values are appended to a copy of this array.
values : _Symbol
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : _Symbol
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
Examples
--------
>>> np.append(np.array([1, 2, 3]), np.array([[4, 5, 6],[7, 8, 9]]))
array([1., 2., 3., 4., 5., 6., 7., 8., 9.])
When `axis` is specified, `values` must have the correct shape.
>>> np.append(np.array([[1, 2, 3], [4, 5, 6]]), np.array([[7, 8, 9]]), axis=0)
array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]])
"""
return _npi.concatenate(arr, values, axis=axis, out=None)
@set_module('mxnet.symbol.numpy')
def stack(arrays, axis=0, out=None):
"""Join a sequence of arrays along a new axis.
The axis parameter specifies the index of the new axis in the dimensions of the result.
For example, if `axis=0` it will be the first dimension and if `axis=-1` it will be the last dimension.
Parameters
----------
arrays : sequence of _Symbols
Each array must have the same shape.
axis : int, optional
The axis in the result array along which the input arrays are stacked.
out : _Symbol, optional
If provided, the destination to place the result. The shape must be correct,
matching that of what stack would have returned if no out argument were specified.
Returns
-------
stacked : _Symbol
The stacked array has one more dimension than the input arrays."""
def get_list(arrays):
if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
raise ValueError("expected iterable for arrays but got {}".format(type(arrays)))
return [arr for arr in arrays]
arrays = get_list(arrays)
return _npi.stack(*arrays, axis=axis, out=out)
@set_module('mxnet.symbol.numpy')
def vstack(arrays, out=None):
r"""Stack arrays in sequence vertically (row wise).
This is equivalent to concatenation along the first axis after 1-D arrays
of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
`vsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate` and `stack`
provide more general stacking and concatenation operations.
Parameters
----------
arrays : sequence of _Symbol
The arrays must have the same shape along all but the first axis.
1-D arrays must have the same length.
Returns
-------
stacked : _Symbol
The array formed by stacking the given arrays, will be at least 2-D.
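Examples
--------
Illustrative example mirroring the ndarray counterpart of ``vstack``.
>>> a = np.array([1., 2., 3.])
>>> b = np.array([4., 5., 6.])
>>> np.vstack((a, b))
array([[1., 2., 3.],
[4., 5., 6.]])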
"""
def get_list(arrays):
if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
raise ValueError("expected iterable for arrays but got {}".format(type(arrays)))
return [arr for arr in arrays]
arrays = get_list(arrays)
return _npi.vstack(*arrays)
@set_module('mxnet.symbol.numpy')
def row_stack(arrays):
r"""Stack arrays in sequence vertically (row wise).
This is equivalent to concatenation along the first axis after 1-D arrays
of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
`vsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate` and `stack`
provide more general stacking and concatenation operations.
Parameters
----------
arrays : sequence of _Symbol
The arrays must have the same shape along all but the first axis.
1-D arrays must have the same length.
Returns
-------
stacked : _Symbol
The array formed by stacking the given arrays, will be at least 2-D.
"""
def get_list(arrays):
if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
raise ValueError("expected iterable for arrays but got {}".format(type(arrays)))
return [arr for arr in arrays]
arrays = get_list(arrays)
return _npi.vstack(*arrays)
@set_module('mxnet.symbol.numpy')
def column_stack(tup):
"""
Stack 1-D arrays as columns into a 2-D array.
Take a sequence of 1-D arrays and stack them as columns
to make a single 2-D array. 2-D arrays are stacked as-is,
just like with `hstack`. 1-D arrays are turned into 2-D columns
first.
Parameters
----------
tup : sequence of 1-D or 2-D arrays.
Arrays to stack. All of them must have the same first dimension.
Returns
-------
stacked : 2-D array
The array formed by stacking the given arrays.
See Also
--------
stack, hstack, vstack, concatenate
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.column_stack((a,b))
array([[1., 2.],
[2., 3.],
[3., 4.]])
"""
return _npi.column_stack(*tup)
@set_module('mxnet.symbol.numpy')
def hstack(arrays):
"""
Stack arrays in sequence horizontally (column wise).
This is equivalent to concatenation along the second axis,
except for 1-D arrays where it concatenates along the first axis.
Rebuilds arrays divided by hsplit.
This function makes most sense for arrays with up to 3 dimensions.
For instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions concatenate,
stack and block provide more general stacking and concatenation operations.
Parameters
----------
arrays : sequence of _Symbol
The arrays must have the same shape along all but the second axis, except 1-D arrays which can be any length.
Returns
-------
stacked : _Symbol
The array formed by stacking the given arrays.
Examples
--------
>>> from mxnet import np,npx
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.hstack((a,b))
array([1., 2., 3., 2., 3., 4.])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.hstack((a,b))
array([[1., 2.],
[2., 3.],
[3., 4.]])
"""
return _npi.hstack(*arrays)
@set_module('mxnet.symbol.numpy')
def dstack(arrays):
"""
Stack arrays in sequence depth wise (along third axis).
This is equivalent to concatenation along the third axis after 2-D arrays
of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape
`(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by
`dsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate`, `stack` and
`block` provide more general stacking and concatenation operations.
Parameters
----------
arrays : sequence of _Symbol
The arrays must have the same shape along all but the third axis.
1-D or 2-D arrays must have the same shape.
Returns
-------
stacked : _Symbol
The array formed by stacking the given arrays, will be at least 3-D.
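Examples
--------
Illustrative example mirroring the ndarray counterpart of ``dstack``.
>>> a = np.array([1., 2., 3.])
>>> b = np.array([2., 3., 4.])
>>> np.dstack((a, b))
array([[[1., 2.],
[2., 3.],
[3., 4.]]])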
"""
return _npi.dstack(*arrays)
@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def maximum(x1, x2, out=None, **kwargs):
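"""
Element-wise maximum of the input values, with broadcasting support; mirrors
``numpy.maximum``. (Short reference docstring; the ndarray counterpart carries
the full documentation.)
>>> np.maximum(np.array([2., 3., 4.]), np.array([1., 5., 2.]))
array([2., 5., 4.])
"""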
return _ufunc_helper(x1, x2, _npi.maximum, _np.maximum, _npi.maximum_scalar, None, out)
@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def fmax(x1, x2, out=None, **kwargs):
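"""
Element-wise maximum of the input values, ignoring NaNs where possible; mirrors
``numpy.fmax`` (if one of the two compared elements is NaN, the non-NaN element
is returned). (Short reference docstring; the ndarray counterpart carries the
full documentation.)
>>> np.fmax(np.array([2., 3., 4.]), np.array([1., 5., 2.]))
array([2., 5., 4.])
"""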
return _ufunc_helper(x1, x2, _npi.fmax, _np.fmax, _npi.fmax_scalar, None, out)
@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def minimum(x1, x2, out=None, **kwargs):
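"""
Element-wise minimum of the input values, with broadcasting support; mirrors
``numpy.minimum``. (Short reference docstring; the ndarray counterpart carries
the full documentation.)
>>> np.minimum(np.array([2., 3., 4.]), np.array([1., 5., 2.]))
array([1., 3., 2.])
"""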
return _ufunc_helper(x1, x2, _npi.minimum, _np.minimum, _npi.minimum_scalar, None, out)
@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def fmin(x1, x2, out=None, **kwargs):
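"""
Element-wise minimum of the input values, ignoring NaNs where possible; mirrors
``numpy.fmin`` (if one of the two compared elements is NaN, the non-NaN element
is returned). (Short reference docstring; the ndarray counterpart carries the
full documentation.)
>>> np.fmin(np.array([2., 3., 4.]), np.array([1., 5., 2.]))
array([1., 3., 2.])
"""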
return _ufunc_helper(x1, x2, _npi.fmin, _np.fmin, _npi.fmin_scalar, None, out)
@set_module('mxnet.symbol.numpy')
def max(a, axis=None, out=None, keepdims=False):
"""
Return the maximum of an array or maximum along an axis.
Parameters
----------
a : _Symbol
Input data.
axis : int, optional
Axis along which to operate. By default, flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
max : _Symbol
Maximum of `a`. If `axis` is None, the result is an array of dimension 1.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
min :
The minimum value of an array along a given axis, ignoring any nan.
maximum :
Element-wise maximum of two arrays, ignoring any nan.
argmax :
Return the indices of the maximum values.
Notes
-----
NaN in the original `numpy` is denoted as nan and will be ignored.
Don't use `max` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
``max(a, axis=0)``.
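Examples
--------
Illustrative examples mirroring the ndarray counterpart of ``max``.
>>> a = np.arange(4.).reshape(2, 2)
>>> np.max(a)
array(3.)
>>> np.max(a, axis=0)
array([2., 3.])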
"""
return _npi.max(a, axis=axis, keepdims=keepdims, out=out)
@set_module('mxnet.symbol.numpy')
def min(a, axis=None, out=None, keepdims=False):
"""
Return the minimum of an array or minimum along an axis.
Parameters
----------
a : ndarray
Input data.
axis : int, optional
Axis along which to operate. By default, flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
min : ndarray
Minimum of `a`. If `axis` is None, the result is an array of dimension 1.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
max :
The maximum value of an array along a given axis, ignoring any nan.
minimum :
Element-wise minimum of two arrays, ignoring any nan.
Notes
-----
NaN in the original `numpy` is denoted as nan and will be ignored.
Don't use `min` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
``min(a, axis=0)``.
"""
return _npi.min(a, axis=axis, keepdims=keepdims, out=out)
@set_module('mxnet.symbol.numpy')
def amax(a, axis=None, out=None, keepdims=False):
"""
Return the maximum of an array or maximum along an axis.
Parameters
----------
a : ndarray
Input data.
axis : int, optional
Axis along which to operate. By default, flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
max : ndarray
Maximum of `a`. If `axis` is None, the result is an array of dimension 1.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
min :
The minimum value of an array along a given axis, ignoring any nan.
maximum :
Element-wise maximum of two arrays, ignoring any nan.
argmax :
Return the indices of the maximum values.
Notes
-----
NaN in the original `numpy` is denoted as nan and will be ignored.
Don't use `max` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
``max(a, axis=0)``.
"""
return _npi.amax(a, axis=axis, keepdims=keepdims, out=out)
@set_module('mxnet.symbol.numpy')
def amin(a, axis=None, out=None, keepdims=False):
"""
Return the minimum of an array or minimum along an axis.
Parameters
----------
a : ndarray
Input data.
axis : int, optional
Axis along which to operate. By default, flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
min : ndarray
Minimum of `a`. If `axis` is None, the result is an array of dimension 1.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
max :
The maximum value of an array along a given axis, ignoring any nan.
minimum :
Element-wise minimum of two arrays, ignoring any nan.
Notes
-----
NaN in the original `numpy` is denoted as nan and will be ignored.
Don't use `min` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
``min(a, axis=0)``.
"""
return _npi.amin(a, axis=axis, keepdims=keepdims, out=out)
@set_module('mxnet.symbol.numpy')
def all(a, axis=None, out=None, keepdims=False):
"""
Test whether all array elements along a given axis evaluate to True.
Parameters
----------
a : _Symbol
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical AND reduction is performed.
The default (axis = None) is to perform a logical AND over
all the dimensions of the input array.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output and its type is preserved
Returns
--------
all : _Symbol, bool
A new boolean or array is returned unless out is specified,
in which case a reference to out is returned.
"""
return _npi.all(a, axis=axis, keepdims=keepdims, out=out)
@set_module('mxnet.symbol.numpy')
def any(a, axis=None, out=None, keepdims=False):
"""
Test whether any array element along a given axis evaluates to True.
Returns a single boolean unless `axis` is not None.
Parameters
----------
a : _Symbol
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical OR reduction is performed.
The default (axis = None) is to perform a logical OR over
all the dimensions of the input array.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output and its type is preserved
Returns
--------
any : bool or _Symbol
A new boolean or ndarray is returned unless out is specified,
in which case a reference to out is returned.
"""
return _npi.any(a, axis=axis, keepdims=keepdims, out=out)
@set_module('mxnet.symbol.numpy')
def clip(a, a_min, a_max, out=None):
"""clip(a, a_min, a_max, out=None)
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Parameters
----------
a : _Symbol
Array containing elements to clip.
a_min : scalar or `None`
Minimum value. If `None`, clipping is not performed on lower
interval edge. Not more than one of `a_min` and `a_max` may be
`None`.
a_max : scalar or `None`
Maximum value. If `None`, clipping is not performed on upper
interval edge. Not more than one of `a_min` and `a_max` may be
`None`.
out : _Symbol or `None`
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
Returns
-------
clipped_array : _Symbol
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
Notes
-----
array_like `a_min` and `a_max` are not supported.
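Examples
--------
Illustrative example mirroring the ndarray counterpart of ``clip``.
>>> a = np.arange(10)
>>> np.clip(a, 1, 8)
array([1., 1., 2., 3., 4., 5., 6., 7., 8., 8.])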
"""
if a_min is None and a_max is None:
raise ValueError('array_clip: must set either max or min')
if a_min is None:
a_min = float('-inf')
if a_max is None:
a_max = float('inf')
return _npi.clip(a, a_min, a_max, out=out)
@set_module('mxnet.symbol.numpy')
def swapaxes(a, axis1, axis2):
"""Interchange two axes of an array.
Parameters
----------
a : _Symbol
Input array.
axis1 : int
First axis.
axis2 : int
Second axis.
Returns
-------
a_swapped : _Symbol
Swapped array symbol.
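Examples
--------
Illustrative example mirroring the ndarray counterpart of ``swapaxes``.
>>> x = np.array([[1., 2., 3.]])
>>> np.swapaxes(x, 0, 1)
array([[1.],
[2.],
[3.]])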
"""
return _npi.swapaxes(a, dim1=axis1, dim2=axis2)
@set_module('mxnet.symbol.numpy')
def argmax(a, axis=None, out=None):
r"""
Returns the indices of the maximum values along an axis.
Parameters
----------
a : _Symbol
Input array. Only support dtype `float16`, `float32`, and `float64`.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : _Symbol or None, optional
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
index_array : _Symbol of indices whose dtype is same as the input ndarray.
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
Notes
-----
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
This function differs from the original `numpy.argmax
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html>`_ in
the following aspects:
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` symbol's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` symbol's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
"""
return _npi.argmax(a, axis=axis, keepdims=False, out=out)
@set_module('mxnet.symbol.numpy')
def argmin(a, axis=None, out=None):
r"""
Returns the indices of the minimum values along an axis.
Parameters
----------
a : _Symbol
Input array. Only support dtype `float16`, `float32`, and `float64`.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : _Symbol or None, optional
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
index_array : _Symbol of indices whose dtype is same as the input ndarray.
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
Notes
-----
In case of multiple occurrences of the minimum values, the indices
corresponding to the first occurrence are returned.
This function differs from the original `numpy.argmin
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmin.html>`_ in
the following aspects:
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` symbol's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` symbol's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
"""
return _npi.argmin(a, axis=axis, keepdims=False, out=out)
def average(a, axis=None, weights=None, returned=False, out=None):
"""
Compute the weighted average along the specified axis.
Parameters
--------
a : _Symbol
Array containing data to be averaged.
axis : None or int or tuple of ints, optional
Axis or axes along which to average a.
The default, axis=None, will average over
all of the elements of the input array.
If axis is negative it counts from the last to the first axis.
New in version 1.7.0.
If axis is a tuple of ints, averaging is
performed on all of the axes specified in the tuple
instead of a single axis or all the axes as before.
weights : _Symbol, optional
An array of weights associated with the values in a, must be the same dtype with a.
Each value in a contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of a along the given axis) or of the same shape as a.
If weights=None, then all data in a are assumed to have a weight equal to one.
The 1-D calculation is: avg = sum(a * weights) / sum(weights)
The only constraint on weights is that sum(weights) must not be 0.
returned : bool, optional
Default is False.
If True, the tuple (average, sum_of_weights) is returned,
otherwise only the average is returned.
If weights=None, sum_of_weights is equivalent to
the number of elements over which the average is taken.
out : _Symbol, optional
If provided, the calculation is done into this array.
Returns
--------
retval, [sum_of_weights] : _Symbol
Return the average along the specified axis.
When returned is True, return a tuple with the average as the first element
and the sum of the weights as the second element. sum_of_weights is of the same type as retval.
        If a is integral, the result dtype will be your current default dtype,
When npx.is_np_default_dtype() returns False, default dtype is float32,
When npx.is_np_default_dtype() returns True, default dtype is float64;
otherwise it will be the same as dtype of a.
Raises
--------
MXNetError
- When all weights along axis sum to zero.
- When the length of 1D weights is not the same as the shape of a along axis.
- When given 1D weights, the axis is not specified or is not int.
- When the shape of weights and a differ, but weights are not 1D.
See also
--------
mean
Notes
--------
This function differs from the original `numpy.average`
<https://numpy.org/devdocs/reference/generated/numpy.average.html>`_ in
the following way(s):
- Does not guarantee the same behavior with numpy when given float16 dtype and overflow happens
- Does not support complex dtype
- The dtypes of a and weights must be the same
- Integral a results in float32 or float64 returned dtype, which depends on your current default dtype
Examples
--------
>>> data = np.arange(1, 5)
>>> data
array([1., 2., 3., 4.])
>>> np.average(data)
array(2.5)
>>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))
array(4.)
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0., 1.],
[2., 3.],
[4., 5.]])
    >>> weights = np.array([0.25, 0.75])
    >>> weights
    array([0.25, 0.75])
>>> np.average(data, axis=1, weights=weights)
array([0.75, 2.75, 4.75])
"""
if weights is None:
return _npi.average(a, axis=axis, weights=None, returned=returned, weighted=False, out=out)
else:
return _npi.average(a, axis=axis, weights=weights, returned=returned, out=out)
@set_module('mxnet.symbol.numpy')
def mean(a, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""
mean(a, axis=None, dtype=None, out=None, keepdims=None)
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements.
The average is taken over the flattened array by default, otherwise over the specified axis.
Parameters
----------
a : `_Symbol`
_Symbol containing numbers whose mean is desired.
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to compute the mean of the flattened array.
If this is a tuple of ints, a mean is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the mean.
For integer inputs, When npx.is_np_default_dtype() returns False, default dtype is float32,
When npx.is_np_default_dtype() returns True, default dtype is float64;
for floating point inputs, it is the same as the input dtype.
out : _Symbol, optional
Dummy parameter to keep the consistency with the ndarray counterpart.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one. With this option, the result will broadcast correctly
against the input array.
If the default value is passed, then keepdims will not be passed through to the mean
method of sub-classes of _Symbol, however any non-default value will be. If the sub-class
method does not implement keepdims any exceptions will be raised.
Returns
-------
m : _Symbol, see dtype parameter above
If out=None, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
Notes
-----
This function differs from the original `numpy.mean
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html>`_ in
the following way(s):
- only _Symbol is accepted as valid input, python iterables or scalar is not supported
- default data type for integer input is float32 or float64, which depends on your current default dtype
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.mean(a)
array(2.5)
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0,:] = 1.0
>>> a[1,:] = 0.1
>>> np.mean(a)
array(0.55)
>>> np.mean(a, dtype=np.float64)
array(0.55)
"""
return _npi.mean(a, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
@set_module('mxnet.symbol.numpy')
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=too-many-arguments
"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the array elements. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
Parameters
----------
a : `_Symbol`
_Symbol containing numbers whose standard deviation is desired.
axis : None or int or tuple of ints, optional
Axis or axes along which the standard deviations are computed.
The default is to compute the standard deviation of the flattened array.
If this is a tuple of ints, computation is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the standard deviation. For integer inputs, the default is float32;
for floating point inputs, it is the same as the input dtype.
out : _Symbol, optional
Dummy parameter to keep the consistency with the ndarray counterpart.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one. With this option, the result will broadcast correctly
against the input array.
If the default value is passed, then keepdims will not be passed through to the mean
method of sub-classes of _Symbol, however any non-default value will be. If the sub-class
method does not implement keepdims any exceptions will be raised.
Returns
-------
m : _Symbol, see dtype parameter above
If out=None, returns a new array containing the standard deviation values,
otherwise a reference to the output array is returned.
Notes
-----
This function differs from the original `numpy.std
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html>`_ in
the following way(s):
- only _Symbol is accepted as valid input, python iterables or scalar is not supported
- default output data type for integer input is float32
"""
return _npi.std(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
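# Rough usage sketch (values, to float32 printing precision, via the imperative
# mxnet.numpy counterpart):
# >>> a = np.array([[1., 2.], [3., 4.]])
# >>> np.std(a)               # population std (ddof=0), sqrt(1.25)
# array(1.118034)
# >>> np.std(a, axis=0)
# array([1., 1.])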
@set_module('mxnet.symbol.numpy')
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=too-many-arguments
"""
Compute the variance along the specified axis.
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by
default, otherwise over the specified axis.
Parameters
----------
a : `_Symbol`
_Symbol containing numbers whose variance is desired.
axis : None or int or tuple of ints, optional
Axis or axes along which the variance is computed.
The default is to compute the variance of the flattened array.
If this is a tuple of ints, computation is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the variance.
For arrays of integer type,
When npx.is_np_default_dtype() returns False, default dtype is float32,
When npx.is_np_default_dtype() returns True, default dtype is float64;
For arrays of float types it is the same as the array type.
out : _Symbol, optional
Dummy parameter to keep the consistency with the ndarray counterpart.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one. With this option, the result will broadcast correctly
against the input array.
If the default value is passed, then keepdims will not be passed through to the mean
method of sub-classes of _Symbol, however any non-default value will be. If the sub-class
method does not implement keepdims any exceptions will be raised.
Returns
-------
m : _Symbol, see dtype parameter above
If out=None, returns a new array containing the variance values,
otherwise a reference to the output array is returned.
Notes
-----
This function differs from the original `numpy.var
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html>`_ in
the following way(s):
- only _Symbol is accepted as valid input, python iterables or scalar is not supported
- default output data type for integer input is float32
"""
return _npi.var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
# pylint: disable=redefined-outer-name
@set_module('mxnet.symbol.numpy')
def indices(dimensions, dtype=None, ctx=None):
"""Return an array representing the indices of a grid.
Compute an array where the subarrays contain index values 0,1,...
varying only along the corresponding axis.
Parameters
----------
dimensions : sequence of ints
The shape of the grid.
dtype : data-type, optional
The desired data-type for the array. Default is `int64`.
ctx : device context, optional
Device context on which the memory is allocated. Default is
`mxnet.context.current_context()`.
Returns
-------
grid : _Symbol
The array of grid indices,
``grid.shape = (len(dimensions),) + tuple(dimensions)``.
Notes
-----
The output shape is obtained by prepending the number of dimensions
in front of the tuple of dimensions, i.e. if `dimensions` is a tuple
``(r0, ..., rN-1)`` of length ``N``, the output shape is
``(N,r0,...,rN-1)``.
The subarrays ``grid[k]`` contains the N-D array of indices along the
``k-th`` axis. Explicitly::
grid[k,i0,i1,...,iN-1] = ik
Examples
--------
>>> grid = np.indices((2, 3))
>>> grid.shape
(2, 2, 3)
>>> grid[0] # row indices
array([[0, 0, 0],
[1, 1, 1]], dtype=int64)
>>> grid[1] # column indices
    array([[0, 1, 2],
           [0, 1, 2]], dtype=int64)
The indices can be used as an index into an array.
>>> x = np.arange(20).reshape(5, 4)
>>> row, col = np.indices((2, 3))
>>> x[row, col]
array([[0., 1., 2.],
[4., 5., 6.]])
Note that it would be more straightforward in the above example to
extract the required elements directly with ``x[:2, :3]``.
"""
if isinstance(dimensions, (tuple, list)):
if ctx is None:
ctx = current_context()
return _npi.indices(dimensions=dimensions, dtype=dtype, ctx=ctx)
else:
raise ValueError("The dimensions must be sequence of ints")
# pylint: enable=redefined-outer-name
@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def copysign(x1, x2, out=None, **kwargs):
r"""
Change the sign of x1 to that of x2, element-wise.
If `x2` is a scalar, its sign will be copied to all elements of `x1`.
Parameters
----------
x1 : _Symbol or scalar
Values to change the sign of.
x2 : _Symbol or scalar
The sign of `x2` is copied to `x1`.
out : _Symbol or None
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
out : _Symbol
The values of `x1` with the sign of `x2`.
This is a scalar if both `x1` and `x2` are scalars.
Notes
-------
This function differs from the original `numpy.copysign
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.copysign.html>`_ in
the following aspects:
- ``where`` param is not supported.
"""
return _ufunc_helper(x1, x2, _npi.copysign, _np.copysign, _npi.copysign_scalar, _npi.rcopysign_scalar, out)
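# Minimal sketch, assuming the imperative mxnet.numpy counterpart for concrete values:
# >>> np.copysign(np.array([-1., 2., 3.]), -1)
# array([-1., -2., -3.])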
@set_module('mxnet.symbol.numpy')
def ravel(x, order='C'):
r"""
ravel(x)
Return a contiguous flattened array.
A 1-D array, containing the elements of the input, is returned. A copy is
made only if needed.
Parameters
----------
x : _Symbol
Input array. The elements in `x` are read in row-major, C-style order and
packed as a 1-D array.
order : `C`, optional
Only support row-major, C-style order.
Returns
-------
y : _Symbol
y is an array of the same subtype as `x`, with shape ``(x.size,)``.
Note that matrices are special cased for backward compatibility, if `x`
is a matrix, then y is a 1-D ndarray.
Notes
-----
    This function differs from the original numpy.ravel in the following aspects:
- Only support row-major, C-style order.
"""
if order == 'F':
raise NotImplementedError('order {} is not supported'.format(order))
if isinstance(x, numeric_types):
return _np.reshape(x, -1)
elif isinstance(x, _Symbol):
return _npi.reshape(x, -1)
else:
raise TypeError('type {} not supported'.format(str(type(x))))
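# Flattening sketch (imperative mxnet.numpy counterpart):
# >>> x = np.array([[1, 2, 3], [4, 5, 6]])
# >>> np.ravel(x)
# array([1., 2., 3., 4., 5., 6.])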
def unravel_index(indices, shape, order='C'): # pylint: disable=redefined-outer-name
"""
Converts a flat index or array of flat indices into a tuple of coordinate arrays.
Parameters:
-------------
indices : _Symbol
An integer array whose elements are indices into the flattened version of an array of dimensions shape.
Before version 1.6.0, this function accepted just one index value.
shape : tuple of ints
The shape of the array to use for unraveling indices.
Returns:
-------------
unraveled_coords : _Symbol
Each row in the ndarray has the same shape as the indices array.
Each column in the ndarray represents the unravelled index
Examples:
-------------
>>> np.unravel_index([22, 41, 37], (7,6))
([3. 6. 6.]
[4. 5. 1.])
>>> np.unravel_index(1621, (6,7,8,9))
(3, 1, 4, 1)
"""
if order == 'C':
return _npi.unravel_index_fallback(indices, shape=shape)
else:
        raise NotImplementedError('Does not support column-major (Fortran-style) order at this moment')
def flatnonzero(a):
r"""
Return indices that are non-zero in the flattened version of a.
This is equivalent to np.nonzero(np.ravel(a))[0].
Parameters
----------
a : _Symbol
Input data.
Returns
-------
res : _Symbol
Output array, containing the indices of the elements of `a.ravel()`
that are non-zero.
See Also
--------
nonzero : Return the indices of the non-zero elements of the input array.
ravel : Return a 1-D array containing the elements of the input array.
"""
out = _npi.nonzero(ravel(a))
return out.reshape(-1,)
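# Sketch of equivalent imperative usage (index dtype assumed to follow nonzero, i.e. int64):
# >>> x = np.array([-2., -1., 0., 1., 2.])
# >>> np.flatnonzero(x)
# array([0, 1, 3, 4], dtype=int64)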
def diag_indices_from(arr):
"""
This returns a tuple of indices that can be used to access the main diagonal of an array
a with a.ndim >= 2 dimensions and shape (n, n, ..., n). For a.ndim = 2 this is
the usual diagonal, for a.ndim > 2 this is the set of indices to access
a[i, i, ..., i] for i = [0..n-1].
Parameters:
-------------
arr : _Symbol
        Input array for accessing the main diagonal. All dimensions
should have equal length.
Return:
-------------
diag: _Symbol
indices of the main diagonal.
Examples:
-------------
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> idx = np.diag_indices_from(a)
>>> idx
(array([0, 1, 2, 3]), array([0, 1, 2, 3]))
>>> a[idx] = 100
>>> a
array([[100, 1, 2, 3],
[ 4, 100, 6, 7],
[ 8, 9, 100, 11],
[ 12, 13, 14, 100]])
"""
return _npi.diag_indices_from(arr)
@set_module('mxnet.symbol.numpy')
def hanning(M, dtype=None, ctx=None):
r"""Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : _Symbol, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
        Note that you need to select numpy.float32 or float64 in this operator.
See Also
--------
blackman, hamming
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hanning was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([0. , 0.07937324, 0.29229254, 0.5711574 , 0.8274304 ,
0.9797465 , 0.97974646, 0.82743025, 0.5711573 , 0.29229245,
0.07937312, 0. ])
Plot the window and its frequency response:
>>> import matplotlib.pyplot as plt
>>> window = np.hanning(51)
>>> plt.plot(window.asnumpy())
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
Text(0.5, 1.0, 'Hann window')
>>> plt.ylabel("Amplitude")
Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("Sample")
Text(0.5, 0, 'Sample')
>>> plt.show()
"""
if ctx is None:
ctx = current_context()
return _npi.hanning(M, dtype=dtype, ctx=ctx)
@set_module('mxnet.symbol.numpy')
def hamming(M, dtype=None, ctx=None):
r"""Return the hamming window.
The hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : _Symbol, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
        Note that you need to select numpy.float32 or float64 in this operator.
See Also
--------
blackman, hanning
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([0.08000001, 0.15302339, 0.34890914, 0.6054648 , 0.841236 ,
0.9813669 , 0.9813668 , 0.8412359 , 0.6054647 , 0.34890908,
0.15302327, 0.08000001])
Plot the window and its frequency response:
>>> import matplotlib.pyplot as plt
>>> window = np.hamming(51)
>>> plt.plot(window.asnumpy())
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("hamming window")
Text(0.5, 1.0, 'hamming window')
>>> plt.ylabel("Amplitude")
Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("Sample")
Text(0.5, 0, 'Sample')
>>> plt.show()
"""
if ctx is None:
ctx = current_context()
return _npi.hamming(M, dtype=dtype, ctx=ctx)
@set_module('mxnet.symbol.numpy')
def blackman(M, dtype=None, ctx=None):
r"""Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : _Symbol
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
        Note that you need to select numpy.float32 or float64 in this operator.
See Also
--------
hamming, hanning
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/{M-1}) + 0.08 \cos(4\pi n/{M-1})
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([-1.4901161e-08, 3.2606423e-02, 1.5990365e-01, 4.1439798e-01,
7.3604530e-01, 9.6704686e-01, 9.6704674e-01, 7.3604506e-01,
4.1439781e-01, 1.5990359e-01, 3.2606363e-02, -1.4901161e-08])
Plot the window and its frequency response:
>>> import matplotlib.pyplot as plt
>>> window = np.blackman(51)
>>> plt.plot(window.asnumpy())
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("blackman window")
Text(0.5, 1.0, 'blackman window')
>>> plt.ylabel("Amplitude")
Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("Sample")
Text(0.5, 0, 'Sample')
>>> plt.show()
"""
if ctx is None:
ctx = current_context()
return _npi.blackman(M, dtype=dtype, ctx=ctx)
@set_module('mxnet.symbol.numpy')
def flip(m, axis=None, out=None):
r"""
flip(m, axis=None, out=None)
Reverse the order of elements in an array along the given axis.
The shape of the array is preserved, but the elements are reordered.
Parameters
----------
m : _Symbol or scalar
Input array.
axis : None or int or tuple of ints, optional
Axis or axes along which to flip over. The default,
axis=None, will flip over all of the axes of the input array.
If axis is negative it counts from the last to the first axis.
If axis is a tuple of ints, flipping is performed on all of the axes
specified in the tuple.
out : _Symbol or scalar, optional
Alternative output array in which to place the result. It must have
the same shape and type as the expected output.
Returns
-------
out : _Symbol or scalar
A view of `m` with the entries of axis reversed. Since a view is
returned, this operation is done in constant time.
"""
if isinstance(m, numeric_types):
return _np.flip(m, axis)
elif isinstance(m, _Symbol):
return _npi.flip(m, axis, out=out)
else:
raise TypeError('type {} not supported'.format(str(type(m))))
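# Axis-reversal sketch (imperative mxnet.numpy counterpart):
# >>> A = np.arange(8).reshape((2, 2, 2))
# >>> np.flip(A, 0)          # reverse the outermost axis
# array([[[4., 5.],
#         [6., 7.]],
#        [[0., 1.],
#         [2., 3.]]])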
@set_module('mxnet.symbol.numpy')
def flipud(m):
r"""
    flipud(m)
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\mathcal O(1)`.
"""
return flip(m, 0)
@set_module('mxnet.symbol.numpy')
def fliplr(m):
r"""
    fliplr(m)
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\mathcal O(1)`.
"""
return flip(m, 1)
@set_module('mxnet.symbol.numpy')
def around(x, decimals=0, out=None, **kwargs):
r"""
around(x, decimals=0, out=None)
Evenly round to the given number of decimals.
Parameters
----------
x : _Symbol or scalar
Input data.
decimals : int, optional
Number of decimal places to round to (default: 0). If
decimals is negative, it specifies the number of positions to
the left of the decimal point.
out : _Symbol, optional
Alternative output array in which to place the result. It must have
the same shape and type as the expected output.
Returns
-------
rounded_array : _Symbol or scalar
An array of the same type as `x`, containing the rounded values.
A reference to the result is returned.
Notes
-----
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc.
    This function differs from the original numpy.around in the following aspects:
- Cannot cast type automatically. Dtype of `out` must be same as the expected one.
- Cannot support complex-valued number.
"""
if isinstance(x, numeric_types):
return _np.around(x, decimals, **kwargs)
elif isinstance(x, _Symbol):
return _npi.around(x, decimals, out=out, **kwargs)
else:
raise TypeError('type {} not supported'.format(str(type(x))))
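# Sketch of the half-to-even rounding described in the Notes (imperative counterpart):
# >>> np.around(np.array([0.5, 1.5, 2.5]))
# array([0., 2., 2.])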
@set_module('mxnet.symbol.numpy')
def round(x, decimals=0, out=None, **kwargs):
r"""
round(a, decimals=0, out=None)
Round an array to the given number of decimals.
See Also
--------
around : equivalent function; see for details.
"""
if isinstance(x, numeric_types):
return _np.around(x, decimals, **kwargs)
elif isinstance(x, _Symbol):
return _npi.around(x, decimals, out=out, **kwargs)
else:
raise TypeError('type {} not supported'.format(str(type(x))))
@set_module('mxnet.symbol.numpy')
def round_(x, decimals=0, out=None, **kwargs):
r"""
round_(a, decimals=0, out=None)
Round an array to the given number of decimals.
See Also
--------
around : equivalent function; see for details.
"""
if isinstance(x, numeric_types):
return _np.around(x, decimals, **kwargs)
elif isinstance(x, _Symbol):
return _npi.around(x, decimals, out=out, **kwargs)
else:
raise TypeError('type {} not supported'.format(str(type(x))))
@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def arctan2(x1, x2, out=None, **kwargs):
r"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : _Symbol or scalar
`y`-coordinates.
x2 : _Symbol or scalar
`x`-coordinates. `x2` must be broadcastable to match the shape of
`x1` or vice versa.
out : _Symbol or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : _Symbol or scalar
Array of angles in radians, in the range ``[-pi, pi]``. This is a scalar if
`x1` and `x2` are scalars.
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
    This function differs from the original numpy.arctan2 in the following aspects:
- Only support float16, float32 and float64.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
"""
return _ufunc_helper(x1, x2, _npi.arctan2, _np.arctan2,
_npi.arctan2_scalar, _npi.rarctan2_scalar, out=out)
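# Quadrant-aware sketch (imperative counterpart; values shown to float32 precision):
# >>> np.arctan2(np.array([1., 1.]), np.array([1., -1.]))   # pi/4 and 3*pi/4
# array([0.7853982, 2.3561945])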
@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def hypot(x1, x2, out=None, **kwargs):
r"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
Parameters
----------
x1, x2 : _Symbol or scalar
Leg of the triangle(s).
out : _Symbol or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
z : _Symbol or scalar
The hypotenuse of the triangle(s).
This is a scalar if both `x1` and `x2` are scalars.
Notes
-----
    This function differs from the original numpy.hypot in the following aspects:
- Only support float16, float32 and float64.
"""
return _ufunc_helper(x1, x2, _npi.hypot, _np.hypot, _npi.hypot_scalar, None, out)
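# Pythagorean sketch (imperative counterpart):
# >>> np.hypot(np.array([3., 5.]), np.array([4., 12.]))
# array([ 5., 13.])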
@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def bitwise_and(x1, x2, out=None, **kwargs):
r"""
    Compute the bit-wise AND of two arrays element-wise.
Parameters
----------
x1, x2 : _Symbol or scalar
Only integer and boolean types are handled. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which becomes the shape of the output).
out : _Symbol or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : _Symbol or scalar
Result.
"""
return _ufunc_helper(x1, x2, _npi.bitwise_and, _np.bitwise_and, _npi.bitwise_and_scalar, None, out)
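# Bit-level sketch (imperative counterpart, int32 inputs):
# >>> np.bitwise_and(np.array([14, 3], dtype='int32'), 13)  # 0b1110 & 0b1101, 0b0011 & 0b1101
# array([12, 1], dtype=int32)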
@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def bitwise_xor(x1, x2, out=None, **kwargs):
r"""
Compute the bit-wise XOR of two arrays element-wise.
Parameters
----------
x1, x2 : _Symbol or scalar
Only integer and boolean types are handled. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which becomes the shape of the output).
out : _Symbol or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : _Symbol or scalar
Result.
"""
return _ufunc_helper(x1, x2, _npi.bitwise_xor, _np.bitwise_xor, _npi.bitwise_xor_scalar, None, out)
@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def bitwise_or(x1, x2, out=None, **kwargs):
r"""
Compute the bit-wise OR of two arrays element-wise.
Parameters
----------
x1, x2 : _Symbol or scalar
Only integer and boolean types are handled. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which becomes the shape of the output).
out : _Symbol or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : _Symbol or scalar
Result.
"""
return _ufunc_helper(x1, x2, _npi.bitwise_or, _np.bitwise_or, _npi.bitwise_or_scalar, None, out)
@set_module('mxnet.symbol.numpy')
def unique(ar, return_index=False, return_inverse=False, return_counts=False, axis=None):
"""
Find the unique elements of an array.
Returns the sorted unique elements of an array. There are three optional
outputs in addition to the unique elements:
* the indices of the input array that give the unique values
* the indices of the unique array that reconstruct the input array
* the number of times each unique value comes up in the input array
Parameters
----------
ar : _Symbol
Input array. Unless `axis` is specified, this will be flattened if it
is not already 1-D.
return_index : bool, optional
If True, also return the indices of `ar` (along the specified axis,
if provided, or in the flattened array) that result in the unique array.
return_inverse : bool, optional
If True, also return the indices of the unique array (for the specified
axis, if provided) that can be used to reconstruct `ar`.
return_counts : bool, optional
If True, also return the number of times each unique item appears
in `ar`.
axis : int or None, optional
The axis to operate on. If None, `ar` will be flattened. If an integer,
the subarrays indexed by the given axis will be flattened and treated
as the elements of a 1-D array with the dimension of the given axis,
see the notes for more details. The default is None.
Returns
-------
unique : _Symbol
The sorted unique values.
unique_indices : _Symbol, optional
The indices of the first occurrences of the unique values in the
original array. Only provided if `return_index` is True.
unique_inverse : _Symbol, optional
The indices to reconstruct the original array from the
unique array. Only provided if `return_inverse` is True.
unique_counts : _Symbol, optional
The number of times each of the unique values comes up in the
original array. Only provided if `return_counts` is True.
Notes
-----
When an axis is specified the subarrays indexed by the axis are sorted.
This is done by making the specified axis the first dimension of the array
and then flattening the subarrays in C order. The flattened subarrays are
then viewed as a structured type with each element given a label, with the
effect that we end up with a 1-D array of structured types that can be
treated in the same way as any other 1-D array. The result is that the
flattened subarrays are sorted in lexicographic order starting with the
first element.
"""
return _npi.unique(ar, return_index, return_inverse, return_counts, axis)
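# Usage sketch via the imperative counterpart (counts dtype assumed int64):
# >>> u, counts = np.unique(np.array([1., 1., 2., 2., 3., 3.]), return_counts=True)
# >>> u
# array([1., 2., 3.])
# >>> counts
# array([2, 2, 2], dtype=int64)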
@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def ldexp(x1, x2, out=None, **kwargs):
"""
Returns x1 * 2**x2, element-wise.
The mantissas `x1` and twos exponents `x2` are used to construct
floating point numbers ``x1 * 2**x2``.
Parameters
----------
x1 : _Symbol
Array of multipliers.
x2 : _Symbol
Array of twos exponents.
out : _Symbol or None
Dummy parameter to keep the consistency with the ndarray counterpart.
Returns
-------
y : _Symbol
The result of ``x1 * 2**x2``.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
Different from numpy, we allow x2 to be float besides int.
`ldexp` is useful as the inverse of `frexp`, if used by itself it is
more clear to simply use the expression ``x1 * 2**x2``.
"""
return _ufunc_helper(x1, x2, _npi.ldexp, _np.ldexp, _npi.ldexp_scalar, _npi.rldexp_scalar, out)
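# x1 * 2**x2 sketch (imperative counterpart):
# >>> np.ldexp(np.array([5., 5., 5.]), np.array([0., 1., 2.]))
# array([ 5., 10., 20.])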
@set_module('mxnet.symbol.numpy')
def vdot(a, b):
r"""
Return the dot product of two vectors.
Note that `vdot` handles multidimensional arrays differently than `dot`:
it does *not* perform a matrix product, but flattens input arguments
to 1-D vectors first. Consequently, it should only be used for vectors.
Parameters
----------
a : _Symbol
First argument to the dot product.
b : _Symbol
Second argument to the dot product.
Returns
-------
output : _Symbol
Dot product of `a` and `b`.
See Also
--------
dot : Return the dot product without using the complex conjugate of the
first argument.
Examples
--------
Note that higher-dimensional arrays are flattened!
>>> a = np.array([[1, 4], [5, 6]])
>>> b = np.array([[4, 1], [2, 2]])
>>> np.vdot(a, b)
30
>>> np.vdot(b, a)
30
>>> 1*4 + 4*1 + 5*2 + 6*2
30
"""
return tensordot(a.flatten(), b.flatten(), 1)
@set_module('mxnet.symbol.numpy')
def inner(a, b):
r"""Inner product of two arrays.
Ordinary inner product of vectors for 1-D arrays (without complex
conjugation), in higher dimensions a sum product over the last axes.
Parameters
----------
a, b : _Symbol
If `a` and `b` are nonscalar, their last dimensions must match.
Returns
-------
out : _Symbol
`out.shape = a.shape[:-1] + b.shape[:-1]`
Raises
------
ValueError
If the last dimension of `a` and `b` has different size.
See Also
--------
tensordot : Sum products over arbitrary axes.
dot : Generalised matrix product, using second last dimension of `b`.
einsum : Einstein summation convention.
Notes
-----
For vectors (1-D arrays) it computes the ordinary inner-product::
np.inner(a, b) = sum(a[:]*b[:])
More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
or explicitly::
np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
= sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
In addition `a` or `b` may be scalars, in which case::
np.inner(a,b) = a*b
Examples
--------
Ordinary inner product for vectors:
>>> a = np.array([1,2,3])
>>> b = np.array([0,1,0])
>>> np.inner(a, b)
2
A multidimensional example:
>>> a = np.arange(24).reshape((2,3,4))
>>> b = np.arange(4)
>>> np.inner(a, b)
array([[ 14, 38, 62],
[ 86, 110, 134]])
"""
return tensordot(a, b, [-1, -1])
@set_module('mxnet.symbol.numpy')
def outer(a, b):
r"""Compute the outer product of two vectors.
Given two vectors, ``a = [a0, a1, ..., aM]`` and
``b = [b0, b1, ..., bN]``,
the outer product [1]_ is::
[[a0*b0 a0*b1 ... a0*bN ]
[a1*b0 .
[ ... .
[aM*b0 aM*bN ]]
Parameters
----------
a : (M,) _Symbol
First input vector. Input is flattened if
not already 1-dimensional.
b : (N,) _Symbol
Second input vector. Input is flattened if
not already 1-dimensional.
Returns
-------
out : (M, N) _Symbol
``out[i, j] = a[i] * b[j]``
See also
--------
inner
einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.
ufunc.outer : A generalization to N dimensions and other operations.
``np.multiply.outer(a.ravel(), b.ravel())`` is the equivalent.
References
----------
.. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd
ed., Baltimore, MD, Johns Hopkins University Press, 1996,
pg. 8.
Examples
--------
Make a (*very* coarse) grid for computing a Mandelbrot set:
>>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
>>> rl
array([[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.]])
"""
return tensordot(a.flatten(), b.flatten(), 0)
@set_module('mxnet.symbol.numpy')
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): # pylint: disable=too-many-arguments
"""
Return the cross product of two (arrays of) vectors.
The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular
to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors
are defined by the last axis of `a` and `b` by default, and these axes
can have dimensions 2 or 3. Where the dimension of either `a` or `b` is
2, the third component of the input vector is assumed to be zero and the
cross product calculated accordingly. In cases where both input vectors
have dimension 2, the z-component of the cross product is returned.
Parameters
----------
a : _Symbol
Components of the first vector(s).
b : _Symbol
Components of the second vector(s).
axisa : int, optional
Axis of `a` that defines the vector(s). By default, the last axis.
axisb : int, optional
Axis of `b` that defines the vector(s). By default, the last axis.
axisc : int, optional
Axis of `c` containing the cross product vector(s). Ignored if
both input vectors have dimension 2, as the return is scalar.
By default, the last axis.
axis : int, optional
If defined, the axis of `a`, `b` and `c` that defines the vector(s)
and cross product(s). Overrides `axisa`, `axisb` and `axisc`.
Returns
-------
c : _Symbol
Vector cross product(s).
Raises
------
ValueError
When the dimension of the vector(s) in `a` and/or `b` does not
equal 2 or 3.
Notes
-----
Supports full broadcasting of the inputs.
"""
if axis is not None:
axisa, axisb, axisc = (axis,) * 3
return _npi.cross(a, b, axisa, axisb, axisc)
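# 3-D cross product sketch (imperative counterpart):
# >>> np.cross(np.array([1., 2., 3.]), np.array([4., 5., 6.]))
# array([-3.,  6., -3.])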
@set_module('mxnet.symbol.numpy')
def kron(a, b):
r"""
kron(a, b)
Kronecker product of two arrays.
Computes the Kronecker product, a composite array made of blocks of the
second array scaled by the first.
Parameters
----------
a, b : ndarray
Returns
-------
out : ndarray
See Also
--------
outer : The outer product
Notes
-----
The function assumes that the number of dimensions of `a` and `b`
are the same, if necessary prepending the smallest with ones.
If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`,
    the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*sN)`.
The elements are products of elements from `a` and `b`, organized
explicitly by::
kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]
where::
kt = it * st + jt, t = 0,...,N
In the common 2-D case (N=1), the block structure can be visualized::
[[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
[ ... ... ],
[ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]
Examples
--------
>>> np.kron([1,10,100], [5,6,7])
array([ 5, 6, 7, 50, 60, 70, 500, 600, 700])
>>> np.kron([5,6,7], [1,10,100])
array([ 5, 50, 500, 6, 60, 600, 7, 70, 700])
"""
return _npi.kron(a, b)
@set_module('mxnet.symbol.numpy')
def equal(x1, x2, out=None):
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : _Symbol or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : Dummy parameter, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : _Symbol or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
    >>> np.equal(np.ones((2, 1)), np.zeros((1, 3)))
array([[False, False, False],
[False, False, False]])
>>> np.equal(1, np.ones(1))
array([ True])
"""
return _ufunc_helper(x1, x2, _npi.equal, _np.equal, _npi.equal_scalar, None, out)
@set_module('mxnet.symbol.numpy')
def not_equal(x1, x2, out=None):
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : _Symbol or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : Dummy parameter, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : _Symbol or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
    >>> np.not_equal(np.ones((2, 1)), np.zeros((1, 3)))
array([[ True, True, True],
[ True, True, True]])
>>> np.not_equal(1, np.ones(1))
array([False])
"""
return _ufunc_helper(x1, x2, _npi.not_equal, _np.not_equal, _npi.not_equal_scalar, None, out)
@set_module('mxnet.symbol.numpy')
def greater(x1, x2, out=None):
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : _Symbol or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : Dummy parameter, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : _Symbol or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
    >>> np.greater(np.ones((2, 1)), np.zeros((1, 3)))
array([[ True, True, True],
[ True, True, True]])
>>> np.greater(1, np.ones(1))
array([False])
"""
return _ufunc_helper(x1, x2, _npi.greater, _np.greater, _npi.greater_scalar,
_npi.less_scalar, out)
@set_module('mxnet.symbol.numpy')
def less(x1, x2, out=None):
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : _Symbol or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : Dummy parameter, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : _Symbol or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
    >>> np.less(np.ones((2, 1)), np.zeros((1, 3)))
    array([[False, False, False],
           [False, False, False]])
>>> np.less(1, np.ones(1))
array([False])
"""
return _ufunc_helper(x1, x2, _npi.less, _np.less, _npi.less_scalar, _npi.greater_scalar, out)
@set_module('mxnet.symbol.numpy')
def greater_equal(x1, x2, out=None):
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : _Symbol or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : Dummy parameter, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : _Symbol or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
    >>> np.greater_equal(np.ones((2, 1)), np.zeros((1, 3)))
array([[ True, True, True],
[ True, True, True]])
>>> np.greater_equal(1, np.ones(1))
array([True])
"""
return _ufunc_helper(x1, x2, _npi.greater_equal, _np.greater_equal, _npi.greater_equal_scalar,
_npi.less_equal_scalar, out)
@set_module('mxnet.symbol.numpy')
def less_equal(x1, x2, out=None):
"""
Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : _Symbol or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : Dummy parameter, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : _Symbol or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
    >>> np.less_equal(np.ones((2, 1)), np.zeros((1, 3)))
array([[False, False, False],
[False, False, False]])
>>> np.less_equal(1, np.ones(1))
array([True])
"""
return _ufunc_helper(x1, x2, _npi.less_equal, _np.less_equal, _npi.less_equal_scalar,
_npi.greater_equal_scalar, out)
@set_module('mxnet.symbol.numpy')
def roll(a, shift, axis=None):
"""
Roll array elements along a given axis.
Elements that roll beyond the last position are re-introduced at
the first.
Parameters
----------
a : _Symbol
Input array.
shift : int or tuple of ints
The number of places by which elements are shifted. If a tuple,
then `axis` must be a tuple of the same size, and each of the
given axes is shifted by the corresponding number. If an int
while `axis` is a tuple of ints, then the same value is used for
all given axes.
axis : int or tuple of ints, optional
Axis or axes along which elements are shifted. By default, the
array is flattened before shifting, after which the original
shape is restored.
Returns
-------
res : _Symbol
Output array, with the same shape as `a`.
Notes
-----
Supports rolling over multiple dimensions simultaneously.
"""
return _npi.roll(a, shift, axis=axis)
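# Wrap-around sketch (imperative counterpart):
# >>> np.roll(np.arange(5.), 2)
# array([3., 4., 0., 1., 2.])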
@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def logical_and(x1, x2, out=None):
r"""
Compute the truth value of x1 AND x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical AND is applied to the elements of `x1` and `x2`.
If ``x1.shape != x2.shape``, they must be broadcastable to a common
shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
y : ndarray or bool
Boolean result of the logical AND operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
logical_or, logical_not, logical_xor, bitwise_or
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([False, True])
"""
return _ufunc_helper(x1, x2, _npi.logical_and, _np.logical_and, _npi.logical_and_scalar, None, out)
@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def logical_or(x1, x2, out=None):
r"""
Compute the truth value of x1 OR x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
If ``x1.shape != x2.shape``, they must be broadcastable to a common
shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
y : ndarray or bool
Boolean result of the logical OR operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
logical_and, logical_not, logical_xor, bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([True, True])
"""
return _ufunc_helper(x1, x2, _npi.logical_or, _np.logical_or, _npi.logical_or_scalar, None, out)
@set_module('mxnet.symbol.numpy')
@wrap_np_binary_func
def logical_xor(x1, x2, out=None):
r"""
Compute the truth value of x1 XOR x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`.
If ``x1.shape != x2.shape``, they must be broadcastable to a common
shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
y : ndarray or bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
logical_and, logical_not, logical_or, bitwise_or
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([ True, False])
"""
return _ufunc_helper(x1, x2, _npi.logical_xor, _np.logical_xor, _npi.logical_xor_scalar, None, out)
@set_module('mxnet.symbol.numpy')
def rot90(m, k=1, axes=(0, 1)):
"""
Rotate an array by 90 degrees in the plane specified by axes.
Rotation direction is from the first towards the second axis.
Parameters
----------
m : _Symbol
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
axes: (2,) array_like
The array is rotated in the plane defined by the axes.
Axes must be different.
Returns
-------
y : _Symbol
A rotated view of `m`.
    Notes
    -----
rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1))
rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1))
Examples
--------
>>> m = np.array([[1,2],[3,4]], 'int')
>>> m
array([[1, 2],
[3, 4]], dtype=int64)
>>> np.rot90(m)
array([[2, 4],
[1, 3]], dtype=int64)
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]], dtype=int64)
>>> m = np.arange(8).reshape((2,2,2))
>>> np.rot90(m, 1, (1,2))
array([[[1., 3.],
[0., 2.]],
[[5., 7.],
[4., 6.]]])
"""
return _npi.rot90(m, k=k, axes=axes)
@set_module('mxnet.symbol.numpy')
def einsum(*operands, **kwargs):
r"""
einsum(subscripts, *operands, out=None, optimize=False)
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional,
linear algebraic array operations can be represented in a simple fashion.
In *implicit* mode `einsum` computes these values.
In *explicit* mode, `einsum` provides further flexibility to compute
other array operations that might not be considered classical Einstein
summation operations, by disabling, or forcing summation over specified
subscript labels.
See the notes and examples for clarification.
Parameters
----------
subscripts : str
Specifies the subscripts for summation as comma separated list of
subscript labels. An implicit (classical Einstein summation)
calculation is performed unless the explicit indicator '->' is
included as well as subscript labels of the precise output form.
operands : list of _Symbol
These are the arrays for the operation.
out : _Symbol, optional
If provided, the calculation is done into this array.
optimize : {False, True}, optional
Controls if intermediate optimization should occur. No optimization
will occur if False. Defaults to False.
Returns
-------
output : _Symbol
The calculation based on the Einstein summation convention.
Notes
-----
The Einstein summation convention can be used to compute
many multi-dimensional, linear algebraic array operations. `einsum`
provides a succinct way of representing these.
A non-exhaustive list of these operations,
which can be computed by `einsum`, is shown below along with examples:
* Trace of an array, :py:func:`np.trace`.
* Return a diagonal, :py:func:`np.diag`.
* Array axis summations, :py:func:`np.sum`.
* Transpositions and permutations, :py:func:`np.transpose`.
* Matrix multiplication and dot product, :py:func:`np.matmul` :py:func:`np.dot`.
* Vector inner and outer products, :py:func:`np.inner` :py:func:`np.outer`.
* Broadcasting, element-wise and scalar multiplication, :py:func:`np.multiply`.
* Tensor contractions, :py:func:`np.tensordot`.
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to :py:func:`np.inner(a,b) <np.inner>`. If a label
appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
describes traditional matrix multiplication and is equivalent to
:py:func:`np.matmul(a,b) <np.matmul>`. Repeated subscript labels in one
operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
to :py:func:`np.trace(a) <np.trace>`.
In *implicit mode*, the chosen subscripts are important
since the axes of the output are reordered alphabetically. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose. Additionally,
``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
``np.einsum('ij,jh', a, b)`` returns the transpose of the
multiplication since subscript 'h' precedes subscript 'i'.
In *explicit mode* the output can be directly controlled by
specifying output subscript labels. This requires the
identifier '->' as well as the list of output subscript labels.
This feature increases the flexibility of the function since
summing can be disabled or forced when required. The call
``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <np.sum>`,
and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <np.diag>`.
The difference is that `einsum` does not allow broadcasting by default.
Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
order of the output subscript labels and therefore returns matrix
multiplication, unlike the example above in implicit mode.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, one can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view.
The ``optimize`` argument which will optimize the contraction order
of an einsum expression. For a contraction with three or more operands this
can greatly increase the computational efficiency at the cost of a larger
memory footprint during computation.
Typically a 'greedy' algorithm is applied which empirical tests have shown
returns the optimal path in the majority of cases. 'optimal' is not supported
for now.
This function differs from the original `numpy.einsum
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html>`_ in
the following way(s):
- Does not support 'optimal' strategy
- Does not support the alternative subscript like
`einsum(op0, sublist0, op1, sublist1, ..., [sublistout])`
- Does not produce view in any cases
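Examples
--------
A small illustrative sketch; results follow NumPy's einsum semantics and are easiest to
check through the imperative ``mxnet.np`` front end (the symbolic version only records
the operation in a graph)::
    a = np.arange(25).reshape(5, 5)
    b = np.arange(5)
    np.einsum('ii', a)         # trace of a -> 60
    np.einsum('ij,j', a, b)    # matrix-vector product -> [30, 80, 130, 180, 230]
    np.einsum('ij->ji', a)     # transpose of a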
"""
# Grab non-einsum kwargs; do not optimize by default.
optimize_arg = kwargs.pop('optimize', False)
out = kwargs.pop('out', None)
subscripts = operands[0]
operands = operands[1:]
return _npi.einsum(*operands, subscripts=subscripts, out=out, optimize=int(optimize_arg))
@set_module('mxnet.symbol.numpy')
def percentile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments
"""
Compute the q-th percentile of the data along the specified axis.
Returns the q-th percentile(s) of the array elements.
Parameters
----------
a : _Symbol
Input array
q : _Symbol
Percentile or sequence of percentiles to compute.
axis : {int, tuple of int, None}, optional
Axis or axes along which the percentiles are computed. The default is to
compute the percentile(s) along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must have the same
shape and buffer length as the expected output, but the type (of the output)
will be cast if necessary.
overwrite_input : bool, optional (Not supported yet)
If True, then allow the input array a to be modified by intermediate calculations,
to save memory. In this case, the contents of the input a after this function
completes is undefined.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use when the
desired percentile lies between two data points i < j:
'linear': i + (j - i) * fraction, where fraction is the fractional part of the
index surrounded by i and j.
'lower': i.
'higher': j.
'nearest': i or j, whichever is nearest.
'midpoint': (i + j) / 2.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will broadcast
correctly against the original array a.
Returns
-------
percentile : _Symbol
Output array.
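Examples
--------
Illustrative sketch only (NumPy semantics, shown via the imperative ``mxnet.np``
front end; the symbolic version returns a graph node)::
    a = np.array([[10, 7, 4], [3, 2, 1]])
    np.percentile(a, 50)           # median of all elements -> 3.5
    np.percentile(a, 50, axis=0)   # -> [6.5, 4.5, 2.5]
    np.percentile(a, 50, axis=1)   # -> [7., 2.]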
"""
if overwrite_input is not None:
raise NotImplementedError('overwrite_input is not supported yet')
if isinstance(q, numeric_types):
return _npi.percentile(a, axis=axis, interpolation=interpolation,
keepdims=keepdims, q_scalar=q, out=out)
return _npi.percentile(a, q, axis=axis, interpolation=interpolation,
keepdims=keepdims, q_scalar=None, out=out)
@set_module('mxnet.symbol.numpy')
def median(a, axis=None, out=None, overwrite_input=None, keepdims=False):
r"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : _Symbol
Input array or object that can be converted to an array.
axis : {int, sequence of int, None}, optional
Axis or axes along which the medians are computed. The default
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : _Symbol, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
median : _Symbol
A new array holding the result. If the input contains integers
or floats smaller than ``float32``, then the output data-type is
``np.float32``. Otherwise, the data-type of the output is the
same as that of the input. If `out` is specified, that array is
returned instead.
See Also
--------
mean, percentile
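Examples
--------
A minimal sketch (values follow NumPy semantics; evaluate through the imperative
``mxnet.np`` front end)::
    a = np.array([[10, 7, 4], [3, 2, 1]])
    np.median(a)           # -> 3.5
    np.median(a, axis=0)   # -> [6.5, 4.5, 2.5]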
"""
return quantile(a=a, q=0.5, axis=axis, out=out, overwrite_input=overwrite_input,
interpolation='midpoint', keepdims=keepdims)
@set_module('mxnet.symbol.numpy')
def quantile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments
"""
Compute the q-th quantile of the data along the specified axis.
.. versionadded:: 1.15.0
Parameters
----------
a : _Symbol
Input array or object that can be converted to an array.
q : _Symbol
Quantile or sequence of quantiles to compute, which must be between 0 and 1 inclusive.
axis : {int, tuple of int, None}, optional
Axis or axes along which the quantiles are computed.
The default is to compute the quantile(s) along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result.
It must have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use
when the desired quantile lies between two data points i < j:
linear: i + (j - i) * fraction, where fraction is the fractional part of the index surrounded by i and j.
lower: i.
higher: j.
nearest: i or j, whichever is nearest.
midpoint: (i + j) / 2.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the result as dimensions with size one.
With this option, the result will broadcast correctly against the original array a.
Returns
-------
quantile : _Symbol
If q is a single quantile and axis=None, then the result is a scalar.
If multiple quantiles are given, first axis of the result corresponds to the quantiles.
The other axes are the axes that remain after the reduction of a.
If out is specified, that array is returned instead.
See also
--------
mean
Notes
-----
Given a vector V of length N, the q-th quantile of V is the value q of the way from the minimum
to the maximum in a sorted copy of V. The values and distances of the two nearest neighbors
as well as the interpolation parameter will determine the quantile if the normalized ranking
does not match the location of q exactly. This function is the same as the median if q=0.5,
the same as the minimum if q=0.0 and the same as the maximum if q=1.0.
This function differs from the original `numpy.quantile
<https://numpy.org/devdocs/reference/generated/numpy.quantile.html>`_ in
the following aspects:
- q must be _Symbol type even if it is a scalar
- do not support overwrite_input
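Examples
--------
Illustrative sketch; note that ``q`` must itself be an array/symbol here::
    a = np.array([[10., 7., 4.], [3., 2., 1.]])
    q = np.array(0.5)
    np.quantile(a, q)          # -> 3.5
    np.quantile(a, q, axis=0)  # -> [6.5, 4.5, 2.5]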
"""
if overwrite_input is not None:
raise NotImplementedError('overwrite_input is not supported yet')
if isinstance(q, numeric_types):
return _npi.percentile(a, axis=axis, interpolation=interpolation,
keepdims=keepdims, q_scalar=q * 100, out=out)
return _npi.percentile(a, q * 100, axis=axis, interpolation=interpolation,
keepdims=keepdims, q_scalar=None, out=out)
@set_module('mxnet.symbol.numpy')
def shares_memory(a, b, max_work=None):
"""
Determine if two arrays share memory
Parameters
----------
a, b : _Symbol
Input arrays
Returns
-------
out : _Symbol
"""
return _npi.share_memory(a, b)
@set_module('mxnet.symbol.numpy')
def may_share_memory(a, b, max_work=None):
"""
Determine if two arrays might share memory
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Only the memory bounds of a and b are checked by default.
Parameters
----------
a, b : _Symbol
Input arrays
Returns
-------
out : _Symbol
"""
return _npi.share_memory(a, b)
@set_module('mxnet.symbol.numpy')
def diff(a, n=1, axis=-1, prepend=None, append=None): # pylint: disable=redefined-outer-name
r"""
Calculate the n-th discrete difference along the given axis.
Parameters
----------
a : _Symbol
Input array
n : int, optional
The number of times values are differenced. If zero, the input is returned as-is.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
prepend, append : _Symbol, optional
Not supported yet
Returns
-------
diff : _Symbol
The n-th differences.
The shape of the output is the same as a except along axis where the dimension is smaller by n.
The type of the output is the same as the type of the difference between any two elements of a.
This is the same as the type of a in most cases.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
Notes
-----
Optional inputs `prepend` and `append` are not supported yet
"""
    if prepend is not None or append is not None:
raise NotImplementedError('prepend and append options are not supported yet')
return _npi.diff(a, n=n, axis=axis)
@set_module('mxnet.symbol.numpy')
def ediff1d(ary, to_end=None, to_begin=None):
"""
The differences between consecutive elements of an array.
Parameters
----------
ary : _Symbol
If necessary, will be flattened before the differences are taken.
to_end : _Symbol or scalar, optional
Number(s) to append at the end of the returned differences.
to_begin : _Symbol or scalar, optional
Number(s) to prepend at the beginning of the returned differences.
Returns
-------
ediff1d : _Symbol
The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.
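Examples
--------
Sketch of the expected behaviour (NumPy semantics)::
    x = np.array([1, 2, 4, 7, 0])
    np.ediff1d(x)                            # -> [1, 2, 3, -7]
    np.ediff1d(x, to_begin=-99, to_end=88)   # -> [-99, 1, 2, 3, -7, 88]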
"""
input_type = (isinstance(to_begin, _Symbol), isinstance(to_end, _Symbol))
# case 1: when both `to_begin` and `to_end` are arrays
if input_type == (True, True):
return _npi.ediff1d(ary, to_begin, to_end, to_begin_arr_given=True, to_end_arr_given=True,
to_begin_scalar=None, to_end_scalar=None)
# case 2: only `to_end` is array but `to_begin` is scalar/None
elif input_type == (False, True):
return _npi.ediff1d(ary, to_end, to_begin_arr_given=False, to_end_arr_given=True,
to_begin_scalar=to_begin, to_end_scalar=None)
# case 3: only `to_begin` is array but `to_end` is scalar/None
elif input_type == (True, False):
return _npi.ediff1d(ary, to_begin, to_begin_arr_given=True, to_end_arr_given=False,
to_begin_scalar=None, to_end_scalar=to_end)
# case 4: both `to_begin` and `to_end` are scalar/None
else:
return _npi.ediff1d(ary, to_begin_arr_given=False, to_end_arr_given=False,
to_begin_scalar=to_begin, to_end_scalar=to_end)
@set_module('mxnet.symbol.numpy')
def interp(x, xp, fp, left=None, right=None, period=None): # pylint: disable=too-many-arguments
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : _Symbol
The x-coordinates of the interpolated values.
xp : _Symbol
The x-coordinates of the data points, must be increasing if argument
`period` is not specified. Otherwise, `xp` is internally sorted after
normalizing the periodic boundaries with ``xp = xp % period``.
fp : _Symbol
The y-coordinates of the data points, same length as `xp`.
left : optional float corresponding to fp
Value to return for `x < xp[0]`, default is `fp[0]`.
right : optional float corresponding to fp
Value to return for `x > xp[-1]`, default is `fp[-1]`.
period : None or float, optional
A period for the x-coordinates. This parameter allows the proper
interpolation of angular x-coordinates. Parameters `left` and `right`
are ignored if `period` is specified.
.. versionadded:: 1.10.0
Returns
-------
y : _Symbol
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
If `xp` or `fp` are not 1-D sequences
If `period == 0`
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasing is::
np.all(np.diff(xp) > 0)
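Examples
--------
Illustrative sketch (values follow NumPy's ``interp`` semantics)::
    xp = np.array([1., 2., 3.])
    fp = np.array([3., 2., 0.])
    np.interp(np.array([2.5]), xp, fp)           # -> [1.]
    np.interp(np.array([0., 1.5, 4.]), xp, fp)   # -> [3., 2.5, 0.]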
"""
if isinstance(x, numeric_types):
return _npi.interp(xp.astype(float), fp.astype(float), left=left,
right=right, period=period, x_scalar=x, x_is_scalar=True)
return _npi.interp(xp.astype(float), fp.astype(float), x.astype(float), left=left,
right=right, period=period, x_scalar=0.0, x_is_scalar=False)
@set_module('mxnet.symbol.numpy')
def resize(a, new_shape):
"""
Return a new array with the specified shape.
If the new array is larger than the original array, then the new
array is filled with repeated copies of `a`. Note that this behavior
is different from a.resize(new_shape) which fills with zeros instead
of repeated copies of `a`.
Parameters
----------
a : _Symbol
Array to be resized.
new_shape : int or tuple of int
Shape of resized array.
Returns
-------
reshaped_array : _Symbol
The new array is formed from the data in the old array, repeated
if necessary to fill out the required number of elements. The
data are repeated in the order that they are stored in memory.
See Also
--------
ndarray.resize : resize an array in-place.
Notes
-----
Warning: This functionality does **not** consider axes separately,
i.e. it does not apply interpolation/extrapolation.
It fills the return array with the required number of elements, taken
from `a` as they are laid out in memory, disregarding strides and axes.
(This is in case the new shape is smaller. For larger, see above.)
This functionality is therefore not suitable to resize images,
or data where each axis represents a separate and distinct entity.
Examples
--------
>>> a = np.array([[0, 1], [2, 3]])
>>> np.resize(a, (2, 3))
array([[0., 1., 2.],
[3., 0., 1.]])
>>> np.resize(a, (1, 4))
array([[0., 1., 2., 3.]])
>>> np.resize(a,(2, 4))
array([[0., 1., 2., 3.],
[0., 1., 2., 3.]])
"""
return _npi.resize_fallback(a, new_shape=new_shape)
@set_module('mxnet.symbol.numpy')
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None, **kwargs):
"""
Replace NaN with zero and infinity with large finite numbers (default
behaviour) or with the numbers defined by the user using the `nan`,
`posinf` and/or `neginf` keywords.
If `x` is inexact, NaN is replaced by zero or by the user defined value in
`nan` keyword, infinity is replaced by the largest finite floating point
values representable by ``x.dtype`` or by the user defined value in
`posinf` keyword and -infinity is replaced by the most negative finite
floating point values representable by ``x.dtype`` or by the user defined
value in `neginf` keyword.
For complex dtypes, the above is applied to each of the real and
imaginary components of `x` separately.
If `x` is not inexact, then no replacements are made.
Parameters
----------
x : _Symbol
Input data.
copy : bool, optional
Whether to create a copy of `x` (True) or to replace values
in-place (False). The in-place operation only occurs if
casting to an array does not require a copy.
Default is True.
nan : int, float, optional
Value to be used to fill NaN values. If no value is passed
then NaN values will be replaced with 0.0.
posinf : int, float, optional
Value to be used to fill positive infinity values. If no value is
passed then positive infinity values will be replaced with a very
large number.
neginf : int, float, optional
Value to be used to fill negative infinity values. If no value is
passed then negative infinity values will be replaced with a very
small (or negative) number.
.. versionadded:: 1.13
Returns
-------
out : _Symbol
`x`, with the non-finite values replaced. If `copy` is False, this may
be `x` itself.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
"""
if isinstance(x, numeric_types):
return _np.nan_to_num(x, copy, nan, posinf, neginf)
elif isinstance(x, _Symbol):
if not copy:
return _npi.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf, out=x)
return _npi.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf, out=None)
else:
raise TypeError('type {} not supported'.format(str(type(x))))
@set_module('mxnet.symbol.numpy')
def squeeze(x, axis=None):
"""
Remove single-dimensional entries from the shape of an array.
Parameters
----------
x : _Symbol
Input data.
axis : None or int or tuple of ints, optional
.. versionadded:: 1.7.0
Selects a subset of the single-dimensional entries in the
shape. If an axis is selected with shape entry greater than
one, an error is raised.
Returns
-------
squeezed : _Symbol
The input array, but with all or a subset of the
dimensions of length 1 removed. This is always `a` itself
or a view into `a`.
Raises
------
ValueError
If `axis` is not `None`, and an axis being squeezed is not of length 1
See Also
--------
expand_dims : The inverse operation, adding singleton dimensions
reshape : Insert, remove, and combine dimensions, and resize existing ones
Examples
--------
>>> x = np.array([[[0], [1], [2]]])
>>> x.shape
(1, 3, 1)
>>> np.squeeze(x).shape
(3,)
>>> np.squeeze(x, axis=0).shape
(3, 1)
>>> np.squeeze(x, axis=1).shape
Traceback (most recent call last):
...
ValueError: cannot select an axis to squeeze out which has size not equal to one
>>> np.squeeze(x, axis=2).shape
(1, 3)
"""
return _npi.squeeze(x, axis=axis)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def isnan(x, out=None, **kwargs):
"""
Test element-wise for NaN and return result as a boolean array.
Parameters
----------
x : _Symbol or scalar
Input array.
out : _Symbol or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : _Symbol or bool
True where x is NaN, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This means that Not a Number is not equivalent to infinity.
This function differs from the original `numpy.isnan
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.isnan.html>`_ in
the following aspects:
- Does not support complex number for now
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
"""
return _unary_func_helper(x, _npi.isnan, _np.isnan, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def isinf(x, out=None, **kwargs):
"""
Test element-wise for positive or negative infinity.
Parameters
----------
x : _Symbol or scalar
Input array.
out : _Symbol or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : _Symbol or bool
True where x is positive or negative infinity, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This function differs from the original `numpy.isinf
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.isnan.html>`_ in
the following aspects:
- Does not support complex number for now
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
"""
return _unary_func_helper(x, _npi.isinf, _np.isinf, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def isposinf(x, out=None, **kwargs):
"""
Test element-wise for positive infinity, return result as bool array.
Parameters
----------
x : _Symbol or scalar
Input array.
out : _Symbol or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : _Symbol or bool
True where x is positive infinity, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This means that Not a Number is not equivalent to infinity.
"""
return _unary_func_helper(x, _npi.isposinf, _np.isposinf, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def isneginf(x, out=None, **kwargs):
"""
Test element-wise for negative infinity, return result as bool array.
Parameters
----------
x : _Symbol or scalar
Input array.
out : _Symbol or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : _Symbol or bool
True where x is negative infinity, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This means that Not a Number is not equivalent to infinity.
"""
return _unary_func_helper(x, _npi.isneginf, _np.isneginf, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
@wrap_np_unary_func
def isfinite(x, out=None, **kwargs):
"""
Test element-wise for finiteness (not infinity or not Not a Number).
Parameters
----------
x : _Symbol or scalar
Input array.
out : _Symbol or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : _Symbol or bool
True where x is not positive infinity, negative infinity, or NaN; false otherwise.
This is a scalar if x is a scalar.
Notes
-----
Not a Number, positive infinity and negative infinity are considered to be non-finite.
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity.
But infinity is equivalent to positive infinity. Errors result if the second argument
is also supplied when x is a scalar input, or if first and second arguments have different shapes.
"""
return _unary_func_helper(x, _npi.isfinite, _np.isfinite, out=out, **kwargs)
@set_module('mxnet.symbol.numpy')
def atleast_1d(*arys):
"""
Convert inputs to arrays with at least one dimension.
Scalar inputs are converted to 1-dimensional arrays, whilst higher-dimensional inputs are preserved.
Parameters
----------
arys1, arys2, ... : _Symbol
One or more input arrays.
Returns
-------
ret : _Symbol
An array, or list of arrays, each with a.ndim >= 1. Copies are made only if necessary.
See also
--------
atleast_2d, atleast_3d
"""
return _npi.atleast_1d(*arys)
@set_module('mxnet.symbol.numpy')
def atleast_2d(*arys):
"""
Convert inputs to arrays with at least two dimensions.
Parameters
----------
arys1, arys2, ... : _Symbol
One or more input arrays.
Returns
-------
ret : _Symbol
An array, or list of arrays, each with a.ndim >= 2. Copies are made only if necessary.
See also
--------
atleast_1d, atleast_3d
"""
return _npi.atleast_2d(*arys)
@set_module('mxnet.symbol.numpy')
def atleast_3d(*arys):
"""
Convert inputs to arrays with at least three dimensions.
Parameters
----------
arys1, arys2, ... : _Symbol
One or more input arrays.
Returns
-------
ret : _Symbol
An array, or list of arrays, each with a.ndim >= 3.
For example, a 1-D array of shape (N,) becomes a view of shape (1, N, 1),
and a 2-D array of shape (M, N) becomes a view of shape (M, N, 1).
See also
--------
atleast_1d, atleast_2d
"""
return _npi.atleast_3d(*arys)
@set_module('mxnet.symbol.numpy')
def where(condition, x, y):
"""
Return elements chosen from `x` or `y` depending on `condition`.
Parameters
----------
condition : _Symbol
Where True, yield `x`, otherwise yield `y`.
x, y : _Symbol
Values from which to choose. `x`, `y` and `condition` need to be
broadcastable to some shape. `x` and `y` must have the same dtype.
Returns
-------
out : _Symbol
An array with elements from `x` where `condition` is True, and elements
from `y` elsewhere.
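Examples
--------
Sketch of typical use (imperative front end shown for clarity)::
    a = np.arange(10)
    np.where(a < 5, a, 10 * a)   # -> [0, 1, 2, 3, 4, 50, 60, 70, 80, 90]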
"""
if isinstance(condition, numeric_types):
if condition != 0:
return x
else:
return y
else:
if isinstance(x, numeric_types) and isinstance(y, numeric_types):
return _npi.where_scalar2(condition, float(x), float(y), out=None)
elif isinstance(x, Symbol) and isinstance(y, Symbol):
return _npi.where(condition, x, y, out=None)
elif isinstance(y, Symbol):
return _npi.where_lscalar(condition, y, float(x), out=None)
elif isinstance(x, Symbol):
return _npi.where_rscalar(condition, x, float(y), out=None)
else:
raise TypeError('type {0} and {1} not supported'.format(str(type(x)), str(type(y))))
@set_module('mxnet.symbol.numpy')
def load(fname):
"""Loads symbol from a JSON file.
You can also use pickle if you only work in Python.
The advantage of load/save is that the file is language agnostic.
This means a file saved with ``save`` can be loaded by bindings for other languages supported by MXNet.
You also get the benefit of being able to load/save directly from cloud storage (S3, HDFS).
Parameters
----------
fname : str
The name of the file, examples:
- `s3://my-bucket/path/my-s3-symbol`
- `hdfs://my-bucket/path/my-hdfs-symbol`
- `/path-to/my-local-symbol`
Returns
-------
sym : _Symbol
The loaded symbol.
See Also
--------
_Symbol.save : Used to save symbol into file.
"""
if not isinstance(fname, string_types):
raise TypeError('fname needs to be string')
handle = SymbolHandle()
check_call(_LIB.MXSymbolCreateFromFile(c_str(fname), ctypes.byref(handle)))
return _Symbol(handle)
@set_module('mxnet.symbol.numpy')
def load_json(json_str):
"""Loads symbol from json string.
Parameters
----------
json_str : str
A JSON string.
Returns
-------
sym : Symbol
The loaded symbol.
See Also
--------
_Symbol.tojson : Used to save symbol into json string.
"""
if not isinstance(json_str, string_types):
raise TypeError('json_str needs to be string')
handle = SymbolHandle()
check_call(_LIB.MXSymbolCreateFromJSON(c_str(json_str), ctypes.byref(handle)))
return _Symbol(handle)
@set_module('mxnet.symbol.numpy')
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If p is of length N, this function returns the value:
p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]
If x is a sequence, then p(x) is returned for each element of x.
If x is another polynomial then the composite polynomial p(x(t)) is returned.
Parameters
----------
p : _Symbol
1D array of polynomial coefficients (including coefficients equal to zero)
from highest degree to the constant term.
x : _Symbol
An array of numbers, at which to evaluate p.
Returns
-------
values : _Symbol
Result array of polynomials
Notes
-----
This function differs from the original `numpy.polyval
<https://numpy.org/devdocs/reference/generated/numpy.polyval.html>`_ in
the following way(s):
- Does not support poly1d.
- ``x`` should be an ndarray even if it contains only one element.
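Examples
--------
Illustrative sketch (both ``p`` and ``x`` given as arrays)::
    p = np.array([3., 0., 1.])   # 3*x**2 + 0*x + 1
    x = np.array([5., 0., 2.])
    np.polyval(p, x)             # -> [76., 1., 13.]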
"""
if isinstance(p, Symbol) and isinstance(x, Symbol):
return _npi.polyval(p, x)
elif not isinstance(p, Symbol) and not isinstance(x, Symbol):
return _np.polyval(p, x)
else:
raise TypeError('type not supported')
@set_module('mxnet.symbol.numpy')
def bincount(x, weights=None, minlength=0):
"""
Count number of occurrences of each value in array of non-negative ints.
Parameters
----------
x : _Symbol
input data
weights : _Symbol, optional
Weights, an array of the same shape as x.
minlength : int, optional
A minimum number of bins for the output array.
Returns
--------
out : _Symbol
the result of binning the input data. The length of out is equal to amax(x)+1.
Raises
------
ValueError
If the input is not 1-dimensional, or contains elements with negative values,
or if minlength is negative
TypeError
If the type of the input is float or complex.
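Examples
--------
Illustrative sketch (NumPy semantics)::
    x = np.array([0, 1, 1, 3, 2, 1, 7])
    np.bincount(x)               # -> [1, 3, 1, 1, 0, 0, 0, 1]
    w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6, 0.1])
    np.bincount(x, weights=w)    # per-bin sums of the weights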
"""
if minlength < 0:
raise ValueError("Minlength value should greater than 0")
if weights is None:
return _npi.bincount(x, minlength=minlength, has_weights=False)
return _npi.bincount(x, weights=weights, minlength=minlength, has_weights=True)
@set_module('mxnet.symbol.numpy')
def pad(x, pad_width, mode='constant', **kwargs): # pylint: disable=too-many-arguments
"""
Pad an array.
Parameters
----------
array : array_like of rank N
The array to pad.
pad_width : {sequence, array_like, int}
Number of values padded to the edges of each axis.
((before_1, after_1), ... (before_N, after_N)) unique pad widths
for each axis.
((before, after),) yields same before and after pad for each axis.
(pad,) or int is a shortcut for before = after = pad width for all
axes.
mode : str or function, optional
One of the following string values or a user supplied function.
'constant' (default)
Pads with a constant value.
'edge'
Pads with the edge values of array.
'linear_ramp'
not supported yet
'maximum'
Pads with the maximum value of all of the
vector along each axis.
'mean'
not supported yet
'median'
not supported yet
'minimum'
Pads with the minimum value of all of the
vector along each axis.
'reflect'
Pads with the reflection of the vector mirrored on
the first and last values of the vector along each
axis.
'symmetric'
Pads with the reflection of the vector mirrored
along the edge of the array.
'wrap'
not supported yet.
'empty'
not supported yet.
<function>
not supported yet.
stat_length : not supported yet
constant_values : scalar, optional
Used in 'constant'. The values to set the padded values for each
axis.
Default is 0.
end_values : not supported yet
reflect_type : {'even', 'odd'}, optional
only support even now
Returns
-------
pad : ndarray
Padded array of rank equal to `array` with shape increased
according to `pad_width`.
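Examples
--------
A minimal sketch covering two of the supported modes (``pad_width`` must be a tuple here)::
    a = np.array([1, 2, 3, 4, 5])
    np.pad(a, (2, 3), 'constant', constant_values=6)   # -> [6, 6, 1, 2, 3, 4, 5, 6, 6, 6]
    np.pad(a, (2, 3), 'edge')                          # -> [1, 1, 1, 2, 3, 4, 5, 5, 5, 5]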
"""
# pylint: disable = too-many-return-statements, inconsistent-return-statements
if not _np.asarray(pad_width).dtype.kind == 'i':
raise TypeError('`pad_width` must be of integral type.')
if not isinstance(pad_width, tuple):
raise TypeError("`pad_width` must be tuple.")
if mode == "linear_ramp":
raise ValueError("mode {'linear_ramp'} is not supported.")
if mode == "wrap":
raise ValueError("mode {'wrap'} is not supported.")
if mode == "median":
raise ValueError("mode {'median'} is not supported.")
if mode == "mean":
raise ValueError("mode {'mean'} is not supported.")
if mode == "empty":
raise ValueError("mode {'empty'} is not supported.")
if callable(mode):
raise ValueError("mode {'<function>'} is not supported.")
allowedkwargs = {
'constant': ['constant_values'],
'edge': [],
'linear_ramp': ['end_values'],
'maximum': ['stat_length'],
'mean': ['stat_length'],
'median': ['stat_length'],
'minimum': ['stat_length'],
'reflect': ['reflect_type'],
'symmetric': ['reflect_type'],
'wrap': [],
}
if isinstance(mode, _np.compat.basestring):
# Make sure have allowed kwargs appropriate for mode
for key in kwargs:
if key not in allowedkwargs[mode]:
raise ValueError('%s keyword not in allowed keywords %s' %(key, allowedkwargs[mode]))
if mode == "constant":
values = kwargs.get("constant_values", 0)
if isinstance(values, tuple):
raise TypeError("unsupported constant_values type: {'tuple'}.")
return _npi.pad(x, pad_width, mode='constant', constant_values=values)
elif mode == "symmetric":
values = kwargs.get("reflect_type", "even")
if values != "even" and values is not None:
raise ValueError("unsupported reflect_type '{}'".format(values))
return _npi.pad(x, pad_width, mode='symmetric', reflect_type="even")
elif mode == "edge":
return _npi.pad(x, pad_width, mode='edge')
elif mode == "reflect":
values = kwargs.get("reflect_type", "even")
if values != "even" and values is not None:
raise ValueError("unsupported reflect_type '{}'".format(values))
return _npi.pad(x, pad_width, mode='reflect', reflect_type="even")
elif mode == "maximum":
values = kwargs.get("stat_length", None)
if values is not None:
raise ValueError("unsupported stat_length '{}'".format(values))
return _npi.pad(x, pad_width, mode='maximum')
elif mode == "minimum":
values = kwargs.get("stat_length", None)
if values is not None:
raise ValueError("unsupported stat_length '{}'".format(values))
return _npi.pad(x, pad_width, mode='minimum')
return _npi.pad(x, pad_width, mode='constant', constant_values=0)
@set_module('mxnet.symbol.numpy')
def prod(a, axis=None, dtype=None, keepdims=False, initial=None, out=None): # pylint: disable=too-many-arguments
"""
Return the product of array elements over a given axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which a product is performed. The default,
axis=None, will calculate the product of all the elements in the
input array. If axis is negative it counts from the last to the
first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, a product is performed on all of the
axes specified in the tuple instead of a single axis or all the
axes as before.
dtype : dtype, optional
The type of the returned array, as well as of the accumulator in
which the elements are multiplied. The dtype of `a` is used by
default unless `a` has an integer dtype of less precision than the
default platform integer. In that case, if `a` is signed then the
platform integer is used while if `a` is unsigned then an unsigned
integer of the same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `prod` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
initial : scalar, optional
The starting value for this product. See `~numpy.ufunc.reduce` for details.
where : not supported
Returns
-------
product_along_axis : ndarray, see `dtype` parameter above.
An array shaped as `a` but with the specified axis removed.
Returns a reference to `out` if specified.
Examples
--------
By default, calculate the product of all elements:
>>> np.prod([1.,2.])
2.0
Even when the input array is two-dimensional:
>>> np.prod([[1.,2.],[3.,4.]])
24.0
But we can also specify the axis over which to multiply:
>>> np.prod([[1.,2.],[3.,4.]], axis=1)
array([ 2., 12.])
Or select specific elements to include:
>>> np.prod([1., np.nan, 3.], where=[True, False, True])
3.0
If the type of `x` is unsigned, then the output type is
the unsigned platform integer:
>>> x = np.array([1, 2, 3], dtype=np.uint8)
>>> np.prod(x).dtype == np.uint
True
If `x` is of a signed integer type, then the output type
is the default platform integer:
>>> x = np.array([1, 2, 3], dtype=np.int8)
>>> np.prod(x).dtype == int
True
You can also start the product with a value other than one:
>>> np.prod([1, 2], initial=5)
10
"""
return _npi.prod(a, axis=axis, dtype=dtype, keepdims=keepdims, initial=initial)
@set_module('mxnet.symbol.numpy')
def cumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along a given axis.
Parameters
----------
a : _Symbol
Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
(None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : _Symbol, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary. See `doc.ufuncs`
(Section "Output arguments") for more details.
Returns
-------
cumsum_along_axis : _Symbol.
A new array holding the result is returned unless `out` is
specified, in which case a reference to `out` is returned. The
result has the same size as `a`, and the same shape as `a` if
`axis` is not None or `a` is a 1-d array.
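Examples
--------
Sketch (imperative front end shown; the symbolic version builds a graph node)::
    a = np.array([[1, 2, 3], [4, 5, 6]])
    np.cumsum(a)          # -> [1, 3, 6, 10, 15, 21]
    np.cumsum(a, axis=0)  # -> [[1, 2, 3], [5, 7, 9]]
    np.cumsum(a, axis=1)  # -> [[1, 3, 6], [4, 9, 15]]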
"""
return _npi.cumsum(a, axis=axis, dtype=dtype, out=out)
@set_module('mxnet.symbol.numpy')
def rollaxis(a, axis, start=0):
"""
Roll the specified axis backwards, until it lies in a given position.
Parameters
----------
a : ndarray
Input array.
axis : integer
The axis to roll backwards. The positions of the other axes do not
change relative to one another.
start: int, optional
The axis is rolled until it lies before this position.
The default, 0, results in a “complete” roll.
Returns
-------
res : ndarray
A view after applying rollaxis to `a` is returned.
Examples
--------
>>> a = np.ones((3,4,5,6))
>>> np.rollaxis(a, 3, 1).shape
(3, 6, 4, 5)
>>> np.rollaxis(a, 2).shape
(5, 3, 4, 6)
>>> np.rollaxis(a, 1, 4).shape
(3, 5, 6, 4)
"""
return _npi.rollaxis(a, axis, start)
@set_module('mxnet.symbol.numpy')
def diag(v, k=0):
"""
Extracts a diagonal or constructs a diagonal array.
- 1-D arrays: constructs a 2-D array with the input as its diagonal, all other elements are zero.
- 2-D arrays: extracts the k-th diagonal.
Parameters
----------
v : _Symbol
The 1-D array to place on the diagonal, or the 2-D array from which to extract a diagonal.
k : int, optional
Which diagonal to extract or construct; 0 (default) is the main diagonal, k > 0 lies above it and k < 0 below it.
Returns
----------
out : _Symbol
The extracted diagonal or constructed diagonal array.
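Examples
--------
Illustrative sketch (NumPy semantics)::
    x = np.arange(9).reshape((3, 3))
    np.diag(x)                        # -> [0, 4, 8]
    np.diag(x, k=1)                   # -> [1, 5]
    np.diag(np.array([1., 2., 3.]))   # builds a 3x3 array with [1, 2, 3] on the diagonal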
"""
return _npi.diag(v, k=k)
@set_module('mxnet.symbol.numpy')
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
return _npi.diagflat(v, k=k)
@set_module('mxnet.symbol.numpy')
def diagonal(a, offset=0, axis1=0, axis2=1):
"""
If a is 2-D, returns the diagonal of a with the given offset, i.e., the collection of elements of
the form a[i, i+offset]. If a has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-array whose diagonal is returned. The shape of the
resulting array can be determined by removing axis1 and axis2 and appending an index to the
right equal to the size of the resulting diagonals.
Parameters
----------
a : _Symbol
Input data from which diagonal are taken.
offset : int, optional
Offset of the diagonal from the main diagonal.
axis1 : int, optional
Axis to be used as the first axis of the 2-D sub-arrays.
axis2 : int, optional
Axis to be used as the second axis of the 2-D sub-arrays.
Returns
-------
out : _Symbol
Output result
Raises
-------
ValueError: If the dimension of a is less than 2.
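Examples
--------
Sketch of the expected behaviour::
    a = np.arange(4).reshape(2, 2)
    np.diagonal(a)             # -> [0, 3]
    np.diagonal(a, offset=1)   # -> [1]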
"""
return _npi.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)
# pylint:disable=redefined-outer-name, too-many-arguments
@set_module('mxnet.symbol.numpy')
def sum(a, axis=None, dtype=None, out=None, keepdims=None, initial=None, where=None):
r"""
Sum of array elements over a given axis.
Parameters
----------
a : _Symbol
Input data.
axis : None or int, optional
Axis or axes along which a sum is performed. The default,
axis=None, will sum all of the elements of the input array. If
axis is negative it counts from the last to the first axis.
dtype : dtype, optional
The type of the returned array and of the accumulator in which the
elements are summed. The default type is float32.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `sum` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-classes `sum` method does not implement `keepdims` any
exceptions will be raised.
initial : scalar, optional
Starting value for the sum. Not implemented yet, so only ``None`` is accepted (or simply omit the argument).
out : ndarray or None, optional
Alternative output array in which to place the result. It must have
the same shape and dtype as the expected output.
Returns
-------
sum_along_axis : _Symbol
An ndarray with the same shape as `a`, with the specified
axis removed. If an output array is specified, a reference to
`out` is returned.
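Examples
--------
A minimal sketch (the default accumulator dtype is float32)::
    a = np.array([[0, 1], [0, 5]])
    np.sum(a)           # -> 6.0
    np.sum(a, axis=0)   # -> [0., 6.]
    np.sum(a, axis=1)   # -> [1., 5.]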
"""
if where is not None and where is not True:
raise ValueError("only where=None or where=True cases are supported for now")
return _npi.sum(a, axis=axis, dtype=dtype, keepdims=keepdims, initial=initial, out=out)
# pylint:enable=redefined-outer-name, too-many-arguments
_set_np_symbol_class(_Symbol)
| apache-2.0 |
qingshuimonk/STA663 | docs/ae_same_structure.py | 1 | 4710 | # -*- coding: utf-8 -*-
""" Auto Encoder Example.
Using an auto encoder on MNIST handwritten digits.
References:
Y. LeCun, L. Bottou, Y. Bengio, and P. Haffner. "Gradient-based
learning applied to document recognition." Proceedings of the IEEE,
86(11):2278-2324, November 1998.
Links:
[MNIST Dataset] http://yann.lecun.com/exdb/mnist/
"""
from __future__ import division, print_function, absolute_import
import matplotlib
matplotlib.use('PS')
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
# Parameters
learning_rate = 0.01
training_epochs = 20
batch_size = 256
display_step = 1
examples_to_show = 8
# Network Parameters
n_hidden_1 = 500 # 1st layer num features
n_hidden_2 = 500 # 2nd layer num features
n_z = 20
n_input = 784 # MNIST data input (img shape: 28*28)
# tf Graph input (only pictures)
X = tf.placeholder("float", [None, n_input])
weights = {
'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'encoder_z': tf.Variable(tf.random_normal([n_hidden_2, n_z])),
'decoder_z': tf.Variable(tf.random_normal([n_z, n_hidden_2])),
'decoder_h1': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
'decoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}
biases = {
'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
'encoder_bz': tf.Variable(tf.random_normal([n_z])),
'decoder_bz': tf.Variable(tf.random_normal([n_hidden_2])),
'decoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'decoder_b2': tf.Variable(tf.random_normal([n_input])),
}
# Building the encoder
def encoder(x):
# Encoder Hidden layer with sigmoid activation #1
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),
biases['encoder_b1']))
    # Encoder hidden layer with sigmoid activation #2
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),
biases['encoder_b2']))
layer_z = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['encoder_z']),
biases['encoder_bz']))
return layer_z
# Building the decoder
def decoder(x):
    # Decoder hidden layer with sigmoid activation #1
layer_z = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_z']),
biases['decoder_bz']))
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(layer_z, weights['decoder_h1']),
biases['decoder_b1']))
# Decoder Hidden layer with sigmoid activation #2
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),
biases['decoder_b2']))
return layer_2
# Construct model
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
# Prediction
y_pred = decoder_op
# Targets (Labels) are the input data.
y_true = X
# Define loss and optimizer, minimize the squared error
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
# with tf.Session() as sess:
sess = tf.Session()
sess.run(init)
total_batch = int(mnist.train.num_examples/batch_size)
# Training cycle
for epoch in range(training_epochs):
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# Run optimization op (backprop) and cost op (to get loss value)
_, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c))
print("Optimization Finished!")
encode_decode = sess.run(y_pred, feed_dict={X: mnist.test.images[:examples_to_show]})
plt.figure(figsize=(8, 2))
for i in range(8):
ax = plt.subplot(2, 8, i+1)
plt.imshow(mnist.test.images[i].reshape(28, 28), vmin=0, vmax=1, cmap="gray")
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
ax = plt.subplot(2, 8, 8+i+1)
plt.imshow(encode_decode[i].reshape(28, 28), vmin=0, vmax=1, cmap="gray")
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
#plt.tight_layout()
plt.show()
plt.savefig('../data/{}.png'.format('ae_pic'), bbox_inches='tight')
SaganBolliger/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/pyplot.py | 69 | 77521 | import sys
import matplotlib
from matplotlib import _pylab_helpers, interactive
from matplotlib.cbook import dedent, silent_list, is_string_like, is_numlike
from matplotlib.figure import Figure, figaspect
from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.image import imread as _imread
from matplotlib import rcParams, rcParamsDefault, get_backend
from matplotlib.rcsetup import interactive_bk as _interactive_bk
from matplotlib.artist import getp, get, Artist
from matplotlib.artist import setp as _setp
from matplotlib.axes import Axes
from matplotlib.projections import PolarAxes
from matplotlib import mlab # for csv2rec in plotfile
from matplotlib.scale import get_scale_docs, get_scale_names
from matplotlib import cm
from matplotlib.cm import get_cmap
# We may not need the following imports here:
from matplotlib.colors import Normalize, normalize # latter for backwards compat.
from matplotlib.lines import Line2D
from matplotlib.text import Text, Annotation
from matplotlib.patches import Polygon, Rectangle, Circle, Arrow
from matplotlib.widgets import SubplotTool, Button, Slider, Widget
from ticker import TickHelper, Formatter, FixedFormatter, NullFormatter,\
FuncFormatter, FormatStrFormatter, ScalarFormatter,\
LogFormatter, LogFormatterExponent, LogFormatterMathtext,\
Locator, IndexLocator, FixedLocator, NullLocator,\
LinearLocator, LogLocator, AutoLocator, MultipleLocator,\
MaxNLocator
## Backend detection ##
def _backend_selection():
""" If rcParams['backend_fallback'] is true, check to see if the
current backend is compatible with the current running event
loop, and if not switches to a compatible one.
"""
backend = rcParams['backend']
if not rcParams['backend_fallback'] or \
backend not in _interactive_bk:
return
is_agg_backend = rcParams['backend'].endswith('Agg')
if 'wx' in sys.modules and not backend in ('WX', 'WXAgg'):
import wx
if wx.App.IsMainLoopRunning():
rcParams['backend'] = 'wx' + 'Agg' * is_agg_backend
elif 'qt' in sys.modules and not backend == 'QtAgg':
import qt
if not qt.qApp.startingUp():
# The mainloop is running.
rcParams['backend'] = 'qtAgg'
elif 'PyQt4.QtCore' in sys.modules and not backend == 'Qt4Agg':
import PyQt4.QtGui
if not PyQt4.QtGui.qApp.startingUp():
# The mainloop is running.
rcParams['backend'] = 'qt4Agg'
elif 'gtk' in sys.modules and not backend in ('GTK', 'GTKAgg',
'GTKCairo'):
import gobject
if gobject.MainLoop().is_running():
rcParams['backend'] = 'gtk' + 'Agg' * is_agg_backend
elif 'Tkinter' in sys.modules and not backend == 'TkAgg':
#import Tkinter
pass #what if anything do we need to do for tkinter?
_backend_selection()
## Global ##
from matplotlib.backends import pylab_setup
new_figure_manager, draw_if_interactive, show = pylab_setup()
def findobj(o=None, match=None):
if o is None:
o = gcf()
return o.findobj(match)
findobj.__doc__ = Artist.findobj.__doc__
def switch_backend(newbackend):
"""
Switch the default backend to newbackend. This feature is
**experimental**, and is only expected to work switching to an
image backend. Eg, if you have a bunch of PostScript scripts that
you want to run from an interactive ipython session, you may want
to switch to the PS backend before running them to avoid having a
bunch of GUI windows popup. If you try to interactively switch
from one GUI backend to another, you will explode.
Calling this command will close all open windows.
"""
close('all')
global new_figure_manager, draw_if_interactive, show
matplotlib.use(newbackend, warn=False)
reload(matplotlib.backends)
from matplotlib.backends import pylab_setup
new_figure_manager, draw_if_interactive, show = pylab_setup()
def isinteractive():
"""
Return the interactive status
"""
return matplotlib.is_interactive()
def ioff():
'Turn interactive mode off.'
matplotlib.interactive(False)
def ion():
'Turn interactive mode on.'
matplotlib.interactive(True)
def rc(*args, **kwargs):
matplotlib.rc(*args, **kwargs)
if matplotlib.rc.__doc__ is not None:
rc.__doc__ = dedent(matplotlib.rc.__doc__)
def rcdefaults():
matplotlib.rcdefaults()
draw_if_interactive()
if matplotlib.rcdefaults.__doc__ is not None:
rcdefaults.__doc__ = dedent(matplotlib.rcdefaults.__doc__)
# The current "image" (ScalarMappable) is tracked here on a
# per-pylab-session basis:
def gci():
"""
Get the current :class:`~matplotlib.cm.ScalarMappable` instance
(image or patch collection), or *None* if no images or patch
collections have been defined. The commands
:func:`~matplotlib.pyplot.imshow` and
:func:`~matplotlib.pyplot.figimage` create
:class:`~matplotlib.image.Image` instances, and the commands
:func:`~matplotlib.pyplot.pcolor` and
:func:`~matplotlib.pyplot.scatter` create
:class:`~matplotlib.collections.Collection` instances.
"""
return gci._current
gci._current = None
def sci(im):
"""
Set the current image (target of colormap commands like
:func:`~matplotlib.pyplot.jet`, :func:`~matplotlib.pyplot.hot` or
:func:`~matplotlib.pyplot.clim`).
"""
gci._current = im
## Any Artist ##
# (getp is simply imported)
def setp(*args, **kwargs):
ret = _setp(*args, **kwargs)
draw_if_interactive()
return ret
if _setp.__doc__ is not None:
setp.__doc__ = _setp.__doc__
## Figures ##
def figure(num=None, # autoincrement if None, else integer from 1-N
figsize = None, # defaults to rc figure.figsize
dpi = None, # defaults to rc figure.dpi
facecolor = None, # defaults to rc figure.facecolor
edgecolor = None, # defaults to rc figure.edgecolor
frameon = True,
FigureClass = Figure,
**kwargs
):
"""
call signature::
figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
Create a new figure and return a :class:`matplotlib.figure.Figure`
instance. If *num* = *None*, the figure number will be incremented and
a new figure will be created. The returned figure objects have a
*number* attribute holding this number.
If *num* is an integer, and ``figure(num)`` already exists, make it
active and return the handle to it. If ``figure(num)`` does not exist
it will be created. Numbering starts at 1, MATLAB style::
figure(1)
If you are creating many figures, make sure you explicitly call "close"
on the figures you are not using, because this will enable pylab
to properly clean up the memory.
Optional keyword arguments:
========= =======================================================
Keyword Description
========= =======================================================
figsize width x height in inches; defaults to rc figure.figsize
dpi resolution; defaults to rc figure.dpi
facecolor the background color; defaults to rc figure.facecolor
edgecolor the border color; defaults to rc figure.edgecolor
========= =======================================================
rcParams defines the default values, which can be modified in the
matplotlibrc file
*FigureClass* is a :class:`~matplotlib.figure.Figure` or derived
class that will be passed on to :meth:`new_figure_manager` in the
backends which allows you to hook custom Figure classes into the
pylab interface. Additional kwargs will be passed on to your
figure init function.
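A short illustrative example::
    fig1 = figure(1, figsize=(6, 4))   # create or activate figure 1
    fig2 = figure()                    # a new, auto-numbered figure
    close(fig2)                        # release its resources when done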
"""
if figsize is None : figsize = rcParams['figure.figsize']
if dpi is None : dpi = rcParams['figure.dpi']
if facecolor is None : facecolor = rcParams['figure.facecolor']
if edgecolor is None : edgecolor = rcParams['figure.edgecolor']
if num is None:
allnums = [f.num for f in _pylab_helpers.Gcf.get_all_fig_managers()]
if allnums:
num = max(allnums) + 1
else:
num = 1
else:
num = int(num) # crude validation of num argument
figManager = _pylab_helpers.Gcf.get_fig_manager(num)
if figManager is None:
if get_backend().lower() == 'ps': dpi = 72
figManager = new_figure_manager(num, figsize=figsize,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
frameon=frameon,
FigureClass=FigureClass,
**kwargs)
# make this figure current on button press event
def make_active(event):
_pylab_helpers.Gcf.set_active(figManager)
cid = figManager.canvas.mpl_connect('button_press_event', make_active)
figManager._cidgcf = cid
_pylab_helpers.Gcf.set_active(figManager)
figManager.canvas.figure.number = num
draw_if_interactive()
return figManager.canvas.figure
def gcf():
"Return a handle to the current figure."
figManager = _pylab_helpers.Gcf.get_active()
if figManager is not None:
return figManager.canvas.figure
else:
return figure()
def get_current_fig_manager():
figManager = _pylab_helpers.Gcf.get_active()
if figManager is None:
gcf() # creates an active figure as a side effect
figManager = _pylab_helpers.Gcf.get_active()
return figManager
# note we check for __doc__ is not None since py2exe optimize removes
# the docstrings
def connect(s, func):
return get_current_fig_manager().canvas.mpl_connect(s, func)
if FigureCanvasBase.mpl_connect.__doc__ is not None:
connect.__doc__ = dedent(FigureCanvasBase.mpl_connect.__doc__)
def disconnect(cid):
return get_current_fig_manager().canvas.mpl_disconnect(cid)
if FigureCanvasBase.mpl_disconnect.__doc__ is not None:
disconnect.__doc__ = dedent(FigureCanvasBase.mpl_disconnect.__doc__)
def close(*args):
"""
Close a figure window
``close()`` by itself closes the current figure
``close(num)`` closes figure number *num*
``close(h)`` where *h* is a :class:`Figure` instance, closes that figure
``close('all')`` closes all the figure windows
"""
if len(args)==0:
figManager = _pylab_helpers.Gcf.get_active()
if figManager is None: return
else:
figManager.canvas.mpl_disconnect(figManager._cidgcf)
_pylab_helpers.Gcf.destroy(figManager.num)
elif len(args)==1:
arg = args[0]
if arg=='all':
for manager in _pylab_helpers.Gcf.get_all_fig_managers():
manager.canvas.mpl_disconnect(manager._cidgcf)
_pylab_helpers.Gcf.destroy(manager.num)
elif isinstance(arg, int):
_pylab_helpers.Gcf.destroy(arg)
elif isinstance(arg, Figure):
for manager in _pylab_helpers.Gcf.get_all_fig_managers():
if manager.canvas.figure==arg:
manager.canvas.mpl_disconnect(manager._cidgcf)
_pylab_helpers.Gcf.destroy(manager.num)
else:
raise TypeError('Unrecognized argument type %s to close'%type(arg))
else:
raise TypeError('close takes 0 or 1 arguments')
def clf():
"""
Clear the current figure
"""
gcf().clf()
draw_if_interactive()
def draw():
'redraw the current figure'
get_current_fig_manager().canvas.draw()
def savefig(*args, **kwargs):
fig = gcf()
return fig.savefig(*args, **kwargs)
if Figure.savefig.__doc__ is not None:
savefig.__doc__ = dedent(Figure.savefig.__doc__)
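# Usage sketch (illustrative only): savefig forwards everything to the
# current figure, so any Figure.savefig keyword applies here. The filename
# and dpi below are example values, not defaults.
def _example_savefig():
    plot([1, 2, 3])
    savefig('example_output.png', dpi=150)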
def ginput(*args, **kwargs):
"""
Blocking call to interact with the figure.
This will wait for *n* clicks from the user and return a list of the
coordinates of each click.
If *timeout* is negative, does not timeout.
"""
return gcf().ginput(*args, **kwargs)
if Figure.ginput.__doc__ is not None:
ginput.__doc__ = dedent(Figure.ginput.__doc__)
def waitforbuttonpress(*args, **kwargs):
"""
Blocking call to interact with the figure.
This will wait for *n* key or mouse clicks from the user and
return a list containing True's for keyboard clicks and False's
for mouse clicks.
If *timeout* is negative, does not timeout.
"""
return gcf().waitforbuttonpress(*args, **kwargs)
if Figure.waitforbuttonpress.__doc__ is not None:
waitforbuttonpress.__doc__ = dedent(Figure.waitforbuttonpress.__doc__)
# Putting things in figures
def figtext(*args, **kwargs):
ret = gcf().text(*args, **kwargs)
draw_if_interactive()
return ret
if Figure.text.__doc__ is not None:
figtext.__doc__ = dedent(Figure.text.__doc__)
def suptitle(*args, **kwargs):
ret = gcf().suptitle(*args, **kwargs)
draw_if_interactive()
return ret
if Figure.suptitle.__doc__ is not None:
suptitle.__doc__ = dedent(Figure.suptitle.__doc__)
def figimage(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
ret = gcf().figimage(*args, **kwargs)
draw_if_interactive()
gci._current = ret
return ret
if Figure.figimage.__doc__ is not None:
figimage.__doc__ = dedent(Figure.figimage.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
def figlegend(handles, labels, loc, **kwargs):
"""
Place a legend in the figure.
*labels*
a sequence of strings
*handles*
a sequence of :class:`~matplotlib.lines.Line2D` or
:class:`~matplotlib.patches.Patch` instances
*loc*
can be a string or an integer specifying the legend
location
A :class:`matplotlib.legend.Legend` instance is returned.
Example::
figlegend( (line1, line2, line3),
('label1', 'label2', 'label3'),
'upper right' )
.. seealso::
:func:`~matplotlib.pyplot.legend`:
For information about the location codes
"""
l = gcf().legend(handles, labels, loc, **kwargs)
draw_if_interactive()
return l
## Figure and Axes hybrid ##
def hold(b=None):
"""
Set the hold state. If *b* is None (default), toggle the
hold state, else set the hold state to boolean value *b*::
hold() # toggle hold
hold(True) # hold is on
hold(False) # hold is off
When *hold* is *True*, subsequent plot commands will be added to
the current axes. When *hold* is *False*, the current axes and
figure will be cleared on the next plot command.
"""
fig = gcf()
ax = fig.gca()
fig.hold(b)
ax.hold(b)
# b=None toggles the hold state, so let's get the current hold
# state; but should pyplot hold toggle the rc setting - me thinks
# not
b = ax.ishold()
rc('axes', hold=b)
def ishold():
"""
Return the hold status of the current axes
"""
return gca().ishold()
def over(func, *args, **kwargs):
"""
over calls::
func(*args, **kwargs)
with ``hold(True)`` and then restores the hold state.
"""
h = ishold()
hold(True)
func(*args, **kwargs)
hold(h)
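# Illustrative sketch (not in the original source): `over` forces hold(True)
# for a single call so the new artist is layered on top of the existing axes
# content, then restores the previous hold state. Data and format string are
# arbitrary examples.
def _example_over():
    plot([1, 2, 3])
    over(plot, [3, 2, 1], 'r--')   # draws on top without clearing the axes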
## Axes ##
def axes(*args, **kwargs):
"""
Add an axes at position rect specified by:
- ``axes()`` by itself creates a default full ``subplot(111)`` window axis.
- ``axes(rect, axisbg='w')`` where *rect* = [left, bottom, width,
height] in normalized (0, 1) units. *axisbg* is the background
color for the axis, default white.
- ``axes(h)`` where *h* is an axes instance makes *h* the current
axis. An :class:`~matplotlib.axes.Axes` instance is returned.
======= ============ ================================================
kwarg   Accepts      Description
======= ============ ================================================
axisbg color the axes background color
frameon [True|False] display the frame?
sharex otherax current axes shares xaxis attribute with otherax
sharey otherax current axes shares yaxis attribute with otherax
polar [True|False] use a polar axes?
======= ============ ================================================
Examples:
* :file:`examples/pylab_examples/axes_demo.py` places custom axes.
* :file:`examples/pylab_examples/shared_axis_demo.py` uses
*sharex* and *sharey*.
"""
nargs = len(args)
if len(args)==0: return subplot(111, **kwargs)
if nargs>1:
raise TypeError('Only one non keyword arg to axes allowed')
arg = args[0]
if isinstance(arg, Axes):
a = gcf().sca(arg)
else:
rect = arg
a = gcf().add_axes(rect, **kwargs)
draw_if_interactive()
return a
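# Illustrative sketch (not in the original source): the rect form of `axes`
# positions an axes in normalized figure coordinates, which is handy for
# insets. The rectangle values are arbitrary examples.
def _example_inset_axes():
    main_ax = axes()                      # default full-size axes
    inset = axes([0.6, 0.6, 0.25, 0.25])  # small axes in the upper right
    return main_ax, inset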
def delaxes(*args):
"""
``delaxes(ax)``: remove *ax* from the current figure. If *ax*
doesn't exist, an error will be raised.
``delaxes()``: delete the current axes
"""
if not len(args):
ax = gca()
else:
ax = args[0]
ret = gcf().delaxes(ax)
draw_if_interactive()
return ret
def gca(**kwargs):
"""
Return the current axis instance. This can be used to control
axis properties either using set or the
:class:`~matplotlib.axes.Axes` methods, for example, setting the
xaxis range::
plot(t,s)
set(gca(), 'xlim', [0,10])
or::
plot(t,s)
a = gca()
a.set_xlim([0,10])
"""
ax = gcf().gca(**kwargs)
return ax
# More ways of creating axes:
def subplot(*args, **kwargs):
"""
Create a subplot command, creating axes with::
subplot(numRows, numCols, plotNum)
where *plotNum* = 1 is the first plot number and increasing *plotNums*
fill rows first. max(*plotNum*) == *numRows* * *numCols*
You can leave out the commas if *numRows* <= *numCols* <=
*plotNum* < 10, as in::
subplot(211) # 2 rows, 1 column, first (upper) plot
``subplot(111)`` is the default axis.
New subplots that overlap old will delete the old axes. If you do
not want this behavior, use
:meth:`matplotlib.figure.Figure.add_subplot` or the
:func:`~matplotlib.pyplot.axes` command. Eg.::
from pylab import *
plot([1,2,3]) # implicitly creates subplot(111)
subplot(211) # overlaps, subplot(111) is killed
plot(rand(12), rand(12))
subplot(212, axisbg='y') # creates 2nd subplot with yellow background
Keyword arguments:
*axisbg*:
The background color of the subplot, which can be any valid
color specifier. See :mod:`matplotlib.colors` for more
information.
*polar*:
A boolean flag indicating whether the subplot plot should be
a polar projection. Defaults to False.
*projection*:
A string giving the name of a custom projection to be used
for the subplot. This projection must have been previously
registered. See :func:`matplotlib.projections.register_projection`
.. seealso::
:func:`~matplotlib.pyplot.axes`:
For additional information on :func:`axes` and
:func:`subplot` keyword arguments.
:file:`examples/pylab_examples/polar_scatter.py`
**Example:**
.. plot:: mpl_examples/pylab_examples/subplot_demo.py
"""
fig = gcf()
a = fig.add_subplot(*args, **kwargs)
bbox = a.bbox
byebye = []
for other in fig.axes:
if other==a: continue
if bbox.fully_overlaps(other.bbox):
byebye.append(other)
for ax in byebye: delaxes(ax)
draw_if_interactive()
return a
def twinx(ax=None):
"""
Make a second axes overlay *ax* (or the current axes if *ax* is
*None*) sharing the xaxis. The ticks for *ax2* will be placed on
the right, and the *ax2* instance is returned.
.. seealso::
:file:`examples/api_examples/two_scales.py`
"""
if ax is None:
ax=gca()
ax1 = ax.twinx()
draw_if_interactive()
return ax1
def twiny(ax=None):
"""
Make a second axes overlay *ax* (or the current axes if *ax* is
*None*) sharing the yaxis. The ticks for *ax2* will be placed on
the top, and the *ax2* instance is returned.
"""
if ax is None:
ax=gca()
ax1 = ax.twiny()
draw_if_interactive()
return ax1
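# Illustrative sketch (not in the original source): twinx gives a second
# y-axis sharing the same x-axis, the usual way to show two quantities with
# different units. Data and colors are arbitrary examples.
def _example_twinx():
    ax1 = gca()
    ax1.plot([0, 1, 2], [0, 10, 20], 'b-')
    ax2 = twinx()                  # new y-axis, ticks on the right
    ax2.plot([0, 1, 2], [0.0, 0.5, 1.0], 'r-')
    return ax1, ax2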
def subplots_adjust(*args, **kwargs):
"""
call signature::
subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None)
Tune the subplot layout via the
:class:`matplotlib.figure.SubplotParams` mechanism. The parameter
meanings (and suggested defaults) are::
left = 0.125 # the left side of the subplots of the figure
right = 0.9 # the right side of the subplots of the figure
bottom = 0.1 # the bottom of the subplots of the figure
top = 0.9 # the top of the subplots of the figure
wspace = 0.2 # the amount of width reserved for blank space between subplots
hspace = 0.2 # the amount of height reserved for white space between subplots
The actual defaults are controlled by the rc file
"""
fig = gcf()
fig.subplots_adjust(*args, **kwargs)
draw_if_interactive()
def subplot_tool(targetfig=None):
"""
Launch a subplot tool window for *targetfig* (default gcf).
A :class:`matplotlib.widgets.SubplotTool` instance is returned.
"""
tbar = rcParams['toolbar'] # turn off the navigation toolbar for the toolfig
rcParams['toolbar'] = 'None'
if targetfig is None:
manager = get_current_fig_manager()
targetfig = manager.canvas.figure
else:
# find the manager for this figure
for manager in _pylab_helpers.Gcf._activeQue:
if manager.canvas.figure==targetfig: break
else: raise RuntimeError('Could not find manager for targetfig')
toolfig = figure(figsize=(6,3))
toolfig.subplots_adjust(top=0.9)
ret = SubplotTool(targetfig, toolfig)
rcParams['toolbar'] = tbar
_pylab_helpers.Gcf.set_active(manager) # restore the current figure
return ret
def box(on=None):
"""
Turn the axes box on or off according to *on*.
If *on* is *None*, toggle state.
"""
ax = gca()
if on is None:
on = not ax.get_frame_on()
ax.set_frame_on(on)
draw_if_interactive()
def title(s, *args, **kwargs):
"""
Set the title of the current axis to *s*.
Default font override is::
override = {'fontsize': 'medium',
'verticalalignment': 'bottom',
'horizontalalignment': 'center'}
.. seealso::
:func:`~matplotlib.pyplot.text`:
for information on how override and the optional args work.
"""
l = gca().set_title(s, *args, **kwargs)
draw_if_interactive()
return l
## Axis ##
def axis(*v, **kwargs):
"""
Set/Get the axis properties:
>>> axis()
returns the current axes limits ``[xmin, xmax, ymin, ymax]``.
>>> axis(v)
sets the min and max of the x and y axes, with
``v = [xmin, xmax, ymin, ymax]``.
>>> axis('off')
turns off the axis lines and labels.
>>> axis('equal')
changes limits of *x* or *y* axis so that equal increments of *x*
and *y* have the same length; a circle is circular.
>>> axis('scaled')
achieves the same result by changing the dimensions of the plot box instead
of the axis data limits.
>>> axis('tight')
changes *x* and *y* axis limits such that all data is shown. If
all data is already shown, it will move it to the center of the
figure without modifying (*xmax* - *xmin*) or (*ymax* -
*ymin*). Note this is slightly different than in matlab.
>>> axis('image')
is 'scaled' with the axis limits equal to the data limits.
>>> axis('auto')
and
>>> axis('normal')
are deprecated. They restore default behavior; axis limits are automatically
scaled to make the data fit comfortably within the plot box.
if ``len(v)==0``, you can pass in *xmin*, *xmax*, *ymin*, *ymax*
as kwargs selectively to alter just those limits without changing
the others.
The xmin, xmax, ymin, ymax tuple is returned
.. seealso::
:func:`xlim`, :func:`ylim`
"""
ax = gca()
v = ax.axis(*v, **kwargs)
draw_if_interactive()
return v
def xlabel(s, *args, **kwargs):
"""
Set the *x* axis label of the current axis to *s*
Default override is::
override = {
'fontsize' : 'small',
'verticalalignment' : 'top',
'horizontalalignment' : 'center'
}
.. seealso::
:func:`~matplotlib.pyplot.text`:
For information on how override and the optional args work
"""
l = gca().set_xlabel(s, *args, **kwargs)
draw_if_interactive()
return l
def ylabel(s, *args, **kwargs):
"""
Set the *y* axis label of the current axis to *s*.
Default override is::
override = {
'fontsize'            : 'small',
'verticalalignment'   : 'center',
'horizontalalignment' : 'right',
'rotation'            : 'vertical' }
.. seealso::
:func:`~matplotlib.pyplot.text`:
For information on how override and the optional args
work.
"""
l = gca().set_ylabel(s, *args, **kwargs)
draw_if_interactive()
return l
def xlim(*args, **kwargs):
"""
Set/Get the xlimits of the current axes::
xmin, xmax = xlim() # return the current xlim
xlim( (xmin, xmax) ) # set the xlim to xmin, xmax
xlim( xmin, xmax ) # set the xlim to xmin, xmax
If you do not specify args, you can pass the xmin and xmax as
kwargs, eg.::
xlim(xmax=3) # adjust the max leaving min unchanged
xlim(xmin=1) # adjust the min leaving max unchanged
The new axis limits are returned as a length 2 tuple.
"""
ax = gca()
ret = ax.set_xlim(*args, **kwargs)
draw_if_interactive()
return ret
def ylim(*args, **kwargs):
"""
Set/Get the ylimits of the current axes::
ymin, ymax = ylim() # return the current ylim
ylim( (ymin, ymax) ) # set the ylim to ymin, ymax
ylim( ymin, ymax ) # set the ylim to ymin, ymax
If you do not specify args, you can pass the *ymin* and *ymax* as
kwargs, eg.::
ylim(ymax=3) # adjust the max leaving min unchanged
ylim(ymin=1) # adjust the min leaving max unchanged
The new axis limits are returned as a length 2 tuple.
"""
ax = gca()
ret = ax.set_ylim(*args, **kwargs)
draw_if_interactive()
return ret
def xscale(*args, **kwargs):
"""
call signature::
xscale(scale, **kwargs)
Set the scaling for the x-axis: %(scale)s
Different keywords may be accepted, depending on the scale:
%(scale_docs)s
"""
ax = gca()
ret = ax.set_xscale(*args, **kwargs)
draw_if_interactive()
return ret
xscale.__doc__ = dedent(xscale.__doc__) % {
'scale': ' | '.join([repr(_x) for _x in get_scale_names()]),
'scale_docs': get_scale_docs()}
def yscale(*args, **kwargs):
"""
call signature::
yscale(scale, **kwargs)
Set the scaling for the y-axis: %(scale)s
Different keywords may be accepted, depending on the scale:
%(scale_docs)s
"""
ax = gca()
ret = ax.set_yscale(*args, **kwargs)
draw_if_interactive()
return ret
yscale.__doc__ = dedent(yscale.__doc__) % {
'scale': ' | '.join([repr(_x) for _x in get_scale_names()]),
'scale_docs': get_scale_docs()}
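# Illustrative sketch (not in the original source): xscale/yscale accept any
# registered scale name from get_scale_names(); 'log' below is just an
# example.
def _example_log_scale():
    plot([1, 10, 100, 1000])
    yscale('log')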
def xticks(*args, **kwargs):
"""
Set/Get the xlimits of the current ticklocs and labels::
# return locs, labels where locs is an array of tick locations and
# labels is an array of tick labels.
locs, labels = xticks()
# set the locations of the xticks
xticks( arange(6) )
# set the locations and labels of the xticks
xticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )
The keyword args, if any, are :class:`~matplotlib.text.Text`
properties.
"""
ax = gca()
if len(args)==0:
locs = ax.get_xticks()
labels = ax.get_xticklabels()
elif len(args)==1:
locs = ax.set_xticks(args[0])
labels = ax.get_xticklabels()
elif len(args)==2:
locs = ax.set_xticks(args[0])
labels = ax.set_xticklabels(args[1], **kwargs)
else: raise TypeError('Illegal number of arguments to xticks')
if len(kwargs):
for l in labels:
l.update(kwargs)
draw_if_interactive()
return locs, silent_list('Text xticklabel', labels)
def yticks(*args, **kwargs):
"""
Set/Get the ylimits of the current ticklocs and labels::
# return locs, labels where locs is an array of tick locations and
# labels is an array of tick labels.
locs, labels = yticks()
# set the locations of the yticks
yticks( arange(6) )
# set the locations and labels of the yticks
yticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )
The keyword args, if any, are :class:`~matplotlib.text.Text`
properties.
"""
ax = gca()
if len(args)==0:
locs = ax.get_yticks()
labels = ax.get_yticklabels()
elif len(args)==1:
locs = ax.set_yticks(args[0])
labels = ax.get_yticklabels()
elif len(args)==2:
locs = ax.set_yticks(args[0])
labels = ax.set_yticklabels(args[1], **kwargs)
else: raise TypeError('Illegal number of arguments to yticks')
if len(kwargs):
for l in labels:
l.update(kwargs)
draw_if_interactive()
return ( locs,
silent_list('Text yticklabel', labels)
)
def rgrids(*args, **kwargs):
"""
Set/Get the radial locations of the gridlines and ticklabels on a
polar plot.
call signatures::
lines, labels = rgrids()
lines, labels = rgrids(radii, labels=None, angle=22.5, **kwargs)
When called with no arguments, :func:`rgrid` simply returns the
tuple (*lines*, *labels*), where *lines* is an array of radial
gridlines (:class:`~matplotlib.lines.Line2D` instances) and
*labels* is an array of tick labels
(:class:`~matplotlib.text.Text` instances). When called with
arguments, the labels will appear at the specified radial
distances and angles.
*labels*, if not *None*, is a len(*radii*) list of strings of the
labels to use at each radius.
If *labels* is None, the rformatter will be used
Examples::
# set the locations of the radial gridlines and labels
lines, labels = rgrids( (0.25, 0.5, 1.0) )
# set the locations and labels of the radial gridlines and labels
lines, labels = rgrids( (0.25, 0.5, 1.0), ('Tom', 'Dick', 'Harry') )
"""
ax = gca()
if not isinstance(ax, PolarAxes):
raise RuntimeError('rgrids only defined for polar axes')
if len(args)==0:
lines = ax.yaxis.get_ticklines()
labels = ax.yaxis.get_ticklabels()
else:
lines, labels = ax.set_rgrids(*args, **kwargs)
draw_if_interactive()
return ( silent_list('Line2D rgridline', lines),
silent_list('Text rgridlabel', labels) )
def thetagrids(*args, **kwargs):
"""
Set/Get the theta locations of the gridlines and ticklabels.
If no arguments are passed, return a tuple (*lines*, *labels*)
where *lines* is an array of radial gridlines
(:class:`~matplotlib.lines.Line2D` instances) and *labels* is an
array of tick labels (:class:`~matplotlib.text.Text` instances)::
lines, labels = thetagrids()
Otherwise the syntax is::
lines, labels = thetagrids(angles, labels=None, fmt='%d', frac = 1.1)
set the angles at which to place the theta grids (these gridlines
are equal along the theta dimension).
*angles* is in degrees.
*labels*, if not *None*, is a len(angles) list of strings of the
labels to use at each angle.
If *labels* is *None*, the labels will be ``fmt%angle``.
*frac* is the fraction of the polar axes radius at which to place
the label (1 is the edge). Eg. 1.05 is outside the axes and 0.95
is inside the axes.
Return value is a list of tuples (*lines*, *labels*):
- *lines* are :class:`~matplotlib.lines.Line2D` instances
- *labels* are :class:`~matplotlib.text.Text` instances.
Note that on input, the *labels* argument is a list of strings,
and on output it is a list of :class:`~matplotlib.text.Text`
instances.
Examples::
# set the locations of the radial gridlines and labels
lines, labels = thetagrids( range(45,360,90) )
# set the locations and labels of the radial gridlines and labels
lines, labels = thetagrids( range(45,360,90), ('NE', 'NW', 'SW','SE') )
"""
ax = gca()
if not isinstance(ax, PolarAxes):
raise RuntimeError('thetagrids only defined for polar axes')
if len(args)==0:
lines = ax.xaxis.get_ticklines()
labels = ax.xaxis.get_ticklabels()
else:
lines, labels = ax.set_thetagrids(*args, **kwargs)
draw_if_interactive()
return (silent_list('Line2D thetagridline', lines),
silent_list('Text thetagridlabel', labels)
)
## Plotting Info ##
def plotting():
"""
Plotting commands
=============== =========================================================
Command Description
=============== =========================================================
axes Create a new axes
axis Set or return the current axis limits
bar make a bar chart
boxplot make a box and whiskers chart
cla clear current axes
clabel label a contour plot
clf clear a figure window
close close a figure window
colorbar add a colorbar to the current figure
cohere make a plot of coherence
contour make a contour plot
contourf make a filled contour plot
csd make a plot of cross spectral density
draw force a redraw of the current figure
errorbar make an errorbar graph
figlegend add a legend to the figure
figimage add an image to the figure, w/o resampling
figtext add text in figure coords
figure create or change active figure
fill make filled polygons
fill_between make filled polygons
gca return the current axes
gcf return the current figure
gci get the current image, or None
getp get a handle graphics property
hist make a histogram
hold set the hold state on current axes
legend add a legend to the axes
loglog a log log plot
imread load image file into array
imshow plot image data
matshow display a matrix in a new figure preserving aspect
pcolor make a pseudocolor plot
plot make a line plot
plotfile plot data from a flat file
psd make a plot of power spectral density
quiver make a direction field (arrows) plot
rc control the default params
savefig save the current figure
scatter make a scatter plot
setp set a handle graphics property
semilogx log x axis
semilogy log y axis
show show the figures
specgram a spectrogram plot
stem make a stem plot
subplot make a subplot (numrows, numcols, axesnum)
table add a table to the axes
text add some text at location x,y to the current axes
title add a title to the current axes
xlabel add an xlabel to the current axes
ylabel add a ylabel to the current axes
=============== =========================================================
The following commands will set the default colormap accordingly:
* autumn
* bone
* cool
* copper
* flag
* gray
* hot
* hsv
* jet
* pink
* prism
* spring
* summer
* winter
* spectral
"""
pass
def get_plot_commands(): return ( 'axes', 'axis', 'bar', 'boxplot', 'cla', 'clf',
'close', 'colorbar', 'cohere', 'csd', 'draw', 'errorbar',
'figlegend', 'figtext', 'figimage', 'figure', 'fill', 'gca',
'gcf', 'gci', 'get', 'gray', 'barh', 'jet', 'hist', 'hold', 'imread',
'imshow', 'legend', 'loglog', 'quiver', 'rc', 'pcolor', 'pcolormesh', 'plot', 'psd',
'savefig', 'scatter', 'set', 'semilogx', 'semilogy', 'show',
'specgram', 'stem', 'subplot', 'table', 'text', 'title', 'xlabel',
'ylabel', 'pie', 'polar')
def colors():
"""
This is a do nothing function to provide you with help on how
matplotlib handles colors.
Commands which take color arguments can use several formats to
specify the colors. For the basic builtin colors, you can use a
single letter
===== =======
Alias Color
===== =======
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
===== =======
For a greater range of colors, you have two options. You can
specify the color using an html hex string, as in::
color = '#eeefff'
or you can pass an R,G,B tuple, where each of R,G,B are in the
range [0,1].
You can also use any legal html name for a color, for example::
color = 'red',
color = 'burlywood'
color = 'chartreuse'
The example below creates a subplot with a dark
slate gray background::
subplot(111, axisbg=(0.1843, 0.3098, 0.3098))
Here is an example that creates a pale turquoise title::
title('Is this the best color?', color='#afeeee')
"""
pass
def colormaps():
"""
matplotlib provides the following colormaps.
* autumn
* bone
* cool
* copper
* flag
* gray
* hot
* hsv
* jet
* pink
* prism
* spring
* summer
* winter
* spectral
You can set the colormap for an image, pcolor, scatter, etc,
either as a keyword argument::
imshow(X, cmap=cm.hot)
or post-hoc using the corresponding pylab interface function::
imshow(X)
hot()
jet()
In interactive mode, this will update the colormap allowing you to
see which one works best for your data.
"""
pass
## Plotting part 1: manually generated functions and wrappers ##
from matplotlib.colorbar import colorbar_doc
def colorbar(mappable=None, cax=None, ax=None, **kw):
if mappable is None:
mappable = gci()
if ax is None:
ax = gca()
ret = gcf().colorbar(mappable, cax = cax, ax=ax, **kw)
draw_if_interactive()
return ret
colorbar.__doc__ = colorbar_doc
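# Illustrative sketch (not in the original source): called with no mappable,
# colorbar attaches to the current image (gci()), so it is typically used
# right after an image command. The random data is an arbitrary example.
def _example_colorbar():
    import numpy as np
    imshow(np.random.rand(10, 10))
    colorbar()   # uses the image just drawn as the mappable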
def clim(vmin=None, vmax=None):
"""
Set the color limits of the current image
To apply clim to all axes images do::
clim(0, 0.5)
If either *vmin* or *vmax* is None, the image min/max respectively
will be used for color scaling.
If you want to set the clim of multiple images,
use, for example::
for im in gca().get_images():
im.set_clim(0, 0.05)
"""
im = gci()
if im is None:
raise RuntimeError('You must first define an image, eg with imshow')
im.set_clim(vmin, vmax)
draw_if_interactive()
def imread(*args, **kwargs):
return _imread(*args, **kwargs)
if _imread.__doc__ is not None:
imread.__doc__ = dedent(_imread.__doc__)
def matshow(A, fignum=None, **kw):
"""
Display an array as a matrix in a new figure window.
The origin is set at the upper left hand corner and rows (first
dimension of the array) are displayed horizontally. The aspect
ratio of the figure window is that of the array, unless this would
make an excessively short or narrow figure.
Tick labels for the xaxis are placed on top.
With the exception of fignum, keyword arguments are passed to
:func:`~matplotlib.pyplot.imshow`.
*fignum*: [ None | integer | False ]
By default, :func:`matshow` creates a new figure window with
automatic numbering. If *fignum* is given as an integer, the
created figure will use this figure number. Because of how
:func:`matshow` tries to set the figure aspect ratio to be the
one of the array, if you provide the number of an already
existing figure, strange things may happen.
If *fignum* is *False* or 0, a new figure window will **NOT** be created.
"""
if fignum is False or fignum == 0:
ax = gca()
else:
# Extract actual aspect ratio of array and make appropriately sized figure
fig = figure(fignum, figsize=figaspect(A))
ax = fig.add_axes([0.15, 0.09, 0.775, 0.775])
im = ax.matshow(A, **kw)
gci._current = im
draw_if_interactive()
return im
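# Illustrative sketch (not in the original source): matshow sizes a new
# figure to the array's aspect ratio and puts the x tick labels on top.
# The random matrix is an arbitrary example.
def _example_matshow():
    import numpy as np
    im = matshow(np.random.rand(8, 16))   # wide matrix -> wide figure
    colorbar(im)
    return im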
def polar(*args, **kwargs):
"""
call signature::
polar(theta, r, **kwargs)
Make a polar plot. Multiple *theta*, *r* arguments are supported,
with format strings, as in :func:`~matplotlib.pyplot.plot`.
"""
ax = gca(polar=True)
ret = ax.plot(*args, **kwargs)
draw_if_interactive()
return ret
def plotfile(fname, cols=(0,), plotfuncs=None,
comments='#', skiprows=0, checkrows=5, delimiter=',',
**kwargs):
"""
Plot the data in *fname*
*cols* is a sequence of column identifiers to plot. An identifier
is either an int or a string. If it is an int, it indicates the
column number. If it is a string, it indicates the column header.
matplotlib will make column headers lower case, replace spaces with
underscores, and remove all illegal characters; so ``'Adj Close*'``
will have name ``'adj_close'``.
- If len(*cols*) == 1, only that column will be plotted on the *y* axis.
- If len(*cols*) > 1, the first element will be an identifier for
data for the *x* axis and the remaining elements will be the
column indexes for multiple subplots
*plotfuncs*, if not *None*, is a dictionary mapping identifier to
an :class:`~matplotlib.axes.Axes` plotting function as a string.
Default is 'plot', other choices are 'semilogy', 'fill', 'bar',
etc. You must use the same type of identifier in the *cols*
vector as you use in the *plotfuncs* dictionary, eg., integer
column numbers in both or column names in both.
*comments*, *skiprows*, *checkrows*, and *delimiter* are all passed on to
:func:`matplotlib.pylab.csv2rec` to load the data into a record array.
kwargs are passed on to plotting functions.
Example usage::
# plot the 2nd and 4th column against the 1st in two subplots
plotfile(fname, (0,1,3))
# plot using column names; specify an alternate plot type for volume
plotfile(fname, ('date', 'volume', 'adj_close'), plotfuncs={'volume': 'semilogy'})
"""
fig = figure()
if len(cols)<1:
raise ValueError('must have at least one column of data')
if plotfuncs is None:
plotfuncs = dict()
r = mlab.csv2rec(fname, comments=comments,
skiprows=skiprows, checkrows=checkrows, delimiter=delimiter)
def getname_val(identifier):
'return the name and column data for identifier'
if is_string_like(identifier):
return identifier, r[identifier]
elif is_numlike(identifier):
name = r.dtype.names[int(identifier)]
return name, r[name]
else:
raise TypeError('identifier must be a string or integer')
xname, x = getname_val(cols[0])
if len(cols)==1:
ax1 = fig.add_subplot(1,1,1)
funcname = plotfuncs.get(cols[0], 'plot')
func = getattr(ax1, funcname)
func(x, **kwargs)
ax1.set_xlabel(xname)
else:
N = len(cols)
for i in range(1,N):
if i==1:
ax = ax1 = fig.add_subplot(N-1,1,i)
ax.grid(True)
else:
ax = fig.add_subplot(N-1,1,i, sharex=ax1)
ax.grid(True)
yname, y = getname_val(cols[i])
funcname = plotfuncs.get(cols[i], 'plot')
func = getattr(ax, funcname)
func(x, y, **kwargs)
ax.set_ylabel(yname)
if ax.is_last_row():
ax.set_xlabel(xname)
else:
ax.set_xlabel('')
if xname=='date':
fig.autofmt_xdate()
draw_if_interactive()
## Plotting part 2: autogenerated wrappers for axes methods ##
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def acorr(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().acorr(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.acorr.__doc__ is not None:
acorr.__doc__ = dedent(Axes.acorr.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
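# Illustrative sketch (not in the original source): every autogenerated
# wrapper below pops an optional hold=True|False keyword, applies it for the
# duration of the call, and then restores the previous hold state. The
# plotted data is arbitrary.
def _example_hold_override():
    plot([1, 2, 3])
    plot([3, 2, 1], hold=True)    # layer on top regardless of the default
    plot([2, 2, 2], hold=False)   # clear the axes first, then draw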
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def arrow(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().arrow(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.arrow.__doc__ is not None:
arrow.__doc__ = dedent(Axes.arrow.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axhline(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axhline(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axhline.__doc__ is not None:
axhline.__doc__ = dedent(Axes.axhline.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axhspan(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axhspan(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axhspan.__doc__ is not None:
axhspan.__doc__ = dedent(Axes.axhspan.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axvline(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axvline(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axvline.__doc__ is not None:
axvline.__doc__ = dedent(Axes.axvline.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axvspan(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axvspan(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axvspan.__doc__ is not None:
axvspan.__doc__ = dedent(Axes.axvspan.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def bar(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().bar(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.bar.__doc__ is not None:
bar.__doc__ = dedent(Axes.bar.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def barh(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().barh(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.barh.__doc__ is not None:
barh.__doc__ = dedent(Axes.barh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def broken_barh(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().broken_barh(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.broken_barh.__doc__ is not None:
broken_barh.__doc__ = dedent(Axes.broken_barh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def boxplot(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().boxplot(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.boxplot.__doc__ is not None:
boxplot.__doc__ = dedent(Axes.boxplot.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cohere(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().cohere(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.cohere.__doc__ is not None:
cohere.__doc__ = dedent(Axes.cohere.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def clabel(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().clabel(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.clabel.__doc__ is not None:
clabel.__doc__ = dedent(Axes.clabel.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def contour(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().contour(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
if ret._A is not None: gci._current = ret
hold(b)
return ret
if Axes.contour.__doc__ is not None:
contour.__doc__ = dedent(Axes.contour.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def contourf(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().contourf(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
if ret._A is not None: gci._current = ret
hold(b)
return ret
if Axes.contourf.__doc__ is not None:
contourf.__doc__ = dedent(Axes.contourf.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def csd(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().csd(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.csd.__doc__ is not None:
csd.__doc__ = dedent(Axes.csd.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def errorbar(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().errorbar(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.errorbar.__doc__ is not None:
errorbar.__doc__ = dedent(Axes.errorbar.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def fill(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().fill(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.fill.__doc__ is not None:
fill.__doc__ = dedent(Axes.fill.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def fill_between(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().fill_between(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.fill_between.__doc__ is not None:
fill_between.__doc__ = dedent(Axes.fill_between.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hexbin(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().hexbin(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.hexbin.__doc__ is not None:
hexbin.__doc__ = dedent(Axes.hexbin.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hist(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().hist(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.hist.__doc__ is not None:
hist.__doc__ = dedent(Axes.hist.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hlines(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().hlines(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.hlines.__doc__ is not None:
hlines.__doc__ = dedent(Axes.hlines.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def imshow(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().imshow(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.imshow.__doc__ is not None:
imshow.__doc__ = dedent(Axes.imshow.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def loglog(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().loglog(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.loglog.__doc__ is not None:
loglog.__doc__ = dedent(Axes.loglog.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pcolor(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().pcolor(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.pcolor.__doc__ is not None:
pcolor.__doc__ = dedent(Axes.pcolor.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pcolormesh(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().pcolormesh(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.pcolormesh.__doc__ is not None:
pcolormesh.__doc__ = dedent(Axes.pcolormesh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pie(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().pie(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.pie.__doc__ is not None:
pie.__doc__ = dedent(Axes.pie.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def plot(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().plot(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.plot.__doc__ is not None:
plot.__doc__ = dedent(Axes.plot.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def plot_date(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().plot_date(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.plot_date.__doc__ is not None:
plot_date.__doc__ = dedent(Axes.plot_date.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def psd(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().psd(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.psd.__doc__ is not None:
psd.__doc__ = dedent(Axes.psd.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def quiver(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().quiver(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.quiver.__doc__ is not None:
quiver.__doc__ = dedent(Axes.quiver.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def quiverkey(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().quiverkey(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.quiverkey.__doc__ is not None:
quiverkey.__doc__ = dedent(Axes.quiverkey.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def scatter(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().scatter(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.scatter.__doc__ is not None:
scatter.__doc__ = dedent(Axes.scatter.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def semilogx(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().semilogx(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.semilogx.__doc__ is not None:
semilogx.__doc__ = dedent(Axes.semilogx.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def semilogy(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().semilogy(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.semilogy.__doc__ is not None:
semilogy.__doc__ = dedent(Axes.semilogy.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def specgram(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().specgram(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret[-1]
hold(b)
return ret
if Axes.specgram.__doc__ is not None:
specgram.__doc__ = dedent(Axes.specgram.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spy(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().spy(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.spy.__doc__ is not None:
spy.__doc__ = dedent(Axes.spy.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def stem(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().stem(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.stem.__doc__ is not None:
stem.__doc__ = dedent(Axes.stem.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def step(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().step(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.step.__doc__ is not None:
step.__doc__ = dedent(Axes.step.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def vlines(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().vlines(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.vlines.__doc__ is not None:
vlines.__doc__ = dedent(Axes.vlines.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def xcorr(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().xcorr(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.xcorr.__doc__ is not None:
xcorr.__doc__ = dedent(Axes.xcorr.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def barbs(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().barbs(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.barbs.__doc__ is not None:
barbs.__doc__ = dedent(Axes.barbs.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cla(*args, **kwargs):
ret = gca().cla(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.cla.__doc__ is not None:
cla.__doc__ = dedent(Axes.cla.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def grid(*args, **kwargs):
ret = gca().grid(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.grid.__doc__ is not None:
grid.__doc__ = dedent(Axes.grid.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def legend(*args, **kwargs):
ret = gca().legend(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.legend.__doc__ is not None:
legend.__doc__ = dedent(Axes.legend.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def table(*args, **kwargs):
ret = gca().table(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.table.__doc__ is not None:
table.__doc__ = dedent(Axes.table.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def text(*args, **kwargs):
ret = gca().text(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.text.__doc__ is not None:
text.__doc__ = dedent(Axes.text.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def annotate(*args, **kwargs):
ret = gca().annotate(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.annotate.__doc__ is not None:
annotate.__doc__ = dedent(Axes.annotate.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def autumn():
'''
set the default colormap to autumn and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='autumn')
im = gci()
if im is not None:
im.set_cmap(cm.autumn)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def bone():
'''
set the default colormap to bone and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='bone')
im = gci()
if im is not None:
im.set_cmap(cm.bone)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cool():
'''
set the default colormap to cool and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='cool')
im = gci()
if im is not None:
im.set_cmap(cm.cool)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def copper():
'''
set the default colormap to copper and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='copper')
im = gci()
if im is not None:
im.set_cmap(cm.copper)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def flag():
'''
set the default colormap to flag and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='flag')
im = gci()
if im is not None:
im.set_cmap(cm.flag)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def gray():
'''
set the default colormap to gray and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='gray')
im = gci()
if im is not None:
im.set_cmap(cm.gray)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hot():
'''
set the default colormap to hot and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='hot')
im = gci()
if im is not None:
im.set_cmap(cm.hot)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hsv():
'''
set the default colormap to hsv and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='hsv')
im = gci()
if im is not None:
im.set_cmap(cm.hsv)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def jet():
'''
set the default colormap to jet and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='jet')
im = gci()
if im is not None:
im.set_cmap(cm.jet)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pink():
'''
set the default colormap to pink and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='pink')
im = gci()
if im is not None:
im.set_cmap(cm.pink)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def prism():
'''
set the default colormap to prism and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='prism')
im = gci()
if im is not None:
im.set_cmap(cm.prism)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spring():
'''
set the default colormap to spring and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='spring')
im = gci()
if im is not None:
im.set_cmap(cm.spring)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def summer():
'''
set the default colormap to summer and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='summer')
im = gci()
if im is not None:
im.set_cmap(cm.summer)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def winter():
'''
set the default colormap to winter and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='winter')
im = gci()
if im is not None:
im.set_cmap(cm.winter)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spectral():
'''
set the default colormap to spectral and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='spectral')
im = gci()
if im is not None:
im.set_cmap(cm.spectral)
draw_if_interactive()
| agpl-3.0 |
huzq/scikit-learn | examples/miscellaneous/plot_anomaly_comparison.py | 11 | 6344 | """
============================================================================
Comparing anomaly detection algorithms for outlier detection on toy datasets
============================================================================
This example shows characteristics of different anomaly detection algorithms
on 2D datasets. Datasets contain one or two modes (regions of high density)
to illustrate the ability of algorithms to cope with multimodal data.
For each dataset, 15% of samples are generated as random uniform noise. This
proportion is the value given to the nu parameter of the OneClassSVM and the
contamination parameter of the other outlier detection algorithms.
Decision boundaries between inliers and outliers are displayed in black
except for Local Outlier Factor (LOF) as it has no predict method to be applied
on new data when it is used for outlier detection.
The :class:`~sklearn.svm.OneClassSVM` is known to be sensitive to outliers and
thus does not perform very well for outlier detection. This estimator is best
suited for novelty detection when the training set is not contaminated by
outliers. That said, outlier detection in high-dimension, or without any
assumptions on the distribution of the inlying data is very challenging, and a
One-class SVM might give useful results in these situations depending on the
value of its hyperparameters.
:class:`~sklearn.covariance.EllipticEnvelope` assumes the data is Gaussian and
learns an ellipse. It thus degrades when the data is not unimodal. Notice
however that this estimator is robust to outliers.
:class:`~sklearn.ensemble.IsolationForest` and
:class:`~sklearn.neighbors.LocalOutlierFactor` seem to perform reasonably well
for multi-modal data sets. The advantage of
:class:`~sklearn.neighbors.LocalOutlierFactor` over the other estimators is
shown for the third data set, where the two modes have different densities.
This advantage is explained by the local aspect of LOF, meaning that it only
compares the score of abnormality of one sample with the scores of its
neighbors.
Finally, for the last data set, it is hard to say that one sample is more
abnormal than another sample as they are uniformly distributed in a
hypercube. Except for the :class:`~sklearn.svm.OneClassSVM` which overfits a
little, all estimators present decent solutions for this situation. In such a
case, it would be wise to look more closely at the scores of abnormality of
the samples as a good estimator should assign similar scores to all the
samples.
While these examples give some intuition about the algorithms, this
intuition might not apply to very high dimensional data.
Finally, note that parameters of the models have been here handpicked but
that in practice they need to be adjusted. In the absence of labelled data,
the problem is completely unsupervised so model selection can be a challenge.
"""
# Author: Alexandre Gramfort <[email protected]>
# Albert Thomas <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.datasets import make_moons, make_blobs
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
print(__doc__)
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
# Example settings
n_samples = 300
outliers_fraction = 0.15
n_outliers = int(outliers_fraction * n_samples)
n_inliers = n_samples - n_outliers
# define outlier/anomaly detection methods to be compared
anomaly_algorithms = [
("Robust covariance", EllipticEnvelope(contamination=outliers_fraction)),
("One-Class SVM", svm.OneClassSVM(nu=outliers_fraction, kernel="rbf",
gamma=0.1)),
("Isolation Forest", IsolationForest(contamination=outliers_fraction,
random_state=42)),
("Local Outlier Factor", LocalOutlierFactor(
n_neighbors=35, contamination=outliers_fraction))]
# Define datasets
blobs_params = dict(random_state=0, n_samples=n_inliers, n_features=2)
datasets = [
make_blobs(centers=[[0, 0], [0, 0]], cluster_std=0.5,
**blobs_params)[0],
make_blobs(centers=[[2, 2], [-2, -2]], cluster_std=[0.5, 0.5],
**blobs_params)[0],
make_blobs(centers=[[2, 2], [-2, -2]], cluster_std=[1.5, .3],
**blobs_params)[0],
4. * (make_moons(n_samples=n_samples, noise=.05, random_state=0)[0] -
np.array([0.5, 0.25])),
14. * (np.random.RandomState(42).rand(n_samples, 2) - 0.5)]
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 150),
np.linspace(-7, 7, 150))
plt.figure(figsize=(len(anomaly_algorithms) * 2 + 3, 12.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
rng = np.random.RandomState(42)
for i_dataset, X in enumerate(datasets):
# Add outliers
X = np.concatenate([X, rng.uniform(low=-6, high=6,
size=(n_outliers, 2))], axis=0)
for name, algorithm in anomaly_algorithms:
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
plt.subplot(len(datasets), len(anomaly_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
# fit the data and tag outliers
if name == "Local Outlier Factor":
y_pred = algorithm.fit_predict(X)
else:
y_pred = algorithm.fit(X).predict(X)
# plot the levels lines and the points
if name != "Local Outlier Factor": # LOF does not implement predict
Z = algorithm.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='black')
colors = np.array(['#377eb8', '#ff7f00'])
plt.scatter(X[:, 0], X[:, 1], s=10, color=colors[(y_pred + 1) // 2])
plt.xlim(-7, 7)
plt.ylim(-7, 7)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
vascotenner/holoviews | holoviews/plotting/mpl/widgets.py | 1 | 4365 | import uuid, json, warnings
import param
from ..widgets import NdWidget, SelectionWidget, ScrubberWidget
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from matplotlib.backends.backend_nbagg import CommSocket
except ImportError:
CommSocket = object
class WidgetCommSocket(CommSocket):
"""
CustomCommSocket provides communication between the IPython
kernel and a matplotlib canvas element in the notebook.
A CustomCommSocket is required to delay communication
between the kernel and the canvas element until the widget
has been rendered in the notebook.
"""
def __init__(self, manager):
self.supports_binary = None
self.manager = manager
self.uuid = str(uuid.uuid4())
self.html = "<div id=%r></div>" % self.uuid
def start(self):
try:
# Jupyter/IPython 4.0
from ipykernel.comm import Comm
except:
# IPython <=3.0
from IPython.kernel.comm import Comm
try:
self.comm = Comm('matplotlib', data={'id': self.uuid})
except AttributeError:
raise RuntimeError('Unable to create an IPython notebook Comm '
'instance. Are you in the IPython notebook?')
self.comm.on_msg(self.on_message)
self.comm.on_close(lambda close_message: self.manager.clearup_closed())
class MPLWidget(NdWidget):
CDN = param.Dict(default=dict(NdWidget.CDN, mpld3='https://mpld3.github.io/js/mpld3.v0.3git.js',
d3='https://cdnjs.cloudflare.com/ajax/libs/d3/3.4.13/d3.js'))
extensionjs = param.String(default='mplwidgets.js', doc="""
Optional javascript extension file for a particular backend.""")
template = param.String(default='mplwidgets.jinja')
def __init__(self, plot, renderer=None, **params):
super(MPLWidget, self).__init__(plot, renderer, **params)
if self.renderer.mode == 'nbagg':
self.cached = False
self.initialize_connection(plot)
def _plot_figure(self, idx):
with self.renderer.state():
self.plot.update(idx)
if self.renderer.mode == 'mpld3':
figure_format = 'json'
elif self.renderer.fig == 'auto':
figure_format = self.renderer.params('fig').objects[0]
else:
figure_format = self.renderer.fig
return self.renderer.html(self.plot, figure_format)
def update(self, key):
if self.plot.dynamic == 'bounded' and not isinstance(key, int):
key = tuple(dim.values[k] if dim.values else k
for dim, k in zip(self.mock_obj.kdims, tuple(key)))
if self.renderer.mode == 'nbagg':
if not self.manager._shown:
self.comm.start()
self.manager.add_web_socket(self.comm)
self.manager._shown = True
fig = self.plot[key]
fig.canvas.draw_idle()
return ''
frame = self._plot_figure(key)
if self.renderer.mode == 'mpld3':
return self.encode_frames({0: frame})
else:
return str(frame)
def get_frames(self):
if self.renderer.mode == 'nbagg':
self.manager.display_js()
frames = {0: self.comm.html}
elif self.embed:
return super(MPLWidget, self).get_frames()
else:
frames = {0: self._plot_figure(0)}
return self.encode_frames(frames)
def encode_frames(self, frames):
if self.export_json:
self.save_json(frames)
return {}
elif not isinstance(frames, dict):
pass
elif self.renderer.mode == 'mpld3':
import mpld3
encoder = dict(cls=mpld3._display.NumpyEncoder)
frames = dict(frames)
return json.dumps(frames, **encoder)
else:
frames = dict(frames)
return json.dumps(frames)
def initialize_connection(self, plot):
plot.update(0)
self.manager = self.renderer.get_figure_manager(plot.state)
self.comm = WidgetCommSocket(self.manager)
class MPLSelectionWidget(MPLWidget, SelectionWidget):
pass
class MPLScrubberWidget(MPLWidget, ScrubberWidget):
pass
| bsd-3-clause |
joshloyal/scikit-learn | examples/svm/plot_svm_kernels.py | 329 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
jumkey/PiFmRds | src/generate_waveforms.py | 15 | 2403 | #!/usr/bin/python
# PiFmRds - FM/RDS transmitter for the Raspberry Pi
# Copyright (C) 2014 Christophe Jacquet, F8FTK
#
# See https://github.com/ChristopheJacquet/PiFmRds
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This program generates the waveform of a single biphase symbol
#
# This program uses Pydemod, see https://github.com/ChristopheJacquet/Pydemod
import pydemod.app.rds as rds
import numpy
import scipy.io.wavfile as wavfile
import io
import matplotlib.pyplot as plt
sample_rate = 228000
outc = io.open("waveforms.c", mode="w", encoding="utf8")
outh = io.open("waveforms.h", mode="w", encoding="utf8")
header = u"""
/* This file was automatically generated by "generate_waveforms.py".
(C) 2014 Christophe Jacquet.
Released under the GNU GPL v3 license.
*/
"""
outc.write(header)
outh.write(header)
def generate_bit(name):
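    """Generate the shaped waveform of a single RDS biphase symbol.
    Builds an impulse pair (+1 then -1, 96 samples apart), convolves it with
    the pydemod RDS pulse-shaping filter, then writes the shaped waveform as
    a WAV file and as a C float array in waveforms.c (declared in waveforms.h).
    """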
offset = 240
l = 96
count = 2
sample = numpy.zeros(3*l)
sample[l] = 1
sample[2*l] = -1
# Apply the data-shaping filter
sf = rds.pulse_shaping_filter(96*8, 228000)
shapedSamples = numpy.convolve(sample, sf)
out = shapedSamples[528-288:528+288] #[offset:offset+l*count]
#plt.plot(sf)
#plt.plot(out)
#plt.show()
iout = (out * 20000./max(abs(out)) ).astype(numpy.dtype('>i2'))
wavfile.write(u"waveform_{}.wav".format(name), sample_rate, iout)
outc.write(u"float waveform_{name}[] = {{{values}}};\n\n".format(
name = name,
values = u", ".join(map(unicode, out/2.5))))
# note: need to limit the amplitude so as not to saturate when the biphase
# waveforms are summed
outh.write(u"extern float waveform_{name}[{size}];\n".format(name=name, size=len(out)))
generate_bit("biphase")
outc.close()
outh.close() | gpl-3.0 |
MarkHedleyJones/Electrode_Interface_Model | plot_currentTimeFaradaicCPE_longDischarge.py | 1 | 3407 | import sys
import matplotlib.pyplot as plt
import lib.plot.formatter
import lib.files.dataset
import numpy as np
import os
from pyPdf import PdfFileWriter, PdfFileReader
from matplotlib.ticker import FuncFormatter
def fetch(filename):
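    """Load a faradaic/diode measurement dataset by filename, or return None if the file is missing."""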
filename = 'measurements/pbs/faradaic/diode/' + filename
if os.path.isfile(filename):
data, settings = lib.files.dataset.import_dataset(filename)
return data
else:
return None
def combine_graphs(filenames, outputFilename):
"""
Combines multiple graphs into a single document
"""
print "Starting pdf export"
print filenames
output = PdfFileWriter()
files = []
inputStreams = []
for filename in filenames:
files.append(file(filename, "rb"))
num = len(files) - 1
inputStreams.append(PdfFileReader(files[num]))
output.addPage(inputStreams[num].getPage(0))
outputStream = file(outputFilename, "wb")
output.write(outputStream)
outputStream.close()
for filename in files:
filename.close()
concs = [1.0 / (2 ** x) for x in range(4)]
colours = ['red', 'green', 'blue']
colours = ['blue', 'orange', 'green', 'red']
filenames = []
waitTime = 64
lib.plot.formatter.plot_params['margin']['top'] = 0.05
lib.plot.formatter.plot_params['margin']['left'] = 0.08
lib.plot.formatter.format(style='IEEE')
voltages = [0.08 * (x + 1) for x in range(15)]
voltages = voltages[7:-3]
for i, voltage in enumerate(voltages):
filename = 'measurements/pbs/cpe_charge_10000s/'
filename += str(voltage) + 'V_10000s.npy'
data = np.load(filename)
plt.plot(map(lambda x: x - data['time'][0], data['time']),
data['current'],
label=str(voltage) + 'V')
plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda x, pos: '%1.0f' % (x * 1e6)))
plt.gca().xaxis.set_major_formatter(FuncFormatter(lambda x, pos: '%1.0f' % (x)))
plt.gca().set_ylabel('Current ($\mu A$)')
plt.gca().set_xlabel('Time (seconds)')
plt.legend(frameon=False, loc=0)
plt.gca().set_xlim(0, 10000)
plt.gca().set_yscale('log')
plt.gca().set_ylim(1e-8, 3e-6)
plt.savefig('plot_currentTimeFaradaicCPE_longDischarge_10000s_Stacked_IEEE.pdf',
format='pdf')
#
# def time_current(conc, waitTime):
# filename = str(conc) + 'X-PBS_' + str(waitTime) + 's_stirred.csv'
# data = fetch(filename)
#
# plt.scatter(map(lambda x: x - data['time'][0], data['time']),
# data['current'],
# label=str(conc),
# edgecolors='none',
# s=2)
# plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda x, pos: '%1.0f' % (x * 1e6)))
# plt.gca().xaxis.set_major_formatter(FuncFormatter(lambda x, pos: '%1.0f' % (x)))
# plt.gca().set_ylabel('Current ($\mu A$)')
# plt.gca().set_xlabel('Time (seconds)')
# plt.title(str(conc) + 'X PBS - ' + str(waitTime) + ' second wait time', size=8)
#
# plt.gca().set_xlim(0, 1000)
# plt.gca().set_ylim(-0.5e-5, 1.6e-5)
# filename = '../graphs/currentTimeFaradaicCPE_' + str(conc) + 'X-PBS.pdf'
# plt.savefig(filename, format='pdf')
# return filename
#
# filenames = []
# for conc in concs:
# plt.clf()
# lib.plot.formatter.plot_params['margin']['top'] = 0.1
# lib.plot.formatter.format()
# filenames.append(time_current(conc, 64))
#
# combine_graphs(filenames, '../graphs/currentTimeFaradaicCPE_All.pdf')
| mit |
mblondel/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each component
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
tbattz/logsFlightGearReplay | timeControl.py | 1 | 4536 | '''
Created on 11 Aug 2016
@author: bcub3d-build-ubuntu
'''
from Tkinter import *
import ttk
from threading import Thread
import readLog
import socket
import sendDataGUI
import math
import playbackFunctions
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
import cutData
import plotClasses
import sys
# Setup
if len(sys.argv)>1:
filename = sys.argv[1]
else:
filename = '45.BIN'
overwrite = False # Overwrite csv file
updateRate = 10 # Hz
mainHeaders = sorted(['GPS','IMU','RCIN','RCOU','BARO','POWR','CMD','ARSP','CURR','ATT','MAG','MODE','IMU2','AHR2','POS','MAG2','RATE','CTUN','STAT']) # The main headers to select to plot
# Flight Gear UDP Connection
UDP_IP = '127.0.0.1'
UDP_PORT = 5503
# ============================= Load Data ============================= #
# Load csv file
csvfile = readLog.convert2CSV(filename,overwrite=overwrite)
data = readLog.readCsv(csvfile)
print '------------------------------------------------------------------'
# Create socket to flight gear
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect((UDP_IP,UDP_PORT))
# ======================== Simulation Thread ========================= #
# Start Time Thread
simThread = sendDataGUI.outDataThread(data,sock,updateRate)
simThread.start()
# ========================= Tkinter Control ========================== #
# Create Tkinter Window
master = Tk()
master.wm_title('Pixhawk Log Playback Controls')
# Create scale bar
tickinterval = math.floor((data.timeVec[-1]/20.0)/100.0)*100
maxTime = data.timeVec[-1]
timeScale = Scale(master, from_=0, to=maxTime,tickinterval=tickinterval, orient=HORIZONTAL,length=1000,command=lambda x: simThread.updatePosition(bypass=True,bypassTime=float(timeScale.get()),mode=v))
timeScale.grid(row=0,columnspan=49)
# Mode Radio Button
v, rb = playbackFunctions.createModeRadioButton(master, row=1, column=12)
# Create Start/Pause Buttons
Button(master,text='Start Replay', command=lambda: simThread.startSim(timeScale.get())).grid(row=1,column=38)
Button(master,text='Pause Replay', command=simThread.pauseSim).grid(row=1,column=39)
# "Go To" Buttons and Boxes
# Go to Entry Box
e = Entry(master,width=6)
e.grid(row=1,column=1)
e.insert(0,"0")
# Create Go To Button
Button(master,text='Go to:', command=lambda: playbackFunctions.goToButton(e, timeScale, simThread)).grid(row=1,column=0)
# Seconds Label
l = Label(master,text='s')
l.grid(row=1,column=2,sticky=W)
# Time Marking
# Label
l2 = Label(master,text="Mark [Set,Jump]:")
l2.grid(row=1,column=42,sticky=E)
# Button Set 1
c1 = playbackFunctions.createMark(master,'green',10,990)
s1 = Button(master,text='S1',bg='green',command=lambda: playbackFunctions.set1(timeScale, c1, master, maxTime, simThread)).grid(row=1,column=43)
j1 = Button(master,text="J1",bg='green',command=lambda: simThread.jump1(timeScale)).grid(row=1,column=44)
# Button Set 2
c2 = playbackFunctions.createMark(master,'red',10,990)
s2 = Button(master,text='S2',bg='red',command=lambda: playbackFunctions.set2(timeScale, c2, master, maxTime, simThread)).grid(row=1,column=45)
j2 = Button(master,text="J2",bg='red',command=lambda: simThread.jump2(timeScale)).grid(row=1,column=46)
# Button Set 3
c3 = playbackFunctions.createMark(master,'cyan',10,990)
s3 = Button(master,text='S3',bg='cyan',command=lambda: playbackFunctions.set3(timeScale, c3, master, maxTime, simThread)).grid(row=1,column=47)
j3 = Button(master,text="J3",bg='cyan',command=lambda: simThread.jump3(timeScale)).grid(row=1,column=48)
# Separator
ttk.Separator(master,orient=HORIZONTAL).grid(row=2,columnspan=49,sticky='ew')
# ======================== Tkinter Plotting ========================= #
# Create Plotting Frame
plotID = 1
master.plotFrame = []
master = plotClasses.addNewFigure(plotID,master,mainHeaders,data,simThread)
# Add time selector (First Plot Only)
master.plotFrame[0] = plotClasses.addTimeSelector(master.plotFrame[0])
# Add Forever y Limits Checkbox
master.plotFrame[0] = plotClasses.addForeverYLimits(master.plotFrame[0])
# ========================= Tkinter Loop ============================ #
while True:
if simThread.running:
timeScale.set(simThread.currTime)
# Update Plots
for plotFrame in master.plotFrame:
plotFrame.updatePlot()
plotFrame.canvas.draw()
master.update_idletasks()
master.update()
# Close Socket
sock.close()
| gpl-3.0 |
Eric89GXL/scikit-learn | sklearn/tree/tests/test_export.py | 37 | 2897 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
def test_graphviz_toy():
"""Check correctness of export_graphviz"""
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = "digraph Tree {\n" \
"0 [label=\"X[0] <= 0.0000\\ngini = 0.5\\n" \
"samples = 6\", shape=\"box\"] ;\n" \
"1 [label=\"gini = 0.0000\\nsamples = 3\\n" \
"value = [ 3. 0.]\", shape=\"box\"] ;\n" \
"0 -> 1 ;\n" \
"2 [label=\"gini = 0.0000\\nsamples = 3\\n" \
"value = [ 0. 3.]\", shape=\"box\"] ;\n" \
"0 -> 2 ;\n" \
"}"
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = "digraph Tree {\n" \
"0 [label=\"feature0 <= 0.0000\\ngini = 0.5\\n" \
"samples = 6\", shape=\"box\"] ;\n" \
"1 [label=\"gini = 0.0000\\nsamples = 3\\n" \
"value = [ 3. 0.]\", shape=\"box\"] ;\n" \
"0 -> 1 ;\n" \
"2 [label=\"gini = 0.0000\\nsamples = 3\\n" \
"value = [ 0. 3.]\", shape=\"box\"] ;\n" \
"0 -> 2 ;\n" \
"}"
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0)
contents1 = out.getvalue()
contents2 = "digraph Tree {\n" \
"0 [label=\"X[0] <= 0.0000\\ngini = 0.5\\n" \
"samples = 6\", shape=\"box\"] ;\n" \
"1 [label=\"(...)\", shape=\"box\"] ;\n" \
"0 -> 1 ;\n" \
"2 [label=\"(...)\", shape=\"box\"] ;\n" \
"0 -> 2 ;\n" \
"}"
assert_equal(contents1, contents2)
def test_graphviz_errors():
"""Check for errors of export_graphviz"""
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
if __name__ == "__main__":
import nose
nose.runmodule()
| bsd-3-clause |
nvoron23/statsmodels | statsmodels/tools/_testing.py | 29 | 4809 | """Testing helper functions
Warning: current status experimental, mostly copy paste
Warning: these functions will be changed without warning as the need
during refactoring arises.
The first group of functions provide consistency checks
"""
import numpy as np
from numpy.testing import assert_allclose, assert_
from nose import SkipTest
# the following are copied from
# statsmodels.base.tests.test_generic_methods.CheckGenericMixin
# and only adjusted to work as standalone functions
def check_ttest_tvalues(results):
# test that t_test has same results a params, bse, tvalues, ...
res = results
mat = np.eye(len(res.params))
tt = res.t_test(mat)
assert_allclose(tt.effect, res.params, rtol=1e-12)
# TODO: tt.sd and tt.tvalue are 2d also for single regressor, squeeze
assert_allclose(np.squeeze(tt.sd), res.bse, rtol=1e-10)
assert_allclose(np.squeeze(tt.tvalue), res.tvalues, rtol=1e-12)
assert_allclose(tt.pvalue, res.pvalues, rtol=5e-10)
assert_allclose(tt.conf_int(), res.conf_int(), rtol=1e-10)
# test params table frame returned by t_test
table_res = np.column_stack((res.params, res.bse, res.tvalues,
res.pvalues, res.conf_int()))
table1 = np.column_stack((tt.effect, tt.sd, tt.tvalue, tt.pvalue,
tt.conf_int()))
table2 = tt.summary_frame().values
assert_allclose(table2, table_res, rtol=1e-12)
# move this to test_attributes ?
assert_(hasattr(res, 'use_t'))
tt = res.t_test(mat[0])
tt.summary() # smoke test for #1323
assert_allclose(tt.pvalue, res.pvalues[0], rtol=5e-10)
def check_ftest_pvalues(results):
res = results
use_t = res.use_t
k_vars = len(res.params)
# check default use_t
pvals = [res.wald_test(np.eye(k_vars)[k], use_f=use_t).pvalue
for k in range(k_vars)]
assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25)
    # automatic use_f based on results class use_t
pvals = [res.wald_test(np.eye(k_vars)[k]).pvalue
for k in range(k_vars)]
assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25)
# label for pvalues in summary
string_use_t = 'P>|z|' if use_t is False else 'P>|t|'
summ = str(res.summary())
assert_(string_use_t in summ)
# try except for models that don't have summary2
try:
summ2 = str(res.summary2())
except AttributeError:
summ2 = None
if summ2 is not None:
assert_(string_use_t in summ2)
# TODO The following is not (yet) guaranteed across models
#@knownfailureif(True)
def check_fitted(results):
# ignore wrapper for isinstance check
from statsmodels.genmod.generalized_linear_model import GLMResults
from statsmodels.discrete.discrete_model import DiscreteResults
# FIXME: work around GEE has no wrapper
if hasattr(results, '_results'):
results = results._results
else:
results = results
if (isinstance(results, GLMResults) or
isinstance(results, DiscreteResults)):
raise SkipTest
res = results
fitted = res.fittedvalues
assert_allclose(res.model.endog - fitted, res.resid, rtol=1e-12)
assert_allclose(fitted, res.predict(), rtol=1e-12)
def check_predict_types(results):
res = results
# squeeze to make 1d for single regressor test case
p_exog = np.squeeze(np.asarray(res.model.exog[:2]))
# ignore wrapper for isinstance check
from statsmodels.genmod.generalized_linear_model import GLMResults
from statsmodels.discrete.discrete_model import DiscreteResults
# FIXME: work around GEE has no wrapper
if hasattr(results, '_results'):
results = results._results
else:
results = results
if (isinstance(results, GLMResults) or
isinstance(results, DiscreteResults)):
# SMOKE test only TODO
res.predict(p_exog)
res.predict(p_exog.tolist())
res.predict(p_exog[0].tolist())
else:
fitted = res.fittedvalues[:2]
assert_allclose(fitted, res.predict(p_exog), rtol=1e-12)
# this needs reshape to column-vector:
assert_allclose(fitted, res.predict(np.squeeze(p_exog).tolist()),
rtol=1e-12)
# only one prediction:
assert_allclose(fitted[:1], res.predict(p_exog[0].tolist()),
rtol=1e-12)
assert_allclose(fitted[:1], res.predict(p_exog[0]),
rtol=1e-12)
# predict doesn't preserve DataFrame, e.g. dot converts to ndarray
#import pandas
#predicted = res.predict(pandas.DataFrame(p_exog))
#assert_(isinstance(predicted, pandas.DataFrame))
#assert_allclose(predicted, fitted, rtol=1e-12)
| bsd-3-clause |
cbmoore/statsmodels | examples/python/contrasts.py | 33 | 8722 |
## Contrasts Overview
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
# This document is based heavily on this excellent resource from UCLA http://www.ats.ucla.edu/stat/r/library/contrast_coding.htm
# A categorical variable of K categories, or levels, usually enters a regression as a sequence of K-1 dummy variables. This amounts to a linear hypothesis on the level means. That is, each test statistic for these variables amounts to testing whether the mean for that level is statistically significantly different from the mean of the base category. This dummy coding is called Treatment coding in R parlance, and we will follow this convention. There are, however, different coding methods that amount to different sets of linear hypotheses.
#
# In fact, the dummy coding is not technically a contrast coding. This is because the dummy variables add to one and are not functionally independent of the model's intercept. On the other hand, a set of *contrasts* for a categorical variable with `k` levels is a set of `k-1` functionally independent linear combinations of the factor level means that are also independent of the sum of the dummy variables. The dummy coding isn't wrong *per se*. It captures all of the coefficients, but it complicates matters when the model assumes independence of the coefficients such as in ANOVA. Linear regression models do not assume independence of the coefficients and thus dummy coding is often the only coding that is taught in this context.
#
# To have a look at the contrast matrices in Patsy, we will use data from UCLA ATS. First let's load the data.
##### Example Data
import pandas as pd
url = 'http://www.ats.ucla.edu/stat/data/hsb2.csv'
hsb2 = pd.read_table(url, delimiter=",")
hsb2.head(10)
# It will be instructive to look at the mean of the dependent variable, write, for each level of race (1 = Hispanic, 2 = Asian, 3 = African American, 4 = Caucasian).
hsb2.groupby('race')['write'].mean()
##### Treatment (Dummy) Coding
# Dummy coding is likely the most well known coding scheme. It compares each level of the categorical variable to a base reference level. The base reference level is the value of the intercept. It is the default contrast in Patsy for unordered categorical factors. The Treatment contrast matrix for race would be
from patsy.contrasts import Treatment
levels = [1,2,3,4]
contrast = Treatment(reference=0).code_without_intercept(levels)
print(contrast.matrix)
# Here we used `reference=0`, which implies that the first level, Hispanic, is the reference category against which the other level effects are measured. As mentioned above, the columns do not sum to zero and are thus not independent of the intercept. To be explicit, let's look at how this would encode the `race` variable.
hsb2.race.head(10)
print(contrast.matrix[hsb2.race-1, :][:20])
sm.categorical(hsb2.race.values)
# This is a bit of a trick, as the `race` category conveniently maps to zero-based indices. If it does not, this conversion happens under the hood, so this won't work in general but nonetheless is a useful exercise to fix ideas. The below illustrates the output using the three contrasts above
from statsmodels.formula.api import ols
mod = ols("write ~ C(race, Treatment)", data=hsb2)
res = mod.fit()
print(res.summary())
# We explicitly gave the contrast for race; however, since Treatment is the default, we could have omitted this.
#### Simple Coding
# Like Treatment Coding, Simple Coding compares each level to a fixed reference level. However, with simple coding, the intercept is the grand mean of all the levels of the factors. Patsy doesn't have the Simple contrast included, but you can easily define your own contrasts. To do so, write a class that contains a code_with_intercept and a code_without_intercept method that returns a patsy.contrast.ContrastMatrix instance
from patsy.contrasts import ContrastMatrix
def _name_levels(prefix, levels):
return ["[%s%s]" % (prefix, level) for level in levels]
class Simple(object):
def _simple_contrast(self, levels):
nlevels = len(levels)
contr = -1./nlevels * np.ones((nlevels, nlevels-1))
contr[1:][np.diag_indices(nlevels-1)] = (nlevels-1.)/nlevels
return contr
def code_with_intercept(self, levels):
contrast = np.column_stack((np.ones(len(levels)),
self._simple_contrast(levels)))
return ContrastMatrix(contrast, _name_levels("Simp.", levels))
def code_without_intercept(self, levels):
contrast = self._simple_contrast(levels)
return ContrastMatrix(contrast, _name_levels("Simp.", levels[:-1]))
hsb2.groupby('race')['write'].mean().mean()
contrast = Simple().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(race, Simple)", data=hsb2)
res = mod.fit()
print(res.summary())
#### Sum (Deviation) Coding
# Sum coding compares the mean of the dependent variable for a given level to the overall mean of the dependent variable over all the levels. That is, it uses contrasts between each of the first k-1 levels and level k. In this example, level 1 is compared to all the others, level 2 to all the others, and level 3 to all the others.
from patsy.contrasts import Sum
contrast = Sum().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(race, Sum)", data=hsb2)
res = mod.fit()
print(res.summary())
# This corresponds to a parameterization that forces all the coefficients to sum to zero. Notice that the intercept here is the grand mean where the grand mean is the mean of means of the dependent variable by each level.
hsb2.groupby('race')['write'].mean().mean()
#### Backward Difference Coding
# In backward difference coding, the mean of the dependent variable for a level is compared with the mean of the dependent variable for the prior level. This type of coding may be useful for a nominal or an ordinal variable.
from patsy.contrasts import Diff
contrast = Diff().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(race, Diff)", data=hsb2)
res = mod.fit()
print(res.summary())
# For example, here the coefficient on level 1 is the mean of `write` at level 2 compared with the mean at level 1. Ie.,
res.params["C(race, Diff)[D.1]"]
hsb2.groupby('race').mean()["write"][2] - hsb2.groupby('race').mean()["write"][1]
#### Helmert Coding
# Our version of Helmert coding is sometimes referred to as Reverse Helmert Coding. The mean of the dependent variable for a level is compared to the mean of the dependent variable over all previous levels. Hence, the name 'reverse' being sometimes applied to differentiate from forward Helmert coding. This comparison does not make much sense for a nominal variable such as race, but we would use the Helmert contrast like so:
from patsy.contrasts import Helmert
contrast = Helmert().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(race, Helmert)", data=hsb2)
res = mod.fit()
print(res.summary())
# To illustrate, the comparison on level 4 is the mean of the dependent variable at the previous three levels taken from the mean at level 4
grouped = hsb2.groupby('race')
grouped.mean()["write"][4] - grouped.mean()["write"][:3].mean()
# As you can see, these are only equal up to a constant. Other versions of the Helmert contrast give the actual difference in means. Regardless, the hypothesis tests are the same.
k = 4
1./k * (grouped.mean()["write"][k] - grouped.mean()["write"][:k-1].mean())
k = 3
1./k * (grouped.mean()["write"][k] - grouped.mean()["write"][:k-1].mean())
#### Orthogonal Polynomial Coding
# The coefficients taken on by polynomial coding for `k=4` levels are the linear, quadratic, and cubic trends in the categorical variable. The categorical variable here is assumed to be represented by an underlying, equally spaced numeric variable. Therefore, this type of encoding is used only for ordered categorical variables with equal spacing. In general, the polynomial contrast produces polynomials of order `k-1`. Since `race` is not an ordered factor variable let's use `read` as an example. First we need to create an ordered categorical from `read`.
hsb2['readcat'] = pd.cut(hsb2.read, bins=3)
hsb2.groupby('readcat').mean()['write']
from patsy.contrasts import Poly
levels = hsb2.readcat.unique().tolist()
contrast = Poly().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(readcat, Poly)", data=hsb2)
res = mod.fit()
print(res.summary())
# As you can see, readcat has a significant linear effect on the dependent variable `write` but not a significant quadratic or cubic effect.
| bsd-3-clause |
piyush8311/ns3-arp | src/core/examples/sample-rng-plot.py | 188 | 1246 | # -*- Mode:Python; -*-
# /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
# Demonstrate use of ns-3 as a random number generator integrated with
# plotting tools; adapted from Gustavo Carneiro's ns-3 tutorial
import numpy as np
import matplotlib.pyplot as plt
import ns.core
# mu, var = 100, 225
rng = ns.core.NormalVariable(100.0, 225.0)
x = [rng.GetValue() for t in range(10000)]
# the histogram of the data
n, bins, patches = plt.hist(x, 50, normed=1, facecolor='g', alpha=0.75)
plt.title('ns-3 histogram')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
| gpl-2.0 |
tomlof/scikit-learn | sklearn/preprocessing/__init__.py | 268 | 1319 | """
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from ._function_transformer import FunctionTransformer
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
__all__ = [
'Binarizer',
'FunctionTransformer',
'Imputer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'label_binarize',
]
| bsd-3-clause |
drammock/expyfun | expyfun/visual/_visual.py | 2 | 46036 | """
Visual stimulus design
======================
Tools for drawing shapes and text on the screen.
"""
# Authors: Dan McCloy <[email protected]>
# Eric Larson <[email protected]>
# Ross Maddox <[email protected]>
#
# License: BSD (3-clause)
from ctypes import (cast, pointer, POINTER, create_string_buffer, c_char,
c_int, c_float)
from functools import partial
import re
import warnings
import numpy as np
try:
from PyOpenGL import gl
except ImportError:
from pyglet import gl
from .._utils import check_units, string_types, logger, _new_pyglet
def _convert_color(color, byte=True):
"""Convert 3- or 4-element color into OpenGL usable color"""
from matplotlib.colors import colorConverter
color = (0., 0., 0., 0.) if color is None else color
color = 255 * np.array(colorConverter.to_rgba(color))
color = color.astype(np.uint8)
if not byte:
color = (color / 255.).astype(np.float32)
return tuple(color)
def _replicate_color(color, pts):
"""Convert single color to color array for OpenGL trianglulations"""
return np.tile(color, len(pts) // 2)
##############################################################################
# Text
class Text(object):
"""A text object.
Parameters
----------
ec : instance of ExperimentController
Parent EC.
text : str
The text to display.
pos : array
2-element array consisting of X- and Y-position coordinates.
color : matplotlib Color
Color of the text.
font_name : str
Font to use.
font_size : float
Font size (points) to use.
height : float | None
Height of the text region. None will automatically allocate the
necessary size.
width : float | None | str
Width (in pixels) of the text region. `'auto'` will allocate 80% of
the screen width, useful for instructions. None will automatically
allocate sufficient space, but not that this disables text wrapping.
anchor_x : str
Horizontal text anchor (e.g., ``'center'``).
anchor_y : str
Vertical text anchor (e.g., ``'center'``).
units : str
        Units to use. These will apply to all spatial aspects of the drawing
        shape, e.g. size and position. See ``check_units`` for options.
wrap : bool
Whether or not the text will wrap to fit in screen, appropriate for
multiline text. Inappropriate for text requiring precise positioning.
attr : bool
Should the text be interpreted with pyglet's ``decode_attributed``
method? This allows inline formatting for text color, e.g.,
``'This is {color (255, 0, 0, 255)}red text'``. If ``attr=True``, the
values of ``font_name``, ``font_size``, and ``color`` are automatically
prepended to ``text`` (though they will be overridden by any inline
formatting within ``text`` itself).
Returns
-------
text : instance of Text
The text object.
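    Notes
    -----
    A minimal usage sketch (assuming ``ec`` is an active ExperimentController
    and drawing happens inside its usual flip cycle)::
        txt = Text(ec, 'Hello, world', pos=(0, 0), color='white')
        txt.draw()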
"""
def __init__(self, ec, text, pos=(0, 0), color='white',
font_name='Arial', font_size=24, height=None,
width='auto', anchor_x='center', anchor_y='center',
units='norm', wrap=False, attr=True):
import pyglet
pos = np.array(pos)[:, np.newaxis]
pos = ec._convert_units(pos, units, 'pix')
if width == 'auto':
width = float(ec.window_size_pix[0]) * 0.8
elif isinstance(width, string_types):
raise ValueError('"width", if str, must be "auto"')
self._attr = attr
if wrap:
text = text + '\n ' # weird Pyglet bug
if self._attr:
preamble = ('{{font_name \'{}\'}}{{font_size {}}}{{color {}}}'
'').format(font_name, font_size, _convert_color(color))
doc = pyglet.text.decode_attributed(preamble + text)
self._text = pyglet.text.layout.TextLayout(
doc, width=width, height=height, multiline=wrap,
dpi=int(ec.dpi))
else:
self._text = pyglet.text.Label(
text, width=width, height=height, multiline=wrap,
dpi=int(ec.dpi))
self._text.color = _convert_color(color)
self._text.font_name = font_name
self._text.font_size = font_size
self._text.x = pos[0]
self._text.y = pos[1]
self._text.anchor_x = anchor_x
self._text.anchor_y = anchor_y
def set_color(self, color):
"""Set the text color
Parameters
----------
color : matplotlib Color | None
The color. Use None for no color.
"""
if self._attr:
self._text.document.set_style(0, len(self._text.document.text),
{'color': _convert_color(color)})
else:
self._text.color = _convert_color(color)
def draw(self):
"""Draw the object to the display buffer"""
self._text.draw()
##############################################################################
# Triangulations
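# Minimal GLSL shader pair used by _Triangular below: the vertex shader maps
# 2D pixel positions through the u_view matrix, and the fragment shader fills
# fragments with a single uniform color (u_color).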
tri_vert = """
#version 120
attribute vec2 a_position;
uniform mat4 u_view;
void main()
{
gl_Position = u_view * vec4(a_position, 0.0, 1.0);
}
"""
tri_frag = """
#version 120
uniform vec4 u_color;
void main()
{
gl_FragColor = u_color;
}
"""
def _check_log(obj, func):
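    """Fetch an OpenGL shader/program info log via ``func`` and raise RuntimeError on errors."""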
log = create_string_buffer(4096)
ptr = cast(pointer(log), POINTER(c_char))
func(obj, 4096, pointer(c_int()), ptr)
message = log.value
message = message.decode()
if message.startswith('No errors') or \
re.match('.*shader was successfully compiled.*', message) or \
message == 'Vertex shader(s) linked, fragment shader(s) linked.\n':
pass
elif message:
raise RuntimeError(message)
class _Triangular(object):
"""Super class for objects that use triangulations and/or lines"""
def __init__(self, ec, fill_color, line_color, line_width, line_loop):
self._ec = ec
self._line_width = line_width
self._line_loop = line_loop # whether or not lines drawn are looped
# initialize program and shaders
self._program = gl.glCreateProgram()
vertex = gl.glCreateShader(gl.GL_VERTEX_SHADER)
buf = create_string_buffer(tri_vert.encode('ASCII'))
ptr = cast(pointer(pointer(buf)), POINTER(POINTER(c_char)))
gl.glShaderSource(vertex, 1, ptr, None)
gl.glCompileShader(vertex)
_check_log(vertex, gl.glGetShaderInfoLog)
fragment = gl.glCreateShader(gl.GL_FRAGMENT_SHADER)
buf = create_string_buffer(tri_frag.encode('ASCII'))
ptr = cast(pointer(pointer(buf)), POINTER(POINTER(c_char)))
gl.glShaderSource(fragment, 1, ptr, None)
gl.glCompileShader(fragment)
_check_log(fragment, gl.glGetShaderInfoLog)
gl.glAttachShader(self._program, vertex)
gl.glAttachShader(self._program, fragment)
gl.glLinkProgram(self._program)
_check_log(self._program, gl.glGetProgramInfoLog)
gl.glDetachShader(self._program, vertex)
gl.glDetachShader(self._program, fragment)
gl.glUseProgram(self._program)
# Prepare buffers and bind attributes
loc = gl.glGetUniformLocation(self._program, b'u_view')
view = ec.window_size_pix
view = np.diag([2. / view[0], 2. / view[1], 1., 1.])
view[-1, :2] = -1
view = view.astype(np.float32).ravel()
gl.glUniformMatrix4fv(loc, 1, False, (c_float * 16)(*view))
self._counts = dict()
self._colors = dict()
self._buffers = dict()
self._points = dict()
self._tris = dict()
for kind in ('line', 'fill'):
self._counts[kind] = 0
self._colors[kind] = (0., 0., 0., 0.)
self._buffers[kind] = dict(array=gl.GLuint())
gl.glGenBuffers(1, pointer(self._buffers[kind]['array']))
self._buffers['fill']['index'] = gl.GLuint()
gl.glGenBuffers(1, pointer(self._buffers['fill']['index']))
gl.glUseProgram(0)
self.set_fill_color(fill_color)
self.set_line_color(line_color)
def _set_points(self, points, kind, tris):
"""Set fill and line points."""
        if points is None:
            self._counts[kind] = 0
            return
points = np.asarray(points, dtype=np.float32, order='C')
assert points.ndim == 2 and points.shape[1] == 2
array_count = points.size // 2 if kind == 'line' else points.size
if kind == 'fill':
assert tris is not None
tris = np.asarray(tris, dtype=np.uint32, order='C')
assert tris.ndim == 1 and tris.size % 3 == 0
tris.shape = (-1, 3)
assert (tris < len(points)).all()
self._tris[kind] = tris
del tris
self._points[kind] = points
del points
gl.glUseProgram(self._program)
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self._buffers[kind]['array'])
gl.glBufferData(gl.GL_ARRAY_BUFFER, self._points[kind].size * 4,
self._points[kind].tobytes(),
gl.GL_STATIC_DRAW)
if kind == 'line':
self._counts[kind] = array_count
if kind == 'fill':
self._counts[kind] = self._tris[kind].size
gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER,
self._buffers[kind]['index'])
gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER,
self._tris[kind].size * 4,
self._tris[kind].tobytes(),
gl.GL_STATIC_DRAW)
gl.glUseProgram(0)
def _set_fill_points(self, points, tris):
self._set_points(points, 'fill', tris)
def _set_line_points(self, points):
self._set_points(points, 'line', None)
def set_fill_color(self, fill_color):
"""Set the object color
Parameters
----------
fill_color : matplotlib Color | None
The fill color. Use None for no fill.
"""
self._colors['fill'] = _convert_color(fill_color, byte=False)
def set_line_color(self, line_color):
"""Set the object color
Parameters
----------
line_color : matplotlib Color | None
            The line color. Use None for no line.
"""
self._colors['line'] = _convert_color(line_color, byte=False)
def set_line_width(self, line_width):
"""Set the line width in pixels
Parameters
----------
line_width : float
The line width. Must be given in pixels. Due to OpenGL
limitations, it must be `0.0 <= line_width <= 10.0`.
"""
line_width = float(line_width)
if not (0.0 <= line_width <= 10.0):
raise ValueError('line_width must be between 0 and 10')
self._line_width = line_width
def draw(self):
"""Draw the object to the display buffer."""
gl.glUseProgram(self._program)
for kind in ('fill', 'line'):
if self._counts[kind] > 0:
if kind == 'line':
if self._line_width <= 0.0:
continue
gl.glLineWidth(self._line_width)
if self._line_loop:
mode = gl.GL_LINE_LOOP
else:
mode = gl.GL_LINE_STRIP
cmd = partial(gl.glDrawArrays, mode, 0, self._counts[kind])
else:
gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER,
self._buffers[kind]['index'])
cmd = partial(gl.glDrawElements, gl.GL_TRIANGLES,
self._counts[kind], gl.GL_UNSIGNED_INT, 0)
gl.glBindBuffer(gl.GL_ARRAY_BUFFER,
self._buffers[kind]['array'])
loc_pos = gl.glGetAttribLocation(self._program, b'a_position')
gl.glEnableVertexAttribArray(loc_pos)
gl.glVertexAttribPointer(loc_pos, 2, gl.GL_FLOAT, gl.GL_FALSE,
0, 0)
loc_col = gl.glGetUniformLocation(self._program, b'u_color')
gl.glUniform4f(loc_col, *self._colors[kind])
cmd()
# The following line is probably only necessary because
# Pyglet makes some assumptions about the GL state that
# it perhaps shouldn't. Without it, Text might not
# render properly (see #252)
gl.glDisableVertexAttribArray(loc_pos)
gl.glUseProgram(0)
class Line(_Triangular):
"""A connected set of line segments
Parameters
----------
ec : instance of ExperimentController
Parent EC.
coords : array-like
2 x N set of X, Y coordinates.
units : str
        Units to use. These will apply to all spatial aspects of the drawing
        shape, e.g. size and position. See ``check_units`` for options.
line_color : matplotlib Color
Color of the line.
line_width : float
Line width in pixels.
line_loop : bool
If True, the last point will be joined to the first in a loop.
Returns
-------
line : instance of Line
The line object.
"""
def __init__(self, ec, coords, units='norm', line_color='white',
line_width=1.0, line_loop=False):
_Triangular.__init__(self, ec, fill_color=None, line_color=line_color,
line_width=line_width, line_loop=line_loop)
self.set_coords(coords, units)
self.set_line_color(line_color)
def set_coords(self, coords, units='norm'):
"""Set line coordinates
Parameters
----------
coords : array-like
2 x N set of X, Y coordinates.
units : str
Units to use.
"""
check_units(units)
coords = np.array(coords, dtype=float)
if coords.ndim == 1:
coords = coords[:, np.newaxis]
if coords.ndim != 2 or coords.shape[0] != 2:
raise ValueError('coords must be a vector of length 2, or an '
'array with 2 dimensions (with first dimension '
'having length 2')
self._set_line_points(self._ec._convert_units(coords, units, 'pix').T)
class Triangle(_Triangular):
"""A triangle
Parameters
----------
ec : instance of ExperimentController
Parent EC.
coords : array-like
2 x 3 set of X, Y coordinates.
units : str
        Units to use. These will apply to all spatial aspects of the drawing
        shape, e.g. size and position. See ``check_units`` for options.
fill_color : matplotlib Color
Color of the triangle.
line_color : matplotlib Color | None
Color of the border line. None is transparent.
line_width : float
Line width in pixels.
Returns
-------
line : instance of Triangle
The triangle object.
"""
def __init__(self, ec, coords, units='norm', fill_color='white',
line_color=None, line_width=1.0):
_Triangular.__init__(self, ec, fill_color=fill_color,
line_color=line_color, line_width=line_width,
line_loop=True)
self.set_coords(coords, units)
self.set_fill_color(fill_color)
def set_coords(self, coords, units='norm'):
"""Set triangle coordinates
Parameters
----------
coords : array-like
2 x 3 set of X, Y coordinates.
units : str
Units to use.
"""
check_units(units)
coords = np.array(coords, dtype=float)
if coords.shape != (2, 3):
raise ValueError('coords must be an array of shape (2, 3), got %s'
% (coords.shape,))
points = self._ec._convert_units(coords, units, 'pix')
points = points.T
self._set_fill_points(points, [0, 1, 2])
self._set_line_points(points)
class Rectangle(_Triangular):
"""A rectangle.
Parameters
----------
ec : instance of ExperimentController
Parent EC.
pos : array-like
4-element array-like with X, Y center and width, height where x and y
are coordinates of the center.
units : str
        Units to use. These will apply to all spatial aspects of the drawing
        shape, e.g. size and position. See ``check_units`` for options.
fill_color : matplotlib Color | None
Color to fill with. None is transparent.
line_color : matplotlib Color | None
Color of the border line. None is transparent.
line_width : float
Line width in pixels.
Returns
-------
line : instance of Rectangle
The rectangle object.
"""
def __init__(self, ec, pos, units='norm', fill_color='white',
line_color=None, line_width=1.0):
_Triangular.__init__(self, ec, fill_color=fill_color,
line_color=line_color, line_width=line_width,
line_loop=True)
self.set_pos(pos, units)
def set_pos(self, pos, units='norm'):
"""Set the position of the rectangle
Parameters
----------
pos : array-like
X, Y, width, height of the rectangle.
units : str
Units to use. See ``check_units`` for options.
"""
check_units(units)
# do this in normalized units, then convert
pos = np.array(pos)
if not (pos.ndim == 1 and pos.size == 4):
raise ValueError('pos must be a 4-element array-like vector')
self._pos = pos
w = self._pos[2]
h = self._pos[3]
points = np.array([[-w / 2., -h / 2.],
[-w / 2., h / 2.],
[w / 2., h / 2.],
[w / 2., -h / 2.]]).T
points += np.array(self._pos[:2])[:, np.newaxis]
points = self._ec._convert_units(points, units, 'pix')
points = points.T
self._set_fill_points(points, [0, 1, 2, 0, 2, 3])
self._set_line_points(points) # all 4 points used for line drawing
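# A usage sketch for Rectangle (comment only). ``ec`` is assumed to be an
# existing ExperimentController; ``pos`` gives the X, Y center plus width and
# height in the chosen units.
#
#     rect = Rectangle(ec, pos=[0., 0., 0.4, 0.2], units='norm',
#                      fill_color='gray', line_color='w', line_width=1.5)
#     rect.draw()
#     ec.flip()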
class Diamond(_Triangular):
"""A diamond.
Parameters
----------
ec : instance of ExperimentController
Parent EC.
pos : array-like
4-element array-like with X, Y center and width, height where x and y
are coordinates of the center.
units : str
        Units to use. These will apply to all spatial aspects of the
        drawing, e.g. size and position. See ``check_units`` for options.
fill_color : matplotlib Color | None
Color to fill with. None is transparent.
line_color : matplotlib Color | None
Color of the border line. None is transparent.
line_width : float
Line width in pixels.
Returns
-------
    diamond : instance of Diamond
        The diamond object.
"""
def __init__(self, ec, pos, units='norm', fill_color='white',
line_color=None, line_width=1.0):
_Triangular.__init__(self, ec, fill_color=fill_color,
line_color=line_color, line_width=line_width,
line_loop=True)
self.set_pos(pos, units)
def set_pos(self, pos, units='norm'):
"""Set the position of the rectangle
Parameters
----------
pos : array-like
            X, Y, width, height of the diamond.
units : str
Units to use. See ``check_units`` for options.
"""
check_units(units)
# do this in normalized units, then convert
pos = np.array(pos)
if not (pos.ndim == 1 and pos.size == 4):
raise ValueError('pos must be a 4-element array-like vector')
self._pos = pos
w = self._pos[2]
h = self._pos[3]
points = np.array([[w / 2., 0.],
[0., h / 2.],
[-w / 2., 0.],
[0., -h / 2.]]).T
points += np.array(self._pos[:2])[:, np.newaxis]
points = self._ec._convert_units(points, units, 'pix')
points = points.T
self._set_fill_points(points, [0, 1, 2, 0, 2, 3])
self._set_line_points(points)
class Circle(_Triangular):
"""A circle or ellipse.
Parameters
----------
ec : instance of ExperimentController
Parent EC.
radius : float | array-like
Radius of the circle. Can be array-like with two elements to
make an ellipse.
pos : array-like
2-element array-like with X, Y center positions.
units : str
        Units to use. These will apply to all spatial aspects of the
        drawing, e.g. size and position. See ``check_units`` for options.
n_edges : int
Number of edges to use (must be >= 4) to approximate a circle.
fill_color : matplotlib Color | None
Color to fill with. None is transparent.
line_color : matplotlib Color | None
Color of the border line. None is transparent.
line_width : float
Line width in pixels.
Returns
-------
circle : instance of Circle
The circle object.
"""
def __init__(self, ec, radius=1, pos=(0, 0), units='norm',
n_edges=200, fill_color='white', line_color=None,
line_width=1.0):
_Triangular.__init__(self, ec, fill_color=fill_color,
line_color=line_color, line_width=line_width,
line_loop=True)
if not isinstance(n_edges, int):
raise TypeError('n_edges must be an int')
if n_edges < 4:
raise ValueError('n_edges must be >= 4 for a reasonable circle')
self._n_edges = n_edges
# construct triangulation (never changes so long as n_edges is fixed)
tris = [[0, ii + 1, ii + 2] for ii in range(n_edges)]
tris = np.concatenate(tris)
tris[-1] = 1 # fix wrap for last triangle
self._orig_tris = tris
# need to set a dummy value here so recalculation doesn't fail
self._radius = np.array([1., 1.])
self.set_pos(pos, units)
self.set_radius(radius, units)
def set_radius(self, radius, units='norm'):
"""Set the position and radius of the circle
Parameters
----------
radius : array-like | float
X- and Y-direction extents (radii) of the circle / ellipse.
A single value (float) will be replicated for both directions.
units : str
Units to use. See ``check_units`` for options.
"""
check_units(units)
radius = np.atleast_1d(radius).astype(float)
if radius.ndim != 1 or radius.size > 2:
raise ValueError('radius must be a 1- or 2-element '
'array-like vector')
if radius.size == 1:
radius = np.r_[radius, radius]
# convert to pixel (OpenGL) units
self._radius = self._ec._convert_units(radius[:, np.newaxis],
units, 'pix')[:, 0]
# need to subtract center position
ctr = self._ec._convert_units(np.zeros((2, 1)), units, 'pix')[:, 0]
self._radius -= ctr
self._recalculate()
def set_pos(self, pos, units='norm'):
"""Set the position and radius of the circle
Parameters
----------
pos : array-like
X, Y center of the circle.
units : str
Units to use. See ``check_units`` for options.
"""
check_units(units)
pos = np.array(pos, dtype=float)
if not (pos.ndim == 1 and pos.size == 2):
raise ValueError('pos must be a 2-element array-like vector')
# convert to pixel (OpenGL) units
self._pos = self._ec._convert_units(pos[:, np.newaxis],
units, 'pix')[:, 0]
self._recalculate()
def _recalculate(self):
"""Helper to recalculate point coordinates"""
edges = self._n_edges
arg = 2 * np.pi * (np.arange(edges) / float(edges))
points = np.array([self._radius[0] * np.cos(arg),
self._radius[1] * np.sin(arg)])
points = np.c_[np.zeros((2, 1)), points] # prepend the center
points += np.array(self._pos[:2], dtype=float)[:, np.newaxis]
points = points.T
self._set_fill_points(points, self._orig_tris)
self._set_line_points(points[1:]) # omit center point for lines
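# A usage sketch for Circle (comment only). A scalar radius draws a circle,
# while a two-element radius draws an ellipse. ``ec`` is assumed to be an
# existing ExperimentController instance.
#
#     circ = Circle(ec, radius=0.2, pos=(0, 0), units='norm',
#                   fill_color='b', line_color='w')
#     ellipse = Circle(ec, radius=[0.3, 0.1], fill_color=None, line_color='g')
#     for shape in (circ, ellipse):
#         shape.draw()
#     ec.flip()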
class ConcentricCircles(object):
"""A set of filled concentric circles drawn without edges.
Parameters
----------
ec : instance of ExperimentController
Parent EC.
radii : list of float
Radii of the circles. Note that circles will be drawn in order,
so using e.g., radii=[1., 2.] will cause the first circle to be
covered by the second.
pos : array-like
2-element array-like with the X, Y center position.
units : str
Units to use. These will apply to all spatial aspects of the drawing.
See ``check_units`` for options.
colors : list or tuple of matplotlib Colors
Color to fill each circle with.
Returns
-------
    circles : instance of ConcentricCircles
        The concentric circles object.
"""
def __init__(self, ec, radii=(0.2, 0.05), pos=(0, 0), units='norm',
colors=('w', 'k')):
radii = np.array(radii, float)
if radii.ndim != 1:
raise ValueError('radii must be 1D')
if not isinstance(colors, (tuple, list)):
raise TypeError('colors must be a tuple, list, or array')
if len(colors) != len(radii):
raise ValueError('colors and radii must be the same length')
        # construct one filled Circle (no outline) per radius/color pair
self._circles = [Circle(ec, r, pos, units, fill_color=c, line_width=0)
for r, c in zip(radii, colors)]
def __len__(self):
return len(self._circles)
def set_pos(self, pos, units='norm'):
"""Set the position of the circles
Parameters
----------
pos : array-like
X, Y center of the circle.
units : str
Units to use. See ``check_units`` for options.
"""
for circle in self._circles:
circle.set_pos(pos, units)
def set_radius(self, radius, idx, units='norm'):
"""Set the radius of one of the circles
Parameters
----------
radius : float
Radius the circle.
idx : int
Index of the circle.
units : str
Units to use. See ``check_units`` for options.
"""
self._circles[idx].set_radius(radius, units)
def set_radii(self, radii, units='norm'):
"""Set the color of each circle
Parameters
----------
radii : array-like
List of radii to assign to the circles. Must contain the same
number of radii as the number of circles.
units : str
Units to use. See ``check_units`` for options.
"""
radii = np.array(radii, float)
if radii.ndim != 1 or radii.size != len(self):
raise ValueError('radii must contain exactly {0} radii'
''.format(len(self)))
for idx, radius in enumerate(radii):
self.set_radius(radius, idx, units)
def set_color(self, color, idx):
"""Set the color of one of the circles
Parameters
----------
color : matplotlib Color
Color of the circle.
idx : int
Index of the circle.
"""
self._circles[idx].set_fill_color(color)
def set_colors(self, colors):
"""Set the color of each circle.
Parameters
----------
colors : list or tuple of matplotlib Colors
Must be of type list or tuple, and contain the same number of
colors as the number of circles.
"""
if not isinstance(colors, (tuple, list)) or len(colors) != len(self):
raise ValueError('colors must be a list or tuple with {0} colors'
''.format(len(self)))
for idx, color in enumerate(colors):
self.set_color(color, idx)
def draw(self):
"""Draw the fixation dot."""
for circle in self._circles:
circle.draw()
class FixationDot(ConcentricCircles):
"""A reasonable centered fixation dot.
This uses concentric circles, the inner of which has a radius of one
pixel, to create a fixation dot. If finer-grained control is desired,
consider using ``ConcentricCircles``.
Parameters
----------
ec : instance of ExperimentController
Parent EC.
colors : list of matplotlib Colors
Color to fill the outer and inner circle with, respectively.
Returns
-------
fix : instance of FixationDot
The fixation dot.
"""
def __init__(self, ec, colors=('w', 'k')):
if len(colors) != 2:
raise ValueError('colors must have length 2')
super(FixationDot, self).__init__(ec, radii=[0.2, 0.2],
pos=[0, 0], units='deg',
colors=colors)
self.set_radius(1, 1, units='pix')
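# A usage sketch for FixationDot (comment only), assuming an existing
# ExperimentController ``ec``:
#
#     fix = FixationDot(ec, colors=('w', 'k'))
#     fix.draw()
#     ec.flip()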
class ProgressBar(object):
"""A progress bar that can be displayed between sections.
This uses two rectangles, one outline, and one solid to show how much
progress has been made in the experiment.
Parameters
----------
ec : instance of ExperimentController
Parent EC.
pos : array-like
4-element array-like with X, Y center and width, height where x and y
are coordinates of the box center.
units : str
Units to use. These will apply to all spatial aspects of the drawing.
Must be either ``'norm'`` or ``'pix'``.
colors : list or tuple of matplotlib Colors
Colors to fill and outline the bar respectively. Defaults to green and
white.
"""
def __init__(self, ec, pos, units='norm', colors=('g', 'w')):
self._ec = ec
if len(colors) != 2:
raise ValueError('colors must have length 2')
if units not in ['norm', 'pix']:
raise ValueError('units must be either \'norm\' or \'pix\'')
pos = np.array(pos, dtype=float)
self._pos = pos
self._width = pos[2]
self._units = units
# initialize the bar with zero progress
self._pos_bar = pos.copy()
self._pos_bar[0] -= self._width * 0.5
self._init_x = self._pos_bar[0]
self._pos_bar[2] = 0
self._rectangles = [Rectangle(ec, self._pos_bar, units, colors[0],
None),
Rectangle(ec, self._pos, units, None, colors[1])]
def update_bar(self, percent):
"""Update the progress of the bar.
Parameters
----------
        percent : float
            The percentage of the bar to be filled. Must be between 0 and 100.
"""
if percent > 100 or percent < 0:
raise ValueError('percent must be a float between 0 and 100')
self._pos_bar[2] = percent * self._width / 100.
self._pos_bar[0] = self._init_x + self._pos_bar[2] * 0.5
self._rectangles[0].set_pos(self._pos_bar, self._units)
def draw(self):
"""Draw the progress bar."""
for rectangle in self._rectangles:
rectangle.draw()
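# A usage sketch for ProgressBar (comment only). Note that update_bar expects
# a percentage between 0 and 100, not a fraction. ``ec``, ``n_blocks`` and
# ``run_block`` are hypothetical names from the surrounding experiment script.
#
#     pb = ProgressBar(ec, pos=[0, -0.9, 1.5, 0.1], units='norm')
#     for bi in range(n_blocks):
#         run_block(bi)
#         pb.update_bar((bi + 1) * 100. / n_blocks)
#         pb.draw()
#         ec.flip()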
##############################################################################
# Image display
class RawImage(object):
"""Create image from array for on-screen display.
Parameters
----------
ec : instance of ExperimentController
Parent EC.
image_buffer : array
Array, shape (N, M[, 3/4]). Color values should range between 0 and 1.
pos : array-like
2-element array-like with X, Y (center) arguments.
scale : float
The scale factor. 1 is native size (pixel-to-pixel), 2 is twice as
large, etc.
units : str
Units to use for the position. See ``check_units`` for options.
Returns
-------
img : instance of RawImage
The image object.
"""
def __init__(self, ec, image_buffer, pos=(0, 0), scale=1., units='norm'):
self._ec = ec
self._img = None
self.set_image(image_buffer)
self.set_pos(pos, units)
self.set_scale(scale)
def set_image(self, image_buffer):
"""Set image buffer data
Parameters
----------
image_buffer : array
N x M x 3 (or 4) array. Can be type ``np.float64`` or ``np.uint8``.
If ``np.float64``, color values must range between 0 and 1.
``np.uint8`` is slightly more efficient.
"""
from pyglet import image, sprite
image_buffer = np.ascontiguousarray(image_buffer)
if image_buffer.dtype not in (np.float64, np.uint8):
raise TypeError('image_buffer must be np.float64 or np.uint8')
if image_buffer.dtype == np.float64:
if image_buffer.max() > 1 or image_buffer.min() < 0:
raise ValueError('all float values must be between 0 and 1')
image_buffer = (image_buffer * 255).astype('uint8')
if image_buffer.ndim == 2: # grayscale
image_buffer = np.tile(image_buffer[..., np.newaxis], (1, 1, 3))
if not image_buffer.ndim == 3 or image_buffer.shape[2] not in [3, 4]:
raise RuntimeError('image_buffer incorrect size: {}'
''.format(image_buffer.shape))
        # choose the pixel format based on the number of channels
dims = image_buffer.shape
fmt = 'RGB' if dims[2] == 3 else 'RGBA'
self._sprite = sprite.Sprite(image.ImageData(dims[1], dims[0], fmt,
image_buffer.tobytes(),
-dims[1] * dims[2]))
def set_pos(self, pos, units='norm'):
"""Set image position.
Parameters
----------
pos : array-like
2-element array-like with X, Y (center) arguments.
units : str
Units to use. See ``check_units`` for options.
"""
pos = np.array(pos, float)
if pos.ndim != 1 or pos.size != 2:
raise ValueError('pos must be a 2-element array')
pos = np.reshape(pos, (2, 1))
self._pos = self._ec._convert_units(pos, units, 'pix').ravel()
@property
def bounds(self):
"""Left, Right, Bottom, Top (in pixels) of the image."""
pos = np.array(self._pos, float)
size = np.array([self._sprite.width,
self._sprite.height], float)
bounds = np.concatenate((pos - size / 2., pos + size / 2.))
return bounds[[0, 2, 1, 3]]
@property
def scale(self):
return self._scale
def set_scale(self, scale):
"""Create image from array for on-screen display.
Parameters
----------
scale : float
The scale factor. 1 is native size (pixel-to-pixel), 2 is twice as
large, etc.
"""
scale = float(scale)
self._scale = scale
self._sprite.scale = self._scale
def draw(self):
"""Draw the image to the buffer"""
self._sprite.scale = self._scale
pos = self._pos - [self._sprite.width / 2., self._sprite.height / 2.]
try:
self._sprite.position = (pos[0], pos[1])
except AttributeError:
self._sprite.set_position(pos[0], pos[1])
self._sprite.draw()
def get_rect(self, units='norm'):
"""X, Y center, Width, Height of image.
Parameters
----------
units : str
Units to use for the position. See ``check_units`` for options.
Returns
-------
rect : ndarray
The rect.
"""
# left,right,bottom,top
lrbt = self._ec._convert_units(self.bounds.reshape(2, -1),
fro='pix', to=units)
center = self._ec._convert_units(self._pos.reshape(2, -1),
fro='pix', to=units)
width_height = np.diff(lrbt, axis=-1)
return np.squeeze(np.concatenate([center, width_height]))
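# A usage sketch for RawImage (comment only): display a random RGB array at
# screen center, doubled in size. ``ec`` is assumed to be an existing
# ExperimentController; float buffers must lie in [0, 1].
#
#     import numpy as np
#     buf = np.random.rand(64, 64, 3)             # values in [0, 1]
#     img = RawImage(ec, buf, pos=(0, 0), scale=2., units='norm')
#     img.draw()
#     ec.flip()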
class Video(object):
"""Read video file and draw it to the screen.
Parameters
----------
ec : instance of expyfun.ExperimentController
file_name : str
the video file path
pos : array-like
2-element array-like with X, Y elements.
units : str
Units to use for the position. See ``check_units`` for options.
scale : float | str
The scale factor. 1 is native size (pixel-to-pixel), 2 is twice as
large, etc. If scale is a string, it must be either ``'fill'``
(which ensures the entire ``ExperimentController`` window is
covered by the video, at the expense of some parts of the video
potentially being offscreen), or ``'fit'`` (which scales maximally
while ensuring none of the video is offscreen, and may result in
letterboxing or pillarboxing).
center : bool
If ``False``, the elements of ``pos`` specify the position of the lower
left corner of the video frame; otherwise they position the center of
the frame.
visible : bool
Whether to show the video when initialized. Can be toggled later using
`Video.set_visible` method.
Returns
-------
None
Notes
-----
This is a somewhat pared-down implementation of video playback. Looping is
not available, and the audio stream from the video file is discarded.
Timing of individual frames is relegated to the pyglet media player's
internal clock. Recommended for use only in paradigms where the relative
timing of audio and video are unimportant (e.g., if the video is merely
entertainment for the participant during a passive auditory task).
"""
def __init__(self, ec, file_name, pos=(0, 0), units='norm', scale=1.,
center=True, visible=True):
from pyglet.media import load, Player
self._ec = ec
self._source = load(file_name)
self._player = Player()
with warnings.catch_warnings(record=True): # deprecated eos_action
self._player.queue(self._source)
self._player._audio_player = None
frame_rate = self.frame_rate
if frame_rate is None:
logger.warning('Frame rate could not be determined')
frame_rate = 60.
self._dt = 1. / frame_rate
self._texture = None
self._playing = False
self._finished = False
self._pos = pos
self._units = units
self._center = center
self.set_scale(scale) # also calls set_pos
self._visible = visible
self._eos_fun = self._eos_new if _new_pyglet() else self._eos_old
def play(self, auto_draw=True):
"""Play video from current position.
Parameters
----------
auto_draw : bool
If True, add ``self.draw`` to ``ec.on_every_flip``.
Returns
-------
time : float
The timestamp (on the parent ``ExperimentController`` timeline) at
which ``play()`` was called.
"""
if not self._playing:
if auto_draw:
self._ec.call_on_every_flip(self.draw)
self._player.play()
self._playing = True
else:
warnings.warn('ExperimentController.video.play() called when '
'already playing.')
return self._ec.get_time()
def pause(self):
"""Halt video playback.
Returns
-------
time : float
The timestamp (on the parent ``ExperimentController`` timeline) at
which ``pause()`` was called.
"""
if self._playing:
try:
idx = self._ec.on_every_flip_functions.index(self.draw)
except ValueError: # not auto_draw
pass
else:
self._ec.on_every_flip_functions.pop(idx)
self._player.pause()
self._playing = False
else:
warnings.warn('ExperimentController.video.pause() called when '
'already paused.')
return self._ec.get_time()
def _delete(self):
"""Halt video playback and remove player."""
if self._playing:
self.pause()
self._player.delete()
def _scale_texture(self):
if self._texture:
self._texture.width = self.source_width * self._scale
self._texture.height = self.source_height * self._scale
def set_scale(self, scale=1.):
"""Set video scale.
Parameters
----------
scale : float | str
The scale factor. 1 is native size (pixel-to-pixel), 2 is twice as
large, etc. If scale is a string, it must be either ``'fill'``
(which ensures the entire ``ExperimentController`` window is
covered by the video, at the expense of some parts of the video
potentially being offscreen), or ``'fit'`` (which scales maximally
while ensuring none of the video is offscreen, which may result in
letterboxing).
"""
if isinstance(scale, string_types):
_scale = self._ec.window_size_pix / np.array((self.source_width,
self.source_height),
dtype=float)
if scale == 'fit':
scale = _scale.min()
elif scale == 'fill':
scale = _scale.max()
self._scale = float(scale) # allows [1, 1., '1']; others: ValueError
if self._scale <= 0:
raise ValueError('Video scale factor must be strictly positive.')
self._scale_texture()
self.set_pos(self._pos, self._units, self._center)
def set_pos(self, pos, units='norm', center=True):
"""Set video position.
Parameters
----------
pos : array-like
2-element array-like with X, Y elements.
units : str
Units to use for the position. See ``check_units`` for options.
center : bool
If ``False``, the elements of ``pos`` specify the position of the
lower left corner of the video frame; otherwise they position the
center of the frame.
"""
pos = np.array(pos, float)
if pos.size != 2:
raise ValueError('pos must be a 2-element array')
pos = np.reshape(pos, (2, 1))
pix = self._ec._convert_units(pos, units, 'pix').ravel()
offset = np.array((self.width, self.height)) // 2 if center else 0
self._pos = pos
self._actual_pos = pix - offset
self._pos_unit = units
self._pos_centered = center
def _draw(self):
self._texture = self._player.get_texture()
self._scale_texture()
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, 0)
self._texture.blit(*self._actual_pos)
def draw(self):
"""Draw the video texture to the screen buffer."""
self._player.update_texture()
# detect end-of-stream to prevent pyglet from hanging:
if not self._eos:
if self._visible:
self._draw()
else:
self._finished = True
self.pause()
self._ec.check_force_quit()
def set_visible(self, show, flip=False):
"""Show/hide the video frame.
Parameters
----------
show : bool
Show or hide.
flip : bool
If True, flip after showing or hiding.
"""
if show:
self._visible = True
self._draw()
else:
self._visible = False
self._ec.flip()
if flip:
self._ec.flip()
# PROPERTIES
@property
def _eos(self):
return self._eos_fun()
def _eos_old(self):
return (self._player._last_video_timestamp is not None and
self._player._last_video_timestamp ==
self._source.get_next_video_timestamp())
def _eos_new(self):
ts = self._source.get_next_video_timestamp()
dur = self._source._duration
return ts is None or ts >= dur
@property
def playing(self):
return self._playing
@property
def finished(self):
return self._finished
@property
def position(self):
return np.squeeze(self._pos)
@property
def scale(self):
return self._scale
@property
def duration(self):
return self._source.duration
@property
def frame_rate(self):
return self._source.video_format.frame_rate
@property
def dt(self):
return self._dt
@property
def time(self):
return self._player.time
@property
def width(self):
return self.source_width * self._scale
@property
def height(self):
return self.source_height * self._scale
@property
def source_width(self):
return self._source.video_format.width
@property
def source_height(self):
return self._source.video_format.height
@property
def time_offset(self):
return self._ec.get_time() - self._player.time
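# A usage sketch for Video (comment only). The playback loop is an assumption
# about how a script might drive it; ``ec`` and the file name are placeholders.
#
#     video = Video(ec, 'stimulus.mp4', units='norm', scale='fit')
#     video.play()              # auto_draw adds video.draw to every flip
#     while not video.finished:
#         ec.flip()
#     # draw() pauses playback automatically once the end of stream is reached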
| bsd-3-clause |
lemmalearning/Easy-Figures | shapes/Ticks.py | 1 | 8844 | import matplotlib.pyplot as plt
import math
import numpy as np
class Ticks:
"""
    Creates a Ticks object which handles tick marks, grid lines, and tick labels.
__init__ - Creates the object and sets the various class variables
Ticks - Creates the class variables required for drawing tick marks
__draw__ - Draws the axis and tick marks according to class variables
"""
def __init__(self, grid, minorGrid, ticks, xticks, yticks, minorticks, xminorticks, yminorticks, fontsize, boxOrigin, origin, top, customLabels, figure):
self.grid = grid
self.minorGrid = minorGrid
self.minorticks = minorticks
self.xminorticks = xminorticks
self.yminorticks = yminorticks
self.xticks = xticks
self.yticks = yticks
self.ticks = ticks
x_isdict = isinstance(customLabels, list) and customLabels != [] and isinstance(customLabels[0], dict)
if x_isdict:
x_0 = len(set(['0', 0, 0.0, '0.0']).intersection(customLabels[0].keys())) > 0 # 0 in the x axis exists
else:
x_0 = False
y_isdict = isinstance(customLabels, list) and customLabels != [] and isinstance(customLabels[1], dict)
if y_isdict:
y_0 = len(set(['0', 0, 0.0, '0.0']).intersection(customLabels[1].keys())) > 0 # 0 in the y axis exists
else:
y_0 = False
if boxOrigin == True:
self.boxOrigin = True
elif (x_0 or y_0) and not boxOrigin:
self.boxOrigin = [x_0, y_0]
else:
self.boxOrigin = boxOrigin
self.customLabels = customLabels
if self.ticks:
self.xticks = self.yticks = self.ticks
if self.minorticks:
self.xminorticks = self.yminorticks = self.minorticks
if not self.minorticks and self.ticks:
self.minorticks = self.xminorticks = self.yminorticks = self.ticks/2.0
self.fontsize = fontsize
self.origin = origin
self.top = top
self.figure = figure
def serialize(self):
# The customLabels will be of the form [ { xValue: 'xLabel', ...}, { .. same for y ... } ]
# The 0.001 is so that it includes the ending as well
major_x = []
major_y = []
if self.xticks:
major_x = np.arange(
math.ceil(self.figure.xyrange[0][0] / self.xticks) * self.xticks,
self.figure.xyrange[0][1] + 0.001,
self.xticks
)
if self.yticks:
major_y = np.arange(
math.ceil(self.figure.xyrange[1][0] / self.yticks) * self.yticks,
self.figure.xyrange[1][1] + 0.001,
self.yticks
)
minor_x = []
minor_y = []
if self.xminorticks:
minor_x = np.arange(
math.ceil(self.figure.xyrange[0][0] / self.xminorticks) * self.xminorticks,
self.figure.xyrange[0][1] + 0.001,
self.xminorticks
)
if self.yminorticks:
minor_y = np.arange(
math.ceil(self.figure.xyrange[1][0] / self.yminorticks) * self.yminorticks,
self.figure.xyrange[1][1] + 0.001,
self.yminorticks
)
xticks = [ { "value": v, "label": str(v).rstrip('0').rstrip('.') if (v != 0 or self.origin) else None, "type": "major", "line": self.grid } for v in major_x ]
yticks = [ { "value": v, "label": str(v).rstrip('0').rstrip('.') if v != 0 else None, "type": "major", "line": self.grid } for v in major_y ]
for v in minor_x:
if v in major_x:
continue
xticks.append({ "value": v, "label": None, "type": "minor", "line": self.minorGrid })
for v in minor_y:
if v in major_y:
continue
yticks.append({ "value": v, "label": None, "type": "minor", "line": self.minorGrid })
customLabels = self.customLabels
        if customLabels is not None:
if customLabels[0]:
for i in range(0, len(xticks)):
v = xticks[i]["value"]
xticks[i]["label"] = customLabels[0][v] if v in self.customLabels[0] else None
if customLabels[1]:
for i in range(0, len(yticks)):
v = yticks[i]["value"]
yticks[i]["label"] = customLabels[1][v] if v in self.customLabels[1] else None
return {
# TODO: This assumes that a floating point tick interval was given
"xticks": xticks,
"yticks": yticks,
"showOrigin": self.origin
}
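    # The dictionary returned by serialize() has roughly this shape (a sketch
    # with illustrative values only):
    #
    #     {"xticks": [{"value": 1.0, "label": "1", "type": "major",
    #                  "line": True}, ...],
    #      "yticks": [...],
    #      "showOrigin": False}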
def check_MAXTICK(self):
"""
Checks the tick amounts to ensure they aren't generating greater than MAXTICKS
:return: NONE
"""
MAXTICKS = 10000 # Matplotlib specified
if self.ticks and (self.figure.abs_range_x / self.ticks > MAXTICKS or self.figure.abs_range_y / self.ticks > MAXTICKS):
raise Exception("Tick count too high")
if self.minorticks and (self.figure.abs_range_x / self.minorticks > MAXTICKS or self.figure.abs_range_y / self.minorticks > MAXTICKS):
raise Exception("Tick count too high")
if self.xticks and self.figure.abs_range_x / self.xticks > MAXTICKS:
raise Exception("Tick count too high")
if self.xminorticks and self.figure.abs_range_x / self.xminorticks > MAXTICKS:
raise Exception("Tick count too high")
if self.yticks and self.figure.abs_range_y / self.yticks > MAXTICKS:
raise Exception("Tick count too high")
if self.yminorticks and self.figure.abs_range_y / self.yminorticks > MAXTICKS:
raise Exception("Tick count too high")
def __draw__(self, zorder=1, box=False):
# Parse the grid color:
gridColor = self.figure.GRID[:-2]
gridAlpha = int(self.figure.GRID[-2:], 16) / 256.0
if self.grid is not False:
self.figure.ax.grid(which='major', color=gridColor if self.grid == True else self.grid,
linestyle='dashed', linewidth=.5, alpha=gridAlpha)
if self.minorGrid is not False:
self.figure.ax.grid(which='minor', color=gridColor if self.minorGrid == True else self.minorGrid,
linestyle='dashed', linewidth=.3, alpha=gridAlpha)
####### DRAW LABELS #######
if isinstance(self.ticks, int) and isinstance(self.minorticks, int) and self.ticks > self.minorticks:
self.minorGrid = True
self.check_MAXTICK() # check to make sure there aren't too many ticks
plt.gca().xaxis.set_major_locator(plt.MultipleLocator(self.xticks) if self.xticks is not False else plt.NullLocator())
plt.gca().xaxis.set_minor_locator(plt.MultipleLocator(self.xminorticks) if self.xminorticks is not False else plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.MultipleLocator(self.yticks) if self.yticks is not False else plt.NullLocator())
plt.gca().yaxis.set_minor_locator(plt.MultipleLocator(self.yminorticks) if self.yminorticks is not False else plt.NullLocator())
self.figure.ax.tick_params(axis='both', which='major', labelsize=self.fontsize)
ylabels = []
for item in self.figure.ax.get_yticks():
if float(item) == 0 and not self.boxOrigin==True and not (isinstance(self.boxOrigin, list) and self.boxOrigin[1]):
ylabels.append("")
elif math.floor(float(item)) == float(item): # it's an int
ylabels.append(int(item))
else:
ylabels.append(float(item))
xlabels = []
for item in self.figure.ax.get_xticks():
if float(item) == 0 and not self.boxOrigin==True and not (isinstance(self.boxOrigin, list) and self.boxOrigin[0]):
xlabels.append(" (0,0)" if self.origin else "")
elif math.floor(float(item)) == float(item): # it's an int
xlabels.append(int(item))
else:
xlabels.append(float(item))
if self.customLabels and (self.customLabels[0] or self.customLabels[0] == {}):
for i,label in enumerate(xlabels):
if label == '':
continue
key = None
if int(label) in self.customLabels[0]:
key = int(label)
if float(label) in self.customLabels[0]:
key = float(label)
if str(label) in self.customLabels[0]:
key = str(label)
if key != None:
if self.customLabels[0][key] == 'auto':
continue
else:
xlabels[i] = self.customLabels[0][key]
else:
xlabels[i] = ''
if self.customLabels and (self.customLabels[1] or self.customLabels[1] == {}):
for i, label in enumerate(ylabels):
if label == '':
continue
key = None
if int(label) in self.customLabels[1]:
key = int(label)
if float(label) in self.customLabels[1]:
key = float(label)
if str(label) in self.customLabels[1]:
key = str(label)
if key != None:
if self.customLabels[1][key] == 'auto':
continue
else:
ylabels[i] = self.customLabels[1][key]
else:
ylabels[i] = ''
else:
            xlabels = xlabels[:-1]
            ylabels = ylabels[:-1]
self.figure.ax.set_yticklabels([str(label).replace("-", "$-$") for label in ylabels])
self.figure.ax.set_xticklabels([str(label).replace("-", "$-$") for label in xlabels])
for label in self.figure.ax.xaxis.get_ticklabels():
label.set_bbox(dict(boxstyle='round', facecolor=self.figure.bgcolor, edgecolor='none', pad=0.1))
if '$' not in label.get_text() and not box:
label.set_horizontalalignment('left')
for label in self.figure.ax.yaxis.get_ticklabels():
label.set_bbox(dict(boxstyle='round', facecolor=self.figure.bgcolor, edgecolor='none', pad=0.1))
if '$' not in label.get_text() and not box:
label.set_verticalalignment('bottom')
if self.top:
self.figure.ax.xaxis.set_label_position('top')
| apache-2.0 |
BMP-TECH/mavlink | pymavlink/tools/mavgpslag.py | 43 | 3446 | #!/usr/bin/env python
'''
calculate GPS lag from DF log
'''
import sys, time, os
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--plot", action='store_true', default=False, help="plot errors")
parser.add_argument("--minspeed", type=float, default=6, help="minimum speed")
parser.add_argument("logs", metavar="LOG", nargs="+")
args = parser.parse_args()
from pymavlink import mavutil
from pymavlink.mavextra import *
from pymavlink.rotmat import Vector3, Matrix3
'''
Support having a $HOME/.pymavlink/mavextra.py for extra graphing functions
'''
home = os.getenv('HOME')
if home is not None:
extra = os.path.join(home, '.pymavlink', 'mavextra.py')
if os.path.exists(extra):
import imp
mavuser = imp.load_source('pymavlink.mavuser', extra)
from pymavlink.mavuser import *
def velocity_error(timestamps, vel, gaccel, accel_indexes, imu_dt, shift=0):
'''return summed velocity error'''
sum = 0
count = 0
for i in range(0, len(vel)-1):
dv = vel[i+1] - vel[i]
da = Vector3()
for idx in range(1+accel_indexes[i]-shift, 1+accel_indexes[i+1]-shift):
da += gaccel[idx]
dt1 = timestamps[i+1] - timestamps[i]
dt2 = (accel_indexes[i+1] - accel_indexes[i]) * imu_dt
da *= imu_dt
da *= dt1/dt2
#print(accel_indexes[i+1] - accel_indexes[i])
ex = abs(dv.x - da.x)
ey = abs(dv.y - da.y)
sum += 0.5*(ex+ey)
count += 1
if count == 0:
return None
return sum/count
def gps_lag(logfile):
'''work out gps velocity lag times for a log file'''
print("Processing log %s" % filename)
mlog = mavutil.mavlink_connection(filename)
timestamps = []
vel = []
gaccel = []
accel_indexes = []
ATT = None
IMU = None
dtsum = 0
dtcount = 0
while True:
m = mlog.recv_match(type=['GPS','IMU','ATT'])
if m is None:
break
t = m.get_type()
        if t == 'GPS' and m.Status == 3 and m.Spd > args.minspeed:
v = Vector3(m.Spd*cos(radians(m.GCrs)), m.Spd*sin(radians(m.GCrs)), m.VZ)
vel.append(v)
timestamps.append(m._timestamp)
accel_indexes.append(max(len(gaccel)-1,0))
elif t == 'ATT':
ATT = m
elif t == 'IMU':
if ATT is not None:
gaccel.append(earth_accel_df(m, ATT))
if IMU is not None:
dt = m._timestamp - IMU._timestamp
dtsum += dt
dtcount += 1
IMU = m
imu_dt = dtsum / dtcount
print("Loaded %u samples imu_dt=%.3f" % (len(vel), imu_dt))
besti = -1
besterr = 0
delays = []
errors = []
for i in range(0,100):
err = velocity_error(timestamps, vel, gaccel, accel_indexes, imu_dt, shift=i)
if err is None:
break
errors.append(err)
delays.append(i*imu_dt)
if besti == -1 or err < besterr:
besti = i
besterr = err
print("Best %u (%.3fs) %f" % (besti, besti*imu_dt, besterr))
if args.plot:
import matplotlib.pyplot as plt
plt.plot(delays, errors, 'bo-')
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,0,y2))
plt.ylabel('Error')
plt.xlabel('Delay(s)')
plt.show()
for filename in args.logs:
gps_lag(filename)
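# Example invocation (a sketch; the flags match the argparse options defined
# above, and the log file name is a placeholder):
#
#     mavgpslag.py --minspeed 8 --plot flight.BIN
#
# The reported "Best" shift is the number of IMU samples (and the equivalent
# time in seconds) by which the GPS velocity lags the integrated acceleration.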
| lgpl-3.0 |
christophreimer/pytesmo | pytesmo/validation_framework/data_manager.py | 1 | 7186 | """
Created on 27.05.2015
@author: Andreea Plocon, [email protected]
"""
import itertools
import pandas as pd
class DataManager(object):
"""
Class to handle the data management.
Parameters
----------
datasets : dict of dicts
Keys: string, datasets names
Values: dict, containing the following fields
'class': object
Class containing the method read_ts for reading the data.
'columns': list
List of columns which will be used in the validation process.
'type': string
'reference' or 'other'.
'args': list, optional
Args for reading the data.
'kwargs': dict, optional
Kwargs for reading the data
'grids_compatible': boolean, optional
If set to True the grid point index is used directly when
reading other, if False then lon, lat is used and a nearest
neighbour search is necessary.
'use_lut': boolean, optional
If set to True the grid point index (obtained from a
calculated lut between reference and other) is used when
reading other, if False then lon, lat is used and a
nearest neighbour search is necessary.
'lut_max_dist': float, optional
Maximum allowed distance in meters for the lut calculation.
data_prep : object, optional
Object that provides the methods prep_reference and prep_other
which take the pandas.Dataframe provided by the read_ts methods (plus
other_name for prep_other) and do some data preparation on it before
temporal matching etc. can be used e.g. for special masking or anomaly
calculations.
period : list, optional
Of type [datetime start, datetime end]. If given then the two input
datasets will be truncated to start <= dates <= end.
Methods
-------
use_lut(other_name)
Returns lut between reference and other if use_lut for other dataset
was set to True.
get_result_names()
Return results names based on reference and others names.
read_reference(*args)
Function to read and prepare the reference dataset.
read_other(other_name, *args)
Function to read and prepare the other datasets.
"""
def __init__(self, datasets, data_prep=None, period=None):
"""
Initialize parameters.
"""
self.datasets = datasets
self.other_name = []
for dataset in datasets.keys():
if datasets[dataset]['type'] == 'reference':
self.reference_name = dataset
else:
self.other_name.append(dataset)
try:
self.reference_grid = self.datasets[
self.reference_name]['class'].grid
except AttributeError:
self.reference_grid = None
self.data_prep = data_prep
self.period = period
def get_luts(self):
"""
Returns luts between reference and others if use_lut for other datasets
was set to True.
Returns
-------
luts : dict
Keys: other datasets names
Values: lut between reference and other, or None
"""
luts = {}
for other_name in self.other_name:
if self.datasets[other_name]['use_lut']:
luts[other_name] = self.reference_grid.calc_lut(
self.datasets[other_name]['class'].grid,
max_dist=self.datasets[other_name]['lut_max_dist'])
else:
luts[other_name] = None
return luts
def get_results_names(self):
"""
Return results names based on reference and others names.
Returns
-------
results_names : list
Containing all combinations of
(referenceDataset.column, otherDataset.column)
"""
results_names = []
ref_columns = []
for column in self.datasets[self.reference_name]['columns']:
ref_columns.append(self.reference_name + '.' + column)
other_columns = []
for other in self.other_name:
for column in self.datasets[other]['columns']:
other_columns.append(other + '.' + column)
for comb in itertools.product(ref_columns, other_columns):
results_names.append(comb)
return results_names
def read_reference(self, *args):
"""
Function to read and prepare the reference dataset.
Takes either 1 (gpi) or 2 (lon, lat) arguments.
Parameters
----------
gpi : int
Grid point index
lon : float
Longitude of point
lat : float
Latitude of point
Returns
-------
ref_df : pandas.DataFrame or None
Reference dataframe.
"""
reference = self.datasets[self.reference_name]
args = list(args)
args.extend(reference['args'])
try:
ref_df = reference['class'].read_ts(*args, **reference['kwargs'])
except IOError:
return None
if len(ref_df) == 0:
return None
if self.data_prep is not None:
ref_df = self.data_prep.prep_reference(ref_df)
if len(ref_df) == 0:
return None
        if not isinstance(ref_df, pd.DataFrame):
return None
if self.period is not None:
ref_df = ref_df[((ref_df.index >= self.period[0]) &
(ref_df.index <= self.period[1]))]
if len(ref_df) == 0:
return None
else:
return ref_df
def read_other(self, other_name, *args):
"""
Function to read and prepare the other datasets.
Takes either 1 (gpi) or 2 (lon, lat) arguments.
Parameters
----------
other_name : string
Name of the other dataset.
gpi : int
Grid point index
lon : float
Longitude of point
lat : float
Latitude of point
Returns
-------
other_df : pandas.DataFrame or None
Other dataframe.
"""
other = self.datasets[other_name]
args = list(args)
args.extend(other['args'])
try:
other_df = other['class'].read_ts(*args, **other['kwargs'])
except IOError:
return None
if len(other_df) == 0:
return None
if self.data_prep is not None:
other_df = self.data_prep.prep_other(other_df, other_name)
if len(other_df) == 0:
return None
        if not isinstance(other_df, pd.DataFrame):
return None
if self.period is not None:
other_df = other_df[((other_df.index >= self.period[0]) &
(other_df.index <= self.period[1]))]
if len(other_df) == 0:
return None
else:
return other_df
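# A usage sketch (comment only). The reader objects are placeholders for
# anything exposing ``read_ts`` (and a ``grid`` attribute if luts are used);
# only the dictionary layout follows the format documented above.
#
#     datasets = {
#         'ISMN': {'class': ismn_reader, 'columns': ['soil moisture'],
#                  'type': 'reference', 'args': [], 'kwargs': {}},
#         'ASCAT': {'class': ascat_reader, 'columns': ['sm'],
#                   'type': 'other', 'args': [], 'kwargs': {},
#                   'grids_compatible': False, 'use_lut': True,
#                   'lut_max_dist': 30000},
#     }
#     dm = DataManager(datasets)
#     dm.get_results_names()   # [('ISMN.soil moisture', 'ASCAT.sm')]
#     ref_df = dm.read_reference(gpi)      # or dm.read_reference(lon, lat)
#     other_df = dm.read_other('ASCAT', gpi)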
| bsd-3-clause |
terkkila/scikit-learn | examples/plot_digits_pipe.py | 250 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
# Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
BiaDarkia/scikit-learn | sklearn/covariance/robust_covariance.py | 15 | 31108 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
    location : array-like, shape (n_features,)
        Robust location estimates.
    covariance : array-like, shape (n_features, n_features)
        Robust covariance estimates.
    det : float
        The log-determinant of the covariance estimate.
    support : array-like, shape (n_samples,)
        A mask for the `n_support` observations whose scatter matrix has
        minimum determinant.
    dist : array-like, shape (n_samples,)
        The squared Mahalanobis distances of all the observations, computed
        from the returned location and covariance estimates.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
dist = np.inf
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = linalg.pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
# If the data already has singular covariance, calculate the precision,
# as the loop below will not be entered.
if np.isinf(det):
precision = linalg.pinvh(covariance)
previous_det = np.inf
while (det < previous_det and remaining_iterations > 0
and not np.isinf(det)):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = linalg.pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Check if best fit already found (det => 0, logdet => -inf)
if np.isinf(det):
results = location, covariance, det, support, dist
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[RV]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
        (2 is enough to get close to the final solution; in practice it
        never exceeds 20).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [RV] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Notes
-----
The FastMCD algorithm has been introduced by Rousseuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
The principle is to compute robust estimates and random subsets before
pooling them into a larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [RouseeuwVan]_,
see the MinCovDet object.
References
----------
.. [RouseeuwVan] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
        the robust location and covariance estimates of the data set.
    dist : array-like, shape (n_samples,)
        The squared Mahalanobis distances of all the observations, computed
        from the robust location and covariance estimates.
"""
random_state = check_random_state(random_state)
X = check_array(X, ensure_min_samples=2, estimator='fast_mcd')
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start] +
X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = linalg.pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = linalg.pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
except MemoryError:
            # The above is too big. Let's try with something much smaller
# (and less optimal)
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
n_best_tot = 10
n_best_sub = 2
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
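# Hedged usage sketch (illustrative only, not part of scikit-learn's API):
# shows how fast_mcd is typically called on a small Gaussian sample. The
# helper name `_demo_fast_mcd` and the toy data are assumptions made purely
# for illustration; module-level imports (numpy, check_random_state, ...)
# defined above are reused.
def _demo_fast_mcd():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(100, 2)
    # returns the robust location, robust covariance, the support mask of
    # the observations used for the raw fit, and the Mahalanobis distances
    location, covariance, support, dist = fast_mcd(X_demo, random_state=rng)
    return location, covariance, int(support.sum()), dist.shape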
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : bool
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
        Useful to work with data whose mean is almost, but not exactly,
        zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
        [n_samples + n_features + 1] / 2
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rousseeuw] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [ButlerDavies] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y
            not used, present for API consistency purposes.
Returns
-------
self : object
"""
X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = linalg.pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [RVD]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
References
----------
.. [RVD] `A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS`
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
# Check that the covariance of the support data is not equal to 0.
# Otherwise self.dist_ = 0 and thus correction = 0.
n_samples = len(self.dist_)
n_support = np.sum(self.support_)
if n_support < n_samples and np.allclose(self.raw_covariance_, 0):
raise ValueError('The covariance matrix of the support data '
'is equal to 0, try to increase support_fraction')
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates) described
in [RVDriessen]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
References
----------
.. [RVDriessen] `A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS`
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
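# Hedged usage sketch (illustrative only, not part of the scikit-learn API):
# fitting MinCovDet on well-behaved Gaussian data. The helper name and the
# toy sample below are assumptions made for the example.
def _demo_min_cov_det():
    rng = np.random.RandomState(42)
    X_demo = rng.randn(200, 3)
    mcd = MinCovDet(random_state=rng).fit(X_demo)
    # robust location/covariance estimates plus the inlier support mask
    return mcd.location_, mcd.covariance_, int(mcd.support_.sum())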
| bsd-3-clause |
danstowell/markovrenewal | experiments/plotmultitest.py | 1 | 4377 | #!/bin/env python
# plot results from linloggmm.multitest_abagen_linlog()
# by Dan Stowell, summer 2012
import os.path
import csv
from math import log, exp, pi, sqrt, ceil, floor
from numpy import mean, std
import matplotlib.pyplot as plt
import matplotlib.cm as cm
annotdir = os.path.expanduser("~/svn/stored_docs/python/markovrenewal/output")
plotfontsize = "large" #"xx-small"
def rescale(num):
# return log((float(num) + 0.005) / (1.005 - num)) # logistic
return log(1.01 - num)
# NB this array determines the order in which stats are processed too, which gives the order of the legend
#runlabels = [('ip','Ideal recovery, trained on test data'), ('i','Ideal recovery'), ('is','Ideal recovery plus synthetic noise'), ('a','Recovery from audio'), ('ba','Recovery from audio (baseline)')]
knownlabels = {'snrk': "SNR known", 'snru': "SNR unknown", 'snrug': "SNR unknown, greedy inference"}
def fmt_chooser(nmix, known):
# return {1:'b', 2:'g', 4:'m'}[nmix] + {'snrk':'-.', 'snru':'-', 'snrug': ':'}[known]
return {1:'b', 2:'b', 4:'b'}[nmix] + {'snrk':'-.', 'snru':'-', 'snrug': ':'}[known]
# load csv into nested dict structure data[whichstat][runtype][known][nmix][snr][]
data = {}
nmixes = []
snrs = []
rdr = csv.DictReader(open("%s/multitest_abagen_linlog.txt" % annotdir, 'rb'))
nruns=0
for row in rdr:
if nruns==0: # first row, infer nruns
while ("val%i" % nruns) in row:
nruns += 1
row['runtype'] = 'merged' # This is a HACK to merge the pdf of 'coh' and 'seg' together
if row['whichstat'] not in data:
data[ row['whichstat']] = {}
if row['runtype'] not in data[row['whichstat']]:
data[ row['whichstat']][row['runtype']] = {}
row['nmix'] = int(row['nmix'])
if row['nmix'] not in nmixes:
nmixes.append(row['nmix'])
if row['nmix'] not in data[ row['whichstat']][row['runtype']]:
data[ row['whichstat']][row['runtype']][row['nmix']] = {}
if row['known'] not in data[ row['whichstat']][row['runtype']][row['nmix']]:
data[ row['whichstat']][row['runtype']][row['nmix']][row['known']] = {}
row['snr'] = int(row['snr'])
if row['snr'] not in snrs:
snrs.append(row['snr'])
if row['snr'] not in data[ row['whichstat']][row['runtype']][row['nmix']][row['known']]:
data[ row['whichstat']][row['runtype']][row['nmix']][row['known']][row['snr']] = []
for i in xrange(nruns):
val = float(row['val%i' % i])
data[ row['whichstat']][row['runtype']][row['nmix']][row['known']][row['snr']].append(val)
snrs.sort()
snrs.reverse()
snrsrange = (min(snrs)-1, max(snrs)+1)
yticks = [0.3, 0.6, 0.8, 0.9, 0.95, 0.99, 1.0]
# break it down into separate plots:
for whichstat, sdata in data.iteritems():
for runtype, srdata in sdata.iteritems():
for nmix, srndata in srdata.iteritems():
# and in a single plot:
fig = plt.figure()
for known, srnkdata in srndata.iteritems():
linedata = []
for snr in snrs:
numlist = srnkdata[snr]
#numlist = [rescale(num) for num in numlist] # no, do it after calc'ing stats
# calc mean and stderr from 'numlist'
themean = mean(numlist)
stderr = std(numlist) / sqrt(len(numlist))
# transform the data for readability:
themean_l = rescale(themean)
stderr_l_up = rescale(themean + stderr) - themean_l
stderr_l_dn = themean_l - rescale(themean - stderr)
linedata.append({'snr':snr, 'mean': themean_l, 'stderr_up': stderr_l_up, 'stderr_dn': stderr_l_dn})
# draw a line
plt.errorbar([x['snr'] for x in linedata], \
[x['mean'] for x in linedata], \
([x['stderr_dn'] for x in linedata], [x['stderr_up'] for x in linedata]), \
label="%i items, %s" % (nmix, knownlabels[known]), fmt=fmt_chooser(nmix, known))
#plt.title("%s_%s" % (whichstat, runtype), fontsize=plotfontsize)
plt.xlabel("SNR", fontsize=plotfontsize)
plt.ylabel("%s" % whichstat, fontsize=plotfontsize)
plt.xticks(snrs, fontsize=plotfontsize)
plt.xlim(xmin=snrsrange[1], xmax=snrsrange[0])
plt.ylim(ymin=rescale(0.3), ymax=rescale(1.001))
plt.yticks(map(rescale, yticks), yticks, fontsize=plotfontsize)
#plt.yticks(fontsize=plotfontsize)
plt.legend(loc=(0.02, 0.05), prop={'size':'medium'})
plt.savefig("%s/pdf/plot_multitest_%s_%s_%s.pdf" % (annotdir, whichstat, runtype, nmix), papertype='A4', format='pdf')
| gpl-2.0 |
cl4rke/scikit-learn | benchmarks/bench_plot_fastkmeans.py | 294 | 4676 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
data = nr.random_integers(-50, 50, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
    max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
                                     if "speed" in label.lower()]])
    max_inertia = max([max(i) for i in [
        t for (label, t) in results.iteritems()
        if "speed" not in label.lower()]])
fig = plt.figure('scikit-learn K-Means benchmark results')
for c, (label, timings) in zip('brcy',
sorted(results.iteritems())):
        if 'speed' in label.lower():
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(results_2.iteritems())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
| bsd-3-clause |
TomAugspurger/pandas | pandas/core/dtypes/base.py | 1 | 10873 | """
Extend pandas with custom array types.
"""
from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type
import numpy as np
from pandas._typing import DtypeObj
from pandas.errors import AbstractMethodError
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
if TYPE_CHECKING:
from pandas.core.arrays import ExtensionArray # noqa: F401
class ExtensionDtype:
"""
A custom data type, to be paired with an ExtensionArray.
.. versionadded:: 0.23.0
See Also
--------
extensions.register_extension_dtype
extensions.ExtensionArray
Notes
-----
The interface includes the following abstract methods that must
be implemented by subclasses:
* type
* name
The following attributes and methods influence the behavior of the dtype in
pandas operations
* _is_numeric
* _is_boolean
* _get_common_dtype
Optionally one can override construct_array_type for construction
with the name of this dtype via the Registry. See
:meth:`extensions.register_extension_dtype`.
* construct_array_type
The `na_value` class attribute can be used to set the default NA value
for this type. :attr:`numpy.nan` is used by default.
ExtensionDtypes are required to be hashable. The base class provides
a default implementation, which relies on the ``_metadata`` class
attribute. ``_metadata`` should be a tuple containing the strings
that define your data type. For example, with ``PeriodDtype`` that's
the ``freq`` attribute.
**If you have a parametrized dtype you should set the ``_metadata``
class property**.
Ideally, the attributes in ``_metadata`` will match the
parameters to your ``ExtensionDtype.__init__`` (if any). If any of
the attributes in ``_metadata`` don't implement the standard
``__eq__`` or ``__hash__``, the default implementations here will not
work.
.. versionchanged:: 0.24.0
Added ``_metadata``, ``__hash__``, and changed the default definition
of ``__eq__``.
For interaction with Apache Arrow (pyarrow), a ``__from_arrow__`` method
can be implemented: this method receives a pyarrow Array or ChunkedArray
as only argument and is expected to return the appropriate pandas
ExtensionArray for this dtype and the passed values::
class ExtensionDtype:
def __from_arrow__(
self, array: Union[pyarrow.Array, pyarrow.ChunkedArray]
) -> ExtensionArray:
...
This class does not inherit from 'abc.ABCMeta' for performance reasons.
Methods and properties required by the interface raise
``pandas.errors.AbstractMethodError`` and no ``register`` method is
provided for registering virtual subclasses.
"""
_metadata: Tuple[str, ...] = ()
def __str__(self) -> str:
return self.name
def __eq__(self, other: Any) -> bool:
"""
Check whether 'other' is equal to self.
By default, 'other' is considered equal if either
* it's a string matching 'self.name'.
        * it's an instance of this type and all of the
          attributes in ``self._metadata`` are equal between
`self` and `other`.
Parameters
----------
other : Any
Returns
-------
bool
"""
if isinstance(other, str):
try:
other = self.construct_from_string(other)
except TypeError:
return False
if isinstance(other, type(self)):
return all(
getattr(self, attr) == getattr(other, attr) for attr in self._metadata
)
return False
def __hash__(self) -> int:
return hash(tuple(getattr(self, attr) for attr in self._metadata))
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
@property
def na_value(self) -> object:
"""
Default NA value to use for this type.
This is used in e.g. ExtensionArray.take. This should be the
user-facing "boxed" version of the NA value, not the physical NA value
for storage. e.g. for JSONArray, this is an empty dictionary.
"""
return np.nan
@property
def type(self) -> Type:
"""
The scalar type for the array, e.g. ``int``
It's expected ``ExtensionArray[item]`` returns an instance
of ``ExtensionDtype.type`` for scalar ``item``, assuming
that value is valid (not NA). NA values do not need to be
instances of `type`.
"""
raise AbstractMethodError(self)
@property
def kind(self) -> str:
"""
A character code (one of 'biufcmMOSUV'), default 'O'
This should match the NumPy dtype used when the array is
converted to an ndarray, which is probably 'O' for object if
the extension type cannot be represented as a built-in NumPy
type.
See Also
--------
numpy.dtype.kind
"""
return "O"
@property
def name(self) -> str:
"""
A string identifying the data type.
Will be used for display in, e.g. ``Series.dtype``
"""
raise AbstractMethodError(self)
@property
def names(self) -> Optional[List[str]]:
"""
Ordered list of field names, or None if there are no fields.
This is for compatibility with NumPy arrays, and may be removed in the
future.
"""
return None
@classmethod
def construct_array_type(cls) -> Type["ExtensionArray"]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
raise NotImplementedError
@classmethod
def construct_from_string(cls, string: str):
r"""
Construct this type from a string.
This is useful mainly for data types that accept parameters.
For example, a period dtype accepts a frequency parameter that
can be set as ``period[H]`` (where H means hourly frequency).
By default, in the abstract class, just the name of the type is
expected. But subclasses can overwrite this method to accept
parameters.
Parameters
----------
string : str
The name of the type, for example ``category``.
Returns
-------
ExtensionDtype
Instance of the dtype.
Raises
------
TypeError
If a class cannot be constructed from this 'string'.
Examples
--------
For extension dtypes with arguments the following may be an
adequate implementation.
>>> @classmethod
... def construct_from_string(cls, string):
... pattern = re.compile(r"^my_type\[(?P<arg_name>.+)\]$")
... match = pattern.match(string)
... if match:
... return cls(**match.groupdict())
... else:
... raise TypeError(
... f"Cannot construct a '{cls.__name__}' from '{string}'"
... )
"""
if not isinstance(string, str):
raise TypeError(
f"'construct_from_string' expects a string, got {type(string)}"
)
# error: Non-overlapping equality check (left operand type: "str", right
# operand type: "Callable[[ExtensionDtype], str]") [comparison-overlap]
assert isinstance(cls.name, str), (cls, type(cls.name))
if string != cls.name:
raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
return cls()
@classmethod
def is_dtype(cls, dtype: object) -> bool:
"""
Check if we match 'dtype'.
Parameters
----------
dtype : object
The object to check.
Returns
-------
bool
Notes
-----
The default implementation is True if
1. ``cls.construct_from_string(dtype)`` is an instance
of ``cls``.
2. ``dtype`` is an object and is an instance of ``cls``
3. ``dtype`` has a ``dtype`` attribute, and any of the above
conditions is true for ``dtype.dtype``.
"""
dtype = getattr(dtype, "dtype", dtype)
if isinstance(dtype, (ABCSeries, ABCIndexClass, ABCDataFrame, np.dtype)):
# https://github.com/pandas-dev/pandas/issues/22960
# avoid passing data to `construct_from_string`. This could
# cause a FutureWarning from numpy about failing elementwise
# comparison from, e.g., comparing DataFrame == 'category'.
return False
elif dtype is None:
return False
elif isinstance(dtype, cls):
return True
if isinstance(dtype, str):
try:
return cls.construct_from_string(dtype) is not None
except TypeError:
return False
return False
@property
def _is_numeric(self) -> bool:
"""
Whether columns with this dtype should be considered numeric.
By default ExtensionDtypes are assumed to be non-numeric.
They'll be excluded from operations that exclude non-numeric
columns, like (groupby) reductions, plotting, etc.
"""
return False
@property
def _is_boolean(self) -> bool:
"""
Whether this dtype should be considered boolean.
        By default, ExtensionDtypes are assumed to be non-boolean.
Setting this to True will affect the behavior of several places,
e.g.
* is_bool
* boolean indexing
Returns
-------
bool
"""
return False
def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]:
"""
Return the common dtype, if one exists.
Used in `find_common_type` implementation. This is for example used
to determine the resulting dtype in a concat operation.
If no common dtype exists, return None (which gives the other dtypes
the chance to determine a common dtype). If all dtypes in the list
return None, then the common dtype will be "object" dtype (this means
it is never needed to return "object" dtype from this method itself).
Parameters
----------
dtypes : list of dtypes
The dtypes for which to determine a common dtype. This is a list
of np.dtype or ExtensionDtype instances.
Returns
-------
Common dtype (np.dtype or ExtensionDtype) or None
"""
if len(set(dtypes)) == 1:
# only itself
return self
else:
return None
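# Hedged sketch (illustrative only, not part of pandas): a minimal
# parametrized ExtensionDtype that satisfies the abstract interface
# described above. The dtype name `_UnitDtype` and its `unit` parameter are
# assumptions made purely for illustration; equality and hashing come from
# the `_metadata` mechanism documented in the class docstring.
class _UnitDtype(ExtensionDtype):
    """Toy dtype parametrized by a unit string such as 'm' or 's'."""
    _metadata = ("unit",)
    def __init__(self, unit: str = "m"):
        self.unit = unit
    @property
    def type(self) -> Type:
        # scalar values of this toy dtype are plain Python floats
        return float
    @property
    def name(self) -> str:
        return f"unit[{self.unit}]"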
| bsd-3-clause |
CCS-Lab/hBayesDM | Python/hbayesdm/preprocess_funcs.py | 1 | 32027 | import os
import numpy as np
import pandas as pd
from hbayesdm.base import PATH_COMMON
def alt_preprocess_func(self, raw_data, general_info, additional_args):
# Iterate through grouped_data
subj_group = iter(general_info['grouped_data'])
# Use general_info(s) about raw_data
# subjs = general_info['subjs']
n_subj = general_info['n_subj']
t_subjs = general_info['t_subjs']
t_max = general_info['t_max']
# Initialize (model-specific) data arrays
choice = np.full((n_subj, t_max), -1, dtype=int)
outcome = np.full((n_subj, t_max), 0, dtype=float)
blue_punish = np.full((n_subj, t_max), 0, dtype=float)
orange_punish = np.full((n_subj, t_max), 0, dtype=float)
# Write from subj_data to the data arrays
for s in range(n_subj):
_, subj_data = next(subj_group)
t = t_subjs[s]
choice[s][:t] = subj_data['choice']
outcome[s][:t] = subj_data['outcome']
blue_punish[s][:t] = subj_data['bluepunish']
orange_punish[s][:t] = subj_data['orangepunish']
# Wrap into a dict for pystan
data_dict = {
'N': n_subj,
'T': t_max,
'Tsubj': t_subjs,
'choice': choice,
'outcome': outcome,
'bluePunish': blue_punish,
'orangePunish': orange_punish,
}
# Returned data_dict will directly be passed to pystan
return data_dict
def bandit2arm_preprocess_func(self, raw_data, general_info, additional_args):
# Iterate through grouped_data
subj_group = iter(general_info['grouped_data'])
# Use general_info(s) about raw_data
# subjs = general_info['subjs']
n_subj = general_info['n_subj']
t_subjs = general_info['t_subjs']
t_max = general_info['t_max']
# Initialize (model-specific) data arrays
choice = np.full((n_subj, t_max), -1, dtype=int)
outcome = np.full((n_subj, t_max), 0, dtype=float)
# Write from subj_data to the data arrays
for s in range(n_subj):
_, subj_data = next(subj_group)
t = t_subjs[s]
choice[s][:t] = subj_data['choice']
outcome[s][:t] = subj_data['outcome']
# Wrap into a dict for pystan
data_dict = {
'N': n_subj,
'T': t_max,
'Tsubj': t_subjs,
'choice': choice,
'outcome': outcome,
}
# Returned data_dict will directly be passed to pystan
return data_dict
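# Hedged usage sketch (illustrative only, not part of hBayesDM): builds the
# minimal `general_info` layout these preprocess functions expect and runs
# bandit2arm_preprocess_func on a tiny two-subject data set. The column
# names follow the lower-cased convention used throughout this module; the
# demo values themselves are assumptions.
def _demo_bandit2arm_preprocess():
    demo = pd.DataFrame({
        'subjid': [1, 1, 2, 2, 2],
        'choice': [1, 2, 1, 1, 2],
        'outcome': [1.0, -1.0, 1.0, 1.0, -1.0],
    })
    grouped = demo.groupby('subjid')
    general_info = {
        'grouped_data': grouped,
        'subjs': list(grouped.groups),
        'n_subj': grouped.ngroups,
        't_subjs': grouped.size().to_numpy(),
        't_max': int(grouped.size().max()),
    }
    # `self` is unused inside the preprocess functions, so None suffices here
    return bandit2arm_preprocess_func(None, demo, general_info, {})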
def bandit4arm_preprocess_func(self, raw_data, general_info, additional_args):
# Iterate through grouped_data
subj_group = iter(general_info['grouped_data'])
# Use general_info(s) about raw_data
# subjs = general_info['subjs']
n_subj = general_info['n_subj']
t_subjs = general_info['t_subjs']
t_max = general_info['t_max']
# Initialize (model-specific) data arrays
rew = np.full((n_subj, t_max), 0, dtype=float)
los = np.full((n_subj, t_max), 0, dtype=float)
choice = np.full((n_subj, t_max), -1, dtype=int)
# Write from subj_data to the data arrays
for s in range(n_subj):
_, subj_data = next(subj_group)
t = t_subjs[s]
rew[s][:t] = subj_data['gain']
los[s][:t] = -1 * np.abs(subj_data['loss']) # Use abs
choice[s][:t] = subj_data['choice']
# Wrap into a dict for pystan
data_dict = {
'N': n_subj,
'T': t_max,
'Tsubj': t_subjs,
'rew': rew,
'los': los,
'choice': choice,
}
# Returned data_dict will directly be passed to pystan
return data_dict
def bandit4arm2_preprocess_func(self, raw_data, general_info, additional_args):
# Iterate through grouped_data
subj_group = iter(general_info['grouped_data'])
# Use general_info(s) about raw_data
# subjs = general_info['subjs']
n_subj = general_info['n_subj']
t_subjs = general_info['t_subjs']
t_max = general_info['t_max']
# Initialize (model-specific) data arrays
choice = np.full((n_subj, t_max), -1, dtype=int)
outcome = np.full((n_subj, t_max), 0, dtype=float)
# Write from subj_data to the data arrays
for s in range(n_subj):
_, subj_data = next(subj_group)
t = t_subjs[s]
choice[s][:t] = subj_data['choice']
outcome[s][:t] = subj_data['outcome']
# Wrap into a dict for pystan
data_dict = {
'N': n_subj,
'T': t_max,
'Tsubj': t_subjs,
'choice': choice,
'outcome': outcome,
}
# Returned data_dict will directly be passed to pystan
return data_dict
def bart_preprocess_func(self, raw_data, general_info, additional_args):
# Iterate through grouped_data
subj_group = iter(general_info['grouped_data'])
# Use general_info(s) about raw_data
# subjs = general_info['subjs']
n_subj = general_info['n_subj']
t_subjs = general_info['t_subjs']
t_max = general_info['t_max']
# Initialize (model-specific) data arrays
pumps = np.full((n_subj, t_max), 0, dtype=int)
explosion = np.full((n_subj, t_max), 0, dtype=int)
# Write from subj_data to the data arrays
for s in range(n_subj):
_, subj_data = next(subj_group)
t = t_subjs[s]
pumps[s][:t] = subj_data['pumps']
explosion[s][:t] = subj_data['explosion']
# Wrap into a dict for pystan
data_dict = {
'N': n_subj,
'T': t_max,
'Tsubj': t_subjs,
'P': np.max(pumps) + 1,
'pumps': pumps,
'explosion': explosion,
}
# Returned data_dict will directly be passed to pystan
return data_dict
def choiceRT_preprocess_func(self, raw_data, general_info, additional_args):
# Use general_info(s) about raw_data
# subjs = general_info['subjs']
n_subj = general_info['n_subj']
# Number of upper/lower boundary responses
Nu = np.full(n_subj, 0, dtype=int)
Nl = np.full(n_subj, 0, dtype=int)
# Write Nu, Nl
subj_group = iter(general_info['grouped_data'])
for s in range(n_subj):
_, subj_data = next(subj_group)
value_counts = subj_data['choice'].value_counts()
Nu[s] = value_counts.at[2]
Nl[s] = value_counts.at[1]
# Reaction-times for upper/lower boundary responses
RTu = np.full((n_subj, np.max(Nu)), -1, dtype=float)
RTl = np.full((n_subj, np.max(Nl)), -1, dtype=float)
# Write RTu, RTl
subj_group = iter(general_info['grouped_data'])
for s in range(n_subj):
_, subj_data = next(subj_group)
if Nu[s] > 0:
RTu[s][:Nu[s]] = subj_data['rt'][subj_data['choice'] == 2]
if Nl[s] > 0:
RTl[s][:Nl[s]] = subj_data['rt'][subj_data['choice'] == 1]
# Minimum reaction time
minRT = np.full(n_subj, -1, dtype=float)
# Write minRT
subj_group = iter(general_info['grouped_data'])
for s in range(n_subj):
_, subj_data = next(subj_group)
minRT[s] = min(subj_data['rt'])
# Use additional_args if provided
RTbound = additional_args.get('RTbound', 0.1)
# Wrap into a dict for pystan
data_dict = {
'N': n_subj,
'Nu_max': np.max(Nu),
'Nl_max': np.max(Nl),
'Nu': Nu,
'Nl': Nl,
'RTu': RTu,
'RTl': RTl,
'minRT': minRT,
'RTbound': RTbound,
}
# Returned data_dict will directly be passed to pystan
return data_dict
def choiceRT_single_preprocess_func(self, raw_data, general_info, additional_args):
# DataFrames per upper/lower boundary responses
df_upper = raw_data.loc[raw_data['choice'] == 2]
df_lower = raw_data.loc[raw_data['choice'] == 1]
# Number of upper/lower boundary responses
Nu = len(df_upper)
Nl = len(df_lower)
# Reaction-times for upper/lower boundary responses
RTu = df_upper['rt'].to_numpy()
RTl = df_lower['rt'].to_numpy()
# Minimum reaction time
minRT = min(raw_data['rt'])
# Use additional_args if provided
RTbound = additional_args.get('RTbound', 0.1)
# Wrap into a dict for pystan
data_dict = {
'Nu': Nu,
'Nl': Nl,
'RTu': RTu,
'RTl': RTl,
'minRT': minRT,
'RTbound': RTbound,
}
# Returned data_dict will directly be passed to pystan
return data_dict
def cra_preprocess_func(self, raw_data, general_info, additional_args):
# Iterate through grouped_data
subj_group = iter(general_info['grouped_data'])
# Use general_info(s) about raw_data
# subjs = general_info['subjs']
n_subj = general_info['n_subj']
t_subjs = general_info['t_subjs']
t_max = general_info['t_max']
# Initialize (model-specific) data arrays
choice = np.full((n_subj, t_max), 0, dtype=int)
prob = np.full((n_subj, t_max), 0, dtype=float)
ambig = np.full((n_subj, t_max), 0, dtype=float)
reward_var = np.full((n_subj, t_max), 0, dtype=float)
reward_fix = np.full((n_subj, t_max), 0, dtype=float)
# Write from subj_data to the data arrays
for s in range(n_subj):
_, subj_data = next(subj_group)
t = t_subjs[s]
choice[s][:t] = subj_data['choice']
prob[s][:t] = subj_data['prob']
ambig[s][:t] = subj_data['ambig']
reward_var[s][:t] = subj_data['rewardvar']
reward_fix[s][:t] = subj_data['rewardfix']
# Wrap into a dict for pystan
data_dict = {
'N': n_subj,
'T': t_max,
'Tsubj': t_subjs,
'choice': choice,
'prob': prob,
'ambig': ambig,
'reward_var': reward_var,
'reward_fix': reward_fix,
}
# Returned data_dict will directly be passed to pystan
return data_dict
def dbdm_preprocess_func(self, raw_data, general_info, additional_args):
# Iterate through grouped_data
subj_group = iter(general_info['grouped_data'])
# Use general_info(s) about raw_data
# subjs = general_info['subjs']
n_subj = general_info['n_subj']
t_subjs = general_info['t_subjs']
t_max = general_info['t_max']
# Initialize (model-specific) data arrays
opt1hprob = np.full((n_subj, t_max), 0, dtype=float)
opt2hprob = np.full((n_subj, t_max), 0, dtype=float)
opt1hval = np.full((n_subj, t_max), 0, dtype=float)
opt1lval = np.full((n_subj, t_max), 0, dtype=float)
opt2hval = np.full((n_subj, t_max), 0, dtype=float)
opt2lval = np.full((n_subj, t_max), 0, dtype=float)
choice = np.full((n_subj, t_max), -1, dtype=int)
# Write from subj_data to the data arrays
for s in range(n_subj):
_, subj_data = next(subj_group)
t = t_subjs[s]
opt1hprob[s][:t] = subj_data['opt1hprob']
opt2hprob[s][:t] = subj_data['opt2hprob']
opt1hval[s][:t] = subj_data['opt1hval']
opt1lval[s][:t] = subj_data['opt1lval']
opt2hval[s][:t] = subj_data['opt2hval']
opt2lval[s][:t] = subj_data['opt2lval']
choice[s][:t] = subj_data['choice']
# Wrap into a dict for pystan
data_dict = {
'N': n_subj,
'T': t_max,
'Tsubj': t_subjs,
'opt1hprob': opt1hprob,
'opt2hprob': opt2hprob,
'opt1hval': opt1hval,
'opt1lval': opt1lval,
'opt2hval': opt2hval,
'opt2lval': opt2lval,
'choice': choice,
}
# Returned data_dict will directly be passed to pystan
return data_dict
def dd_preprocess_func(self, raw_data, general_info, additional_args):
# Iterate through grouped_data
subj_group = iter(general_info['grouped_data'])
# Use general_info(s) about raw_data
# subjs = general_info['subjs']
n_subj = general_info['n_subj']
t_subjs = general_info['t_subjs']
t_max = general_info['t_max']
# Initialize (model-specific) data arrays
delay_later = np.full((n_subj, t_max), 0, dtype=float)
amount_later = np.full((n_subj, t_max), 0, dtype=float)
delay_sooner = np.full((n_subj, t_max), 0, dtype=float)
amount_sooner = np.full((n_subj, t_max), 0, dtype=float)
choice = np.full((n_subj, t_max), -1, dtype=int)
# Write from subj_data to the data arrays
for s in range(n_subj):
_, subj_data = next(subj_group)
t = t_subjs[s]
delay_later[s][:t] = subj_data['delaylater']
amount_later[s][:t] = subj_data['amountlater']
delay_sooner[s][:t] = subj_data['delaysooner']
amount_sooner[s][:t] = subj_data['amountsooner']
choice[s][:t] = subj_data['choice']
# Wrap into a dict for pystan
data_dict = {
'N': n_subj,
'T': t_max,
'Tsubj': t_subjs,
'delay_later': delay_later,
'amount_later': amount_later,
'delay_sooner': delay_sooner,
'amount_sooner': amount_sooner,
'choice': choice,
}
# Returned data_dict will directly be passed to pystan
return data_dict
def dd_single_preprocess_func(self, raw_data, general_info, additional_args):
# Use general_info about raw_data
t_subjs = general_info['t_max'] # Note: use 't_max' not 't_subjs'
# Extract from raw_data
delay_later = raw_data['delaylater']
amount_later = raw_data['amountlater']
delay_sooner = raw_data['delaysooner']
amount_sooner = raw_data['amountsooner']
choice = raw_data['choice']
# Wrap into a dict for pystan
data_dict = {
'Tsubj': t_subjs,
'delay_later': delay_later,
'amount_later': amount_later,
'delay_sooner': delay_sooner,
'amount_sooner': amount_sooner,
'choice': choice,
}
# Returned data_dict will directly be passed to pystan
return data_dict
def gng_preprocess_func(self, raw_data, general_info, additional_args):
# Iterate through grouped_data
subj_group = iter(general_info['grouped_data'])
# Use general_info(s) about raw_data
# subjs = general_info['subjs']
n_subj = general_info['n_subj']
t_subjs = general_info['t_subjs']
t_max = general_info['t_max']
# Initialize (model-specific) data arrays
cue = np.full((n_subj, t_max), 1, dtype=int)
pressed = np.full((n_subj, t_max), -1, dtype=int)
outcome = np.full((n_subj, t_max), 0, dtype=float)
# Write from subj_data to the data arrays
for s in range(n_subj):
_, subj_data = next(subj_group)
t = t_subjs[s]
cue[s][:t] = subj_data['cue']
pressed[s][:t] = subj_data['keypressed']
outcome[s][:t] = subj_data['outcome']
# Wrap into a dict for pystan
data_dict = {
'N': n_subj,
'T': t_max,
'Tsubj': t_subjs,
'cue': cue,
'pressed': pressed,
'outcome': outcome,
}
# Returned data_dict will directly be passed to pystan
return data_dict
def igt_preprocess_func(self, raw_data, general_info, additional_args):
# Iterate through grouped_data
subj_group = iter(general_info['grouped_data'])
# Use general_info(s) about raw_data
# subjs = general_info['subjs']
n_subj = general_info['n_subj']
t_subjs = general_info['t_subjs']
t_max = general_info['t_max']
# Initialize (model-specific) data arrays
y_data = np.full((n_subj, t_max), -1, dtype=int)
rl_matrix = np.full((n_subj, t_max), 0, dtype=float)
# Write from subj_data to the data arrays
for s in range(n_subj):
_, subj_data = next(subj_group)
t = t_subjs[s]
y_data[s][:t] = subj_data['choice']
rl_matrix[s][:t] = subj_data['gain'] - np.abs(subj_data['loss'])
# Use additional_args if provided
payscale = additional_args.get('payscale', 100)
# Wrap into a dict for pystan
data_dict = {
'N': n_subj,
'T': t_max,
'Tsubj': t_subjs,
'choice': y_data,
'outcome': rl_matrix / payscale,
'sign_out': np.sign(rl_matrix),
}
# Returned data_dict will directly be passed to pystan
return data_dict
def peer_preprocess_func(self, raw_data, general_info, additional_args):
# Iterate through grouped_data
subj_group = iter(general_info['grouped_data'])
# Use general_info(s) about raw_data
# subjs = general_info['subjs']
n_subj = general_info['n_subj']
t_subjs = general_info['t_subjs']
t_max = general_info['t_max']
# Initialize (model-specific) data arrays
condition = np.full((n_subj, t_max), 0, dtype=int)
p_gamble = np.full((n_subj, t_max), 0, dtype=float)
safe_Hpayoff = np.full((n_subj, t_max), 0, dtype=float)
safe_Lpayoff = np.full((n_subj, t_max), 0, dtype=float)
risky_Hpayoff = np.full((n_subj, t_max), 0, dtype=float)
risky_Lpayoff = np.full((n_subj, t_max), 0, dtype=float)
choice = np.full((n_subj, t_max), -1, dtype=int)
# Write from subj_data to the data arrays
for s in range(n_subj):
_, subj_data = next(subj_group)
t = t_subjs[s]
condition[s][:t] = subj_data['condition']
p_gamble[s][:t] = subj_data['pgamble']
safe_Hpayoff[s][:t] = subj_data['safehpayoff']
safe_Lpayoff[s][:t] = subj_data['safelpayoff']
risky_Hpayoff[s][:t] = subj_data['riskyhpayoff']
risky_Lpayoff[s][:t] = subj_data['riskylpayoff']
choice[s][:t] = subj_data['choice']
# Wrap into a dict for pystan
data_dict = {
'N': n_subj,
'T': t_max,
'Tsubj': t_subjs,
'condition': condition,
'p_gamble': p_gamble,
'safe_Hpayoff': safe_Hpayoff,
'safe_Lpayoff': safe_Lpayoff,
'risky_Hpayoff': risky_Hpayoff,
'risky_Lpayoff': risky_Lpayoff,
'choice': choice,
}
# Returned data_dict will directly be passed to pystan
return data_dict
def prl_preprocess_func(self, raw_data, general_info, additional_args):
# Iterate through grouped_data
subj_group = iter(general_info['grouped_data'])
# Use general_info(s) about raw_data
# subjs = general_info['subjs']
n_subj = general_info['n_subj']
t_subjs = general_info['t_subjs']
t_max = general_info['t_max']
# Initialize (model-specific) data arrays
choice = np.full((n_subj, t_max), -1, dtype=int)
outcome = np.full((n_subj, t_max), 0, dtype=float)
# Write from subj_data to the data arrays
for s in range(n_subj):
_, subj_data = next(subj_group)
t = t_subjs[s]
choice[s][:t] = subj_data['choice']
outcome[s][:t] = np.sign(subj_data['outcome']) # Use sign
# Wrap into a dict for pystan
data_dict = {
'N': n_subj,
'T': t_max,
'Tsubj': t_subjs,
'choice': choice,
'outcome': outcome,
}
# Returned data_dict will directly be passed to pystan
return data_dict
def prl_multipleB_preprocess_func(self, raw_data, general_info, additional_args):
# Iterate through grouped_data
subj_block_group = iter(general_info['grouped_data'])
# Use general_info(s) about raw_data
# subjs = general_info['subjs']
n_subj = general_info['n_subj']
b_subjs = general_info['b_subjs']
b_max = general_info['b_max']
t_subjs = general_info['t_subjs']
t_max = general_info['t_max']
# Initialize (model-specific) data arrays
choice = np.full((n_subj, b_max, t_max), -1, dtype=int)
outcome = np.full((n_subj, b_max, t_max), 0, dtype=float)
# Write from subj_block_data to the data arrays
for s in range(n_subj):
for b in range(b_subjs[s]):
_, subj_block_data = next(subj_block_group)
t = t_subjs[s][b]
choice[s][b][:t] = subj_block_data['choice']
outcome[s][b][:t] = np.sign(subj_block_data['outcome']) # Use sign
# Wrap into a dict for pystan
data_dict = {
'N': n_subj,
'B': b_max,
'Bsubj': b_subjs,
'T': t_max,
'Tsubj': t_subjs,
'choice': choice,
'outcome': outcome,
}
# Returned data_dict will directly be passed to pystan
return data_dict
def pst_preprocess_func(self, raw_data, general_info, additional_args):
# Iterate through grouped_data
subj_group = iter(general_info['grouped_data'])
# Use general_info(s) about raw_data
# subjs = general_info['subjs']
n_subj = general_info['n_subj']
t_subjs = general_info['t_subjs']
t_max = general_info['t_max']
# Initialize (model-specific) data arrays
option1 = np.full((n_subj, t_max), -1, dtype=int)
option2 = np.full((n_subj, t_max), -1, dtype=int)
choice = np.full((n_subj, t_max), -1, dtype=int)
reward = np.full((n_subj, t_max), -1, dtype=float)
# Write from subj_data to the data arrays
for s in range(n_subj):
_, subj_data = next(subj_group)
t = t_subjs[s]
option1[s][:t] = subj_data['type'] // 10
option2[s][:t] = subj_data['type'] % 10
choice[s][:t] = subj_data['choice']
reward[s][:t] = subj_data['reward']
# Wrap into a dict for pystan
data_dict = {
'N': n_subj,
'T': t_max,
'Tsubj': t_subjs,
'option1': option1,
'option2': option2,
'choice': choice,
'reward': reward,
}
# Returned data_dict will directly be passed to pystan
return data_dict
def ra_preprocess_func(self, raw_data, general_info, additional_args):
# Iterate through grouped_data
subj_group = iter(general_info['grouped_data'])
# Use general_info(s) about raw_data
# subjs = general_info['subjs']
n_subj = general_info['n_subj']
t_subjs = general_info['t_subjs']
t_max = general_info['t_max']
# Initialize (model-specific) data arrays
gain = np.full((n_subj, t_max), 0, dtype=float)
loss = np.full((n_subj, t_max), 0, dtype=float)
cert = np.full((n_subj, t_max), 0, dtype=float)
gamble = np.full((n_subj, t_max), -1, dtype=int)
# Write from subj_data to the data arrays
for s in range(n_subj):
_, subj_data = next(subj_group)
t = t_subjs[s]
gain[s][:t] = subj_data['gain']
loss[s][:t] = np.abs(subj_data['loss']) # Use abs
cert[s][:t] = subj_data['cert']
gamble[s][:t] = subj_data['gamble']
# Wrap into a dict for pystan
data_dict = {
'N': n_subj,
'T': t_max,
'Tsubj': t_subjs,
'gain': gain,
'loss': loss,
'cert': cert,
'gamble': gamble,
}
# Returned data_dict will directly be passed to pystan
return data_dict
def rdt_preprocess_func(self, raw_data, general_info, additional_args):
# Iterate through grouped_data
subj_group = iter(general_info['grouped_data'])
# Use general_info(s) about raw_data
# subjs = general_info['subjs']
n_subj = general_info['n_subj']
t_subjs = general_info['t_subjs']
t_max = general_info['t_max']
# Initialize (model-specific) data arrays
gain = np.full((n_subj, t_max), 0, dtype=float)
loss = np.full((n_subj, t_max), 0, dtype=float)
cert = np.full((n_subj, t_max), 0, dtype=float)
type = np.full((n_subj, t_max), -1, dtype=int)
gamble = np.full((n_subj, t_max), -1, dtype=int)
outcome = np.full((n_subj, t_max), 0, dtype=float)
happy = np.full((n_subj, t_max), 0, dtype=float)
RT_happy = np.full((n_subj, t_max), 0, dtype=float)
# Write from subj_data to the data arrays
for s in range(n_subj):
_, subj_data = next(subj_group)
t = t_subjs[s]
gain[s][:t] = subj_data['gain']
loss[s][:t] = np.abs(subj_data['loss']) # Use abs
cert[s][:t] = subj_data['cert']
type[s][:t] = subj_data['type']
gamble[s][:t] = subj_data['gamble']
outcome[s][:t] = subj_data['outcome']
happy[s][:t] = subj_data['happy']
RT_happy[s][:t] = subj_data['rthappy']
# Wrap into a dict for pystan
data_dict = {
'N': n_subj,
'T': t_max,
'Tsubj': t_subjs,
'gain': gain,
'loss': loss,
'cert': cert,
'type': type,
'gamble': gamble,
'outcome': outcome,
'happy': happy,
'RT_happy': RT_happy,
}
# Returned data_dict will directly be passed to pystan
return data_dict
def task2AFC_preprocess_func(self, raw_data, general_info, additional_args):
# Iterate through grouped_data
subj_group = iter(general_info['grouped_data'])
# Use general_info(s) about raw_data
# subjs = general_info['subjs']
n_subj = general_info['n_subj']
    # Initialize (model-specific) data arrays
    h = np.full((n_subj), 0, dtype=int)
    f = np.full((n_subj), 0, dtype=int)
    signal = np.full((n_subj), 0, dtype=int)
    noise = np.full((n_subj), 0, dtype=int)
    # Write data to the data arrays
for s in range(n_subj):
_, subj_data = next(subj_group)
for stim in subj_data['stimulus']:
if stim == 1:
signal[s] += 1
elif stim == 0:
noise[s] += 1
for stim, resp in zip(subj_data['stimulus'], subj_data['response']):
if stim == 1 and resp == 1:
h[s] += 1
elif stim == 0 and resp == 1:
f[s] += 1
# Wrap into a dict for pystan
data_dict = {
'N' : n_subj,
'h' : h,
'f' : f,
'signal' : signal,
'noise' : noise,
}
# Returned data_dict will directly be passed to pystan
return data_dict
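# Hedged sketch (illustrative only, not part of hBayesDM): the hit /
# false-alarm counting performed in task2AFC_preprocess_func, shown on a
# single subject's stimulus/response vectors with made-up values.
def _demo_task2afc_counts():
    stimulus = np.array([1, 1, 0, 0, 1])
    response = np.array([1, 0, 1, 0, 1])
    hits = int(np.sum((stimulus == 1) & (response == 1)))          # 2 here
    false_alarms = int(np.sum((stimulus == 0) & (response == 1)))  # 1 here
    return hits, false_alarms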
def ts_preprocess_func(self, raw_data, general_info, additional_args):
# Iterate through grouped_data
subj_group = iter(general_info['grouped_data'])
# Use general_info(s) about raw_data
# subjs = general_info['subjs']
n_subj = general_info['n_subj']
t_subjs = general_info['t_subjs']
t_max = general_info['t_max']
# Initialize (model-specific) data arrays
level1_choice = np.full((n_subj, t_max), 1, dtype=int)
level2_choice = np.full((n_subj, t_max), 1, dtype=int)
reward = np.full((n_subj, t_max), 0, dtype=int)
# Write from subj_data to the data arrays
for s in range(n_subj):
_, subj_data = next(subj_group)
t = t_subjs[s]
level1_choice[s][:t] = subj_data['level1choice']
level2_choice[s][:t] = subj_data['level2choice']
reward[s][:t] = subj_data['reward']
# Use additional_args if provided
trans_prob = additional_args.get('trans_prob', 0.7)
# Wrap into a dict for pystan
data_dict = {
'N': n_subj,
'T': t_max,
'Tsubj': t_subjs,
'level1_choice': level1_choice,
'level2_choice': level2_choice,
'reward': reward,
'trans_prob': trans_prob,
}
# Returned data_dict will directly be passed to pystan
return data_dict
def ug_preprocess_func(self, raw_data, general_info, additional_args):
# Iterate through grouped_data
subj_group = iter(general_info['grouped_data'])
# Use general_info(s) about raw_data
# subjs = general_info['subjs']
n_subj = general_info['n_subj']
t_subjs = general_info['t_subjs']
t_max = general_info['t_max']
# Initialize (model-specific) data arrays
offer = np.full((n_subj, t_max), 0, dtype=float)
accept = np.full((n_subj, t_max), -1, dtype=int)
# Write from subj_data to the data arrays
for s in range(n_subj):
_, subj_data = next(subj_group)
t = t_subjs[s]
offer[s][:t] = subj_data['offer']
accept[s][:t] = subj_data['accept']
# Wrap into a dict for pystan
data_dict = {
'N': n_subj,
'T': t_max,
'Tsubj': t_subjs,
'offer': offer,
'accept': accept,
}
# Returned data_dict will directly be passed to pystan
return data_dict
def wcs_preprocess_func(self, raw_data, general_info, additional_args):
# Iterate through grouped_data
subj_group = iter(general_info['grouped_data'])
# Use general_info(s) about raw_data
# subjs = general_info['subjs']
n_subj = general_info['n_subj']
t_subjs = general_info['t_subjs']
# t_max = general_info['t_max']
t_max = 128
# Read from predefined answer sheet
answersheet = PATH_COMMON / 'extdata' / 'wcs_answersheet.txt'
answer = pd.read_csv(
answersheet, sep='\t', header=0, index_col=0).to_numpy() - 1
# Initialize data arrays
choice = np.full((n_subj, 4, t_max), 0, dtype=int)
outcome = np.full((n_subj, t_max), -1, dtype=int)
choice_match_att = np.full((n_subj, t_max, 1, 3), 0, dtype=int)
deck_match_rule = np.full((t_max, 3, 4), 0, dtype=float)
# Write choice, outcome, choice_match_att
for s in range(n_subj):
trials = t_subjs[s]
_, subj_data = next(subj_group)
subj_data_choice = subj_data['choice'].to_numpy() - 1
subj_data_outcome = subj_data['outcome'].to_numpy()
for t in range(trials):
c = subj_data_choice[t]
o = subj_data_outcome[t]
choice[s][c][t] = 1
outcome[s][t] = o
choice_match_att[s][t][0][:] = (c == answer[:, t])
# Write deck_match_rule
for t in range(t_max):
for r in range(3):
deck_match_rule[t][r][answer[r][t]] = 1
# Wrap into a dict for pystan
data_dict = {
'N': n_subj,
'T': t_max,
'Tsubj': t_subjs,
'choice': choice,
'outcome': outcome,
'choice_match_att': choice_match_att,
'deck_match_rule': deck_match_rule,
}
# Returned data_dict will directly be passed to pystan
return data_dict
def cgt_preprocess_func(self, raw_data, general_info, additional_args):
# Iterate through grouped_data
subj_group = iter(general_info['grouped_data'])
# Use general_info(s) about raw_data
# subjs = general_info['subjs']
n_subj = general_info['n_subj']
t_subjs = general_info['t_subjs']
t_max = general_info['t_max']
uniq_bets = np.unique(raw_data['percentagestaked'])
n_bets = len(uniq_bets)
bets_asc = np.sort(uniq_bets / 100)
bets_dsc = np.flip(np.sort(uniq_bets / 100))
bet_delay = np.arange(n_bets) / 4
bet_time = raw_data['percentagestaked'] / 100
for b in range(n_bets):
bet_time[bet_time == bets_asc[b]] = b + 1
raw_data['bet_time'] = np.where(raw_data['gambletype'] == 0,
n_bets + 1 - bet_time,
bet_time)
col_chosen = np.full((n_subj, t_max), 0, dtype=int)
bet_chosen = np.full((n_subj, t_max), 0, dtype=int)
prop_red = np.full((n_subj, t_max), 0, dtype=float)
prop_chosen = np.full((n_subj, t_max), 0, dtype=float)
gain = np.full((n_subj, t_max, n_bets), 0, dtype=float)
loss = np.full((n_subj, t_max, n_bets), 0, dtype=float)
for s in range(n_subj):
t = t_subjs[s]
_, subj_data = next(subj_group)
col_chosen[s, :t] = np.where(subj_data['redchosen'] == 1, 1, 2)
bet_chosen[s, :t] = subj_data['bet_time']
prop_red[s, :t] = subj_data['nredboxes'] / 10
prop_chosen[s, :t] = np.where(subj_data['redchosen'] == 1,
prop_red[s][:t],
1 - prop_red[s][:t])
for b in range(n_bets):
gain[s, :t, b] = subj_data['trialinitialpoints'] / 100 \
+ subj_data['trialinitialpoints'] / 100 \
* np.where(subj_data['gambletype'] == 1,
bets_asc[b],
bets_dsc[b])
loss[s, :t, b] = subj_data['trialinitialpoints'] / 100 \
- subj_data['trialinitialpoints'] / 100 \
* np.where(subj_data['gambletype'] == 1,
bets_asc[b],
bets_dsc[b])
# Remove the unnecessary intermediate column
raw_data.drop(columns='bet_time', inplace=True)
# Wrap into a dict for pystan
data_dict = {
'N': n_subj,
'T': t_max,
'B': n_bets,
'Tsubj': t_subjs,
'bet_delay': bet_delay,
'gain': gain,
'loss': loss,
'prop_red': prop_red,
'prop_chosen': prop_chosen,
'col_chosen': col_chosen,
'bet_chosen': bet_chosen
}
# Returned data_dict will directly be passed to pystan
return data_dict
| gpl-3.0 |
yutiansut/QUANTAXIS | QUANTAXIS/QAMarket/QAShipaneBroker.py | 2 | 15645 | # coding:utf-8
import asyncio
import base64
import configparser
import datetime
import json
import os
import urllib
import pandas as pd
import requests
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from QUANTAXIS.QAEngine.QAEvent import QA_Event
from QUANTAXIS.QAMarket.common import (
cn_en_compare,
order_status_cn_en,
trade_towards_cn_en
)
from QUANTAXIS.QAMarket.QABroker import QA_Broker
from QUANTAXIS.QAMarket.QAOrderHandler import QA_OrderHandler
from QUANTAXIS.QAUtil.QADate import QA_util_date_int2str
from QUANTAXIS.QAUtil.QADate_trade import QA_util_get_order_datetime
from QUANTAXIS.QAUtil.QAParameter import (
BROKER_EVENT,
BROKER_TYPE,
ORDER_DIRECTION,
ORDER_MODEL,
ORDER_STATUS
)
from QUANTAXIS.QAUtil.QASetting import setting_path, QA_Setting
DEFAULT_SHIPANE_URL = 'http://127.0.0.1:8888'
DEFAULT_SHIPANE_KEY = ''
class SPE_CONFIG():
def __init__(self, uri=DEFAULT_SHIPANE_URL, key=DEFAULT_SHIPANE_KEY):
self.key = key
self.uri = uri
def get_config_SPE():
config = configparser.ConfigParser()
return SPE_CONFIG(
QA_Setting().get_config('SPE',
'uri',
DEFAULT_SHIPANE_URL),
QA_Setting().get_config('SPE',
'key',
DEFAULT_SHIPANE_KEY)
)
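# Hedged sketch (illustrative only): building an SPE_CONFIG by hand instead
# of reading it from the QUANTAXIS settings file via get_config_SPE(). The
# URI below is simply the default local ShiPanE gateway address and the
# empty key is a placeholder assumption, not a real credential.
def _example_spe_config():
    return SPE_CONFIG(uri='http://127.0.0.1:8888', key='')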
class QA_SPEBroker(QA_Broker):
"""
    1. Query an account:
        if the account exists, return its available cash and positions
        if the given market does not exist or an error occurs, return False
    2. Query all orders:
        on success, return a DataFrame
        on failure, return False
    3. Query open (unfilled) orders
        on success, return a DataFrame
        on failure, return False
    4. Query filled orders
        on success, return a DataFrame
        on failure, return False
    5. Place an order: receive_order/send_order
        receive_order (QAMARKET usage):
            takes a QA_Order instance
            on success, returns the QA_Order carrying its realorder_id and the status ORDER_STATUS.QUEUED
            on failure, returns the QA_Order with the status ORDER_STATUS.FAILED
        send_order (testing usage)
    6. Cancel an order: cancel_order
        on success, return True
        on failure, return the concrete reason in dict/json format
    7. Cancel all orders
        on success, return True
"""
def __init__(self):
super().__init__()
self.name = BROKER_TYPE.SHIPANE
self.order_handler = QA_OrderHandler()
self.setting = get_config_SPE()
self._session = requests
self._endpoint = self.setting.uri
self.key = self.setting.key
#self.account_headers = ['forzen_cash','balance_available','cash_available','pnl_money_today','total_assets','pnl_holding','market_value','money_available']
def run(self, event):
if event.event_type is BROKER_EVENT.RECEIVE_ORDER:
self.order_handler.run(event)
elif event.event_type is BROKER_EVENT.SETTLE:
self.order_handler.run(event)
if event.callback:
event.callback('settle')
def call(self, func, params=''):
try:
if self.key == '':
uri = '{}/api/v1.0/{}?client={}'.format(
self._endpoint,
func,
params.pop('client')
)
else:
uri = '{}/api/v1.0/{}?key={}&client={}'.format(
self._endpoint,
func,
self.key,
params.pop('client')
)
# print(uri)
response = self._session.get(uri, params)
text = response.text
return json.loads(text)
except Exception as e:
# print(e)
if isinstance(e, ConnectionRefusedError):
                print('Lost connection to the host')
print(e)
else:
print(e)
# print(uri)
return None
def call_post(self, func, params={}):
if self.key == '':
uri = '{}/api/v1.0/{}?client={}'.format(
self._endpoint,
func,
params.pop('client')
)
else:
uri = '{}/api/v1.0/{}?key={}&client={}'.format(
self._endpoint,
func,
self.key,
params.pop('client')
)
response = self._session.post(uri, json=params)
text = response.text
return json.loads(text)
def call_delete(self, func, params=''):
if self.key == '':
uri = '{}/api/v1.0/{}?client={}'.format(
self._endpoint,
func,
params.pop('client')
)
else:
uri = '{}/api/v1.0/{}?key={}&client={}'.format(
self._endpoint,
func,
self.key,
params.pop('client')
)
response = self._session.delete(uri)
text = response.text
# print(text)
try:
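            # The literal Chinese string below is the exact response text returned by
            # ShiPanE ("getting the prompt dialog timed out because: the component is
            # empty"); an empty or timed-out response body is treated as success here.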
if text in ['', '获取提示对话框超时,因为:组件为空']:
print('success')
return True
else:
return json.loads(text)
except:
return text
def data_to_df(self, result):
return pd.DataFrame(data=result)
#------ functions
def ping(self):
return self.call("ping", {})
def query_accounts(self, accounts):
return self.call("accounts", {'client': accounts})
def query_positions(self, accounts):
"""查询现金和持仓
Arguments:
accounts {[type]} -- [description]
Returns:
dict-- {'cash_available':xxx,'hold_available':xxx}
"""
try:
data = self.call("positions", {'client': accounts})
if data is not None:
cash_part = data.get('subAccounts', {}).get('人民币', False)
if cash_part:
cash_available = cash_part.get('可用金额', cash_part.get('可用'))
position_part = data.get('dataTable', False)
if position_part:
res = data.get('dataTable', False)
if res:
hold_headers = res['columns']
hold_headers = [
cn_en_compare[item] for item in hold_headers
]
hold_available = pd.DataFrame(
res['rows'],
columns=hold_headers
)
if len(hold_available) == 1 and hold_available.amount[0] in [
None,
'',
0
]:
hold_available = pd.DataFrame(
data=None,
columns=hold_headers
)
return {
'cash_available':
cash_available,
'hold_available':
hold_available.assign(
amount=hold_available.amount.apply(float)
).loc[:,
['code',
'amount']].set_index('code').amount
}
else:
print(data)
return False, 'None ACCOUNT'
except:
return False
def query_clients(self):
"""查询clients
Returns:
[type] -- [description]
"""
try:
data = self.call("clients", {'client': 'None'})
if len(data) > 0:
return pd.DataFrame(data).drop(
['commandLine',
'processId'],
axis=1
)
else:
return pd.DataFrame(
None,
columns=[
'id',
'name',
'windowsTitle',
'accountInfo',
'status'
]
)
except Exception as e:
return False, e
def query_orders(self, accounts, status='filled'):
"""查询订单
Arguments:
accounts {[type]} -- [description]
Keyword Arguments:
            status {str} -- 'open' for pending (unfilled) orders, 'filled' for filled orders (default: {'filled'})
Returns:
[type] -- [description]
"""
try:
data = self.call("orders", {'client': accounts, 'status': status})
if data is not None:
orders = data.get('dataTable', False)
order_headers = orders['columns']
if ('成交状态' in order_headers
or '状态说明' in order_headers) and ('备注' in order_headers):
order_headers[order_headers.index('备注')] = '废弃'
order_headers = [cn_en_compare[item] for item in order_headers]
order_all = pd.DataFrame(
orders['rows'],
columns=order_headers
).assign(account_cookie=accounts)
order_all.towards = order_all.towards.apply(
lambda x: trade_towards_cn_en[x]
)
if 'order_time' in order_headers:
                # this is the order_status
order_all['status'] = order_all.status.apply(
lambda x: order_status_cn_en[x]
)
if 'order_date' not in order_headers:
order_all.order_time = order_all.order_time.apply(
lambda x: QA_util_get_order_datetime(
dt='{} {}'.format(datetime.date.today(),
x)
)
)
else:
order_all = order_all.assign(
order_time=order_all.order_date
.apply(QA_util_date_int2str) + ' ' +
order_all.order_time
)
if 'trade_time' in order_headers:
order_all.trade_time = order_all.trade_time.apply(
lambda x: '{} {}'.format(datetime.date.today(),
x)
)
if status == 'filled':
return order_all.loc[:,
self.dealstatus_headers].set_index(
['account_cookie',
'realorder_id']
).sort_index()
else:
return order_all.loc[:,
self.orderstatus_headers].set_index(
['account_cookie',
'realorder_id']
).sort_index()
else:
print('response is None')
return False
except Exception as e:
print(e)
return False
def send_order(
self,
accounts,
code='000001',
price=9,
amount=100,
order_direction=ORDER_DIRECTION.BUY,
order_model=ORDER_MODEL.LIMIT
):
"""[summary]
Arguments:
accounts {[type]} -- [description]
code {[type]} -- [description]
price {[type]} -- [description]
amount {[type]} -- [description]
Keyword Arguments:
order_direction {[type]} -- [description] (default: {ORDER_DIRECTION.BUY})
order_model {[type]} -- [description] (default: {ORDER_MODEL.LIMIT})
        priceType options:
            Shanghai Stock Exchange:
                0 - limit order
                4 - best five levels, immediate or cancel (remainder cancelled)
                6 - best five levels, immediate execution, remainder converted to a limit order
            Shenzhen Stock Exchange:
                0 - limit order
                1 - counterparty best price
                2 - own side best price
                3 - immediate or cancel
                4 - best five levels, immediate or cancel (remainder cancelled)
                5 - fill or kill (execute in full or cancel)
Returns:
[type] -- [description]
"""
try:
#print(code, price, amount)
return self.call_post(
'orders',
{
'client': accounts,
"action": 'BUY' if order_direction == 1 else 'SELL',
"symbol": code,
"type": order_model,
"priceType": 0 if order_model == ORDER_MODEL.LIMIT else 4,
"price": price,
"amount": amount
}
)
except json.decoder.JSONDecodeError:
print(RuntimeError('TRADE ERROR'))
return None
def cancel_order(self, accounts, orderid):
return self.call_delete(
'orders/{}'.format(orderid),
{'client': accounts}
)
def cancel_all(self, accounts):
return self.call_delete('orders', {'client': accounts})
def receive_order(self, event):
order = event.order
res = self.send_order(
accounts=order.account_cookie,
code=order.code,
price=order.price,
amount=order.amount,
order_direction=order.towards,
order_model=order.order_model
)
try:
# if res is not None and 'id' in res.keys():
# order.status = ORDER_STATUS.QUEUED
# order.text = 'SUCCESS'
order.queued(realorder_id=res['id'])
print('success receive order {}'.format(order.realorder_id))
return order
# else:
except:
text = 'WRONG' if res is None else res.get('message', 'WRONG')
order.failed(text)
print(
'FAILED FOR CREATE ORDER {} {}'.format(
order.account_cookie,
order.status
)
)
print(res)
return order
#self.dealer.deal(order, self.market_data)
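# A minimal, hedged usage sketch for the broker above. query_positions() returns
# either a dict with 'cash_available'/'hold_available', a (False, reason) tuple,
# or False, so callers should type-check before indexing; the account id is the
# same one used in the __main__ demo below and is purely illustrative.
def _example_print_positions(broker, account='account:1391'):
    pos = broker.query_positions(account)
    if isinstance(pos, dict):
        # cash_available is a number, hold_available is a code-indexed Series
        print('cash:', pos['cash_available'])
        print(pos['hold_available'])
    else:
        print('query failed:', pos)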
if __name__ == '__main__':
a = QA_SPEBroker()
print(a.query_clients())
    print('Query account')
    acc = 'account:1391'
    print(a.query_positions(acc))
    print('Query all orders')
    print(a.query_orders(acc, ''))
    print('Query open (unfilled) orders')
    print(a.query_orders(acc, 'open'))
    print('Query filled orders')
    print(a.query_orders(acc, 'filled'))
# """多账户同时下单测试
# """
# print('下单测试')
# res = a.send_order(acc, price=9)
# #a.send_order(acc, price=9)
# #a.send_order(acc, price=9)
# # print(res)
# print('查询新的未成交订单')
# print(a.query_orders(acc, 'open'))
# print('撤单')
# print(a.cancel_order(acc, res['id']))
# print('查询已成交订单')
# print(a.query_orders(acc, 'filled'))
# # print(a.send_order('account:141',price=8.95))
# print('一键全部撤单')
# print(a.cancel_all(acc))
# print(a.cancel_order('account:141', '1703'))
| mit |
dsquareindia/scikit-learn | sklearn/datasets/base.py | 13 | 29166 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import csv
import sys
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os.path import splitext
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
super(Bunch, self).__init__(kwargs)
def __setattr__(self, key, value):
self[key] = value
def __dir__(self):
return self.keys()
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __setstate__(self, state):
# Bunch pickles generated with scikit-learn 0.16.* have an non
# empty __dict__. This causes a surprising behaviour when
# loading these pickles scikit-learn 0.17: reading bunch.key
# uses __dict__ but assigning to bunch.key use __setattr__ and
# only changes bunch['key']. More details can be found at:
# https://github.com/scikit-learn/scikit-learn/issues/6196.
# Overriding __setstate__ to be a noop has the effect of
# ignoring the pickled __dict__
pass
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
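# Usage sketch: the location can be overridden with the SCIKIT_LEARN_DATA
# environment variable or by passing a path explicitly (the path below is
# purely illustrative and will be created if missing).
#
#   from sklearn.datasets import get_data_home
#   get_data_home()                       # ~/scikit_learn_data by default
#   get_data_home('/tmp/sklearn_cache')   # explicit folder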
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kind of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description : string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error : {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
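# Usage sketch for load_files with a hypothetical layout: given
# container/pos/*.txt and container/neg/*.txt, the folder names become the two
# classes and each file becomes one sample.
#
#   from sklearn.datasets import load_files
#   bunch = load_files('container', encoding='utf-8')
#   bunch.target_names                  # ['neg', 'pos']
#   len(bunch.data) == len(bunch.target)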
def load_data(module_path, data_file_name):
"""Loads data from module_path/data/data_file_name.
Parameters
----------
data_file_name : String. Name of csv file to be loaded from
module_path/data/data_file_name. For example 'wine_data.csv'.
Returns
-------
data : Numpy Array
A 2D array with each row representing one sample and each column
representing the features of a given sample.
target : Numpy Array
        A 1D array holding target variables for all the samples in `data`.
        For example target[0] is the target variable for data[0].
target_names : Numpy Array
A 1D array containing the names of the classifications. For example
target_names[0] is the name of the target[0] class.
"""
with open(join(module_path, 'data', data_file_name)) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float64)
target[i] = np.asarray(ir[-1], dtype=np.int)
return data, target, target_names
def load_wine(return_X_y=False):
"""Load and return the wine dataset (classification).
.. versionadded:: 0.18
The wine dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class [59,71,48]
Samples total 178
Dimensionality 13
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
return_X_y : boolean, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
(data, target) : tuple if ``return_X_y`` is True
    This copy of the UCI ML Wine Data Set is downloaded and modified to fit
    the standard format from:
https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data
Examples
--------
Let's say you are interested in the samples 10, 80, and 140, and want to
know their class name.
>>> from sklearn.datasets import load_wine
>>> data = load_wine()
>>> data.target[[10, 80, 140]]
array([0, 1, 2])
>>> list(data.target_names)
['class_0', 'class_1', 'class_2']
"""
module_path = dirname(__file__)
data, target, target_names = load_data(module_path, 'wine_data.csv')
with open(join(module_path, 'descr', 'wine_data.rst')) as rst_file:
fdescr = rst_file.read()
if return_X_y:
return data, target
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['alcohol',
'malic_acid',
'ash',
'alcalinity_of_ash',
'magnesium',
'total_phenols',
'flavanoids',
'nonflavanoid_phenols',
'proanthocyanins',
'color_intensity',
'hue',
'od280/od315_of_diluted_wines',
'proline'])
def load_iris(return_X_y=False):
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
return_X_y : boolean, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
data, target, target_names = load_data(module_path, 'iris.csv')
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
if return_X_y:
return data, target
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_breast_cancer(return_X_y=False):
"""Load and return the breast cancer wisconsin dataset (classification).
The breast cancer dataset is a classic and very easy binary classification
dataset.
================= ==============
Classes 2
Samples per class 212(M),357(B)
Samples total 569
Dimensionality 30
Features real, positive
================= ==============
Parameters
----------
return_X_y : boolean, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
    This copy of the UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is
    downloaded from:
https://goo.gl/U2Uwz2
Examples
--------
Let's say you are interested in the samples 10, 50, and 85, and want to
know their class name.
>>> from sklearn.datasets import load_breast_cancer
>>> data = load_breast_cancer()
>>> data.target[[10, 50, 85]]
array([0, 1, 0])
>>> list(data.target_names)
['malignant', 'benign']
"""
module_path = dirname(__file__)
data, target, target_names = load_data(module_path, 'breast_cancer.csv')
with open(join(module_path, 'descr', 'breast_cancer.rst')) as rst_file:
fdescr = rst_file.read()
feature_names = np.array(['mean radius', 'mean texture',
'mean perimeter', 'mean area',
'mean smoothness', 'mean compactness',
'mean concavity', 'mean concave points',
'mean symmetry', 'mean fractal dimension',
'radius error', 'texture error',
'perimeter error', 'area error',
'smoothness error', 'compactness error',
'concavity error', 'concave points error',
'symmetry error', 'fractal dimension error',
'worst radius', 'worst texture',
'worst perimeter', 'worst area',
'worst smoothness', 'worst compactness',
'worst concavity', 'worst concave points',
'worst symmetry', 'worst fractal dimension'])
if return_X_y:
return data, target
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=feature_names)
def load_digits(n_class=10, return_X_y=False):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
return_X_y : boolean, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import matplotlib.pyplot as plt #doctest: +SKIP
>>> plt.gray() #doctest: +SKIP
>>> plt.matshow(digits.images[0]) #doctest: +SKIP
>>> plt.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1].astype(np.int)
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
if return_X_y:
return flat_data, target
return Bunch(data=flat_data,
target=target,
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes(return_X_y=False):
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
return_X_y : boolean, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
if return_X_y:
return data, target
return Bunch(data=data, target=target,
feature_names=['age', 'sex', 'bmi', 'bp',
's1', 's2', 's3', 's4', 's5', 's6'])
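# Usage sketch: with return_X_y=True the loader hands back plain arrays that
# can be fed to an estimator directly.
#
#   from sklearn.datasets import load_diabetes
#   X, y = load_diabetes(return_X_y=True)
#   X.shape, y.shape    # ((442, 10), (442,))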
def load_linnerud(return_X_y=False):
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Parameters
----------
return_X_y : boolean, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
if return_X_y:
return data_exercise, data_physiological
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
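# Usage sketch: the Bunch holds two 20x3 arrays, the exercise variables in
# ``data`` and the physiological measurements in ``target``.
#
#   from sklearn.datasets import load_linnerud
#   linnerud = load_linnerud()
#   linnerud.data.shape, linnerud.target.shape    # ((20, 3), (20, 3))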
def load_boston(return_X_y=False):
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Parameters
----------
return_X_y : boolean, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float64)
target[i] = np.asarray(d[-1], dtype=np.float64)
if return_X_y:
return data, target
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
    Loads both ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name : {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img : 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
def _pkl_filepath(*args, **kwargs):
"""Ensure different filenames for Python 2 and Python 3 pickles
An object pickled under Python 3 cannot be loaded under Python 2.
An object pickled under Python 2 can sometimes not be loaded
correctly under Python 3 because some Python 2 strings are decoded as
Python 3 strings which can be problematic for objects that use Python 2
strings as byte buffers for numerical data instead of "real" strings.
Therefore, dataset loaders in scikit-learn use different files for pickles
    managed by Python 2 and Python 3 in the same SCIKIT_LEARN_DATA folder so
as to avoid conflicts.
args[-1] is expected to be the ".pkl" filename. Under Python 3, a
    suffix is inserted before the extension to separate the two versions.
_pkl_filepath('/path/to/folder', 'filename.pkl') returns:
- /path/to/folder/filename.pkl under Python 2
- /path/to/folder/filename_py3.pkl under Python 3+
"""
py3_suffix = kwargs.get("py3_suffix", "_py3")
basename, ext = splitext(args[-1])
if sys.version_info[0] >= 3:
basename += py3_suffix
new_args = args[:-1] + (basename + ext,)
return join(*new_args)
| bsd-3-clause |
geomagpy/MARTAS | oldstuff/UtilityScripts/testserial.py | 3 | 1861 | #!/usr/bin/env python
from __future__ import print_function
import sys, time, os, socket
import serial
import struct, binascii, re, csv
from datetime import datetime, timedelta
from matplotlib.dates import date2num, num2date
import numpy as np
import time
port = '/dev/ttyS0'
baudrate='115200'
eol = '\r'
def lineread(ser,eol):
# FUNCTION 'LINEREAD'
# Does the same as readline(), but does not require a standard
# linebreak character ('\r' in hex) to know when a line ends.
# Variable 'eol' determines the end-of-line char: '\x00'
# for the POS-1 magnetometer, '\r' for the envir. sensor.
# (Note: required for POS-1 because readline() cannot detect
# a linebreak and reads a never-ending line.)
ser_str = ''
timeout = time.time()+2
while True:
char = ser.read()
if char == eol:
break
if time.time() > timeout:
break
ser_str += char
return ser_str
def send_command(ser,command,eol,hex=False):
command = eol+command+eol
#print 'Command: %s \n ' % command.replace(eol,'')
sendtime = date2num(datetime.utcnow())
#print "Sending"
ser.write(command)
#print "Received something - interpretation"
response = lineread(ser,eol)
#print "interprete"
receivetime = date2num(datetime.utcnow())
meantime = np.mean([receivetime,sendtime])
#print "Timediff", (receivetime-sendtime)*3600*24
return response, num2date(meantime).replace(tzinfo=None)
ser = serial.Serial(port, baudrate=baudrate , parity='N', bytesize=8, stopbits=1, timeout=2)
for i in range(99):
call = str(i).zfill(2)+'TR00002'
print(call)
answer, actime = send_command(ser,call,eol)
print(answer)
| gpl-3.0 |
khkaminska/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 69 | 8605 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[np.newaxis, 1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[np.newaxis, 1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample[np.newaxis, :])
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
| bsd-3-clause |
fumitoh/modelx | modelx/tests/serialize/test_ziputil.py | 1 | 5786 | import zipfile
import filecmp
import pickle
from modelx.serialize import ziputil
import pytest
from itertools import product
def sample_root(path):
return path / "rootルート", path / "rootルート.zip", path / "rootルートext"
def sample_path(root):
return tuple(r / "abc漢字" / "fileファイル" for r in root)
def sample_pandas():
import pandas as pd
import numpy as np
df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)})
series = pd.Series([1, 3, 5, np.nan, 6, 8])
return df, series
def test_write_str(tmp_path):
root, zip_root, ext_root = sample_root(tmp_path)
path, zip_path, ext_path = sample_path(sample_root(tmp_path))
ziputil.make_root(root, is_zip=False)
ziputil.make_root(zip_root, is_zip=True,
compression=zipfile.ZIP_STORED)
text = "Hello! Привет こんにちは 你好\n"
ziputil.write_str_utf8(text, path)
ziputil.write_str_utf8(text, zip_path,
compression=zipfile.ZIP_STORED)
with zipfile.ZipFile(zip_root) as testzip:
testzip.extractall(ext_root)
assert filecmp.cmp(path, ext_path, shallow=False)
@pytest.mark.parametrize("pdobj", sample_pandas())
def test_pandas_to_pickle(tmp_path, pdobj):
root, zip_root, ext_root = sample_root(tmp_path)
path, zip_path, ext_path = sample_path(sample_root(tmp_path))
ziputil.make_root(root, is_zip=False)
ziputil.make_root(zip_root, is_zip=True, compression=zipfile.ZIP_STORED)
ziputil.pandas_to_pickle(pdobj, path)
ziputil.pandas_to_pickle(pdobj, zip_path, compression=zipfile.ZIP_STORED)
with zipfile.ZipFile(zip_root) as testzip:
testzip.extractall(ext_root)
assert filecmp.cmp(path, ext_path, shallow=False)
@pytest.mark.parametrize("mode, encoding, newline, compression, compresslevel",
[["b", None, None, zipfile.ZIP_DEFLATED, None],
["t", "utf-8", None, zipfile.ZIP_DEFLATED, 9], # Error on encoding==None
["t", "utf-8", "\n", zipfile.ZIP_STORED, None]])
def test_write_file(
tmp_path, mode, encoding, newline, compression, compresslevel):
root, zip_root, ext_root = sample_root(tmp_path)
path, zip_path, ext_path = sample_path(sample_root(tmp_path))
ziputil.make_root(root, is_zip=False,
compression=compression, compresslevel=compresslevel)
ziputil.make_root(zip_root, is_zip=True,
compression=compression, compresslevel=compresslevel)
data = {'a': [1, 2, 3], 'b': 4, 'c': '漢字'}
if mode == "b":
def callback(f):
pickle.dump(data, f)
else:
def callback(f):
for k, v in data.items():
f.write("(%s, %s)\n" % (k, v))
ziputil.write_file(callback, path, mode=mode, encoding=encoding, newline=newline)
ziputil.write_file(callback, zip_path, mode=mode, encoding=encoding, newline=newline,
compression=zipfile.ZIP_STORED)
with zipfile.ZipFile(zip_root) as testzip:
testzip.extractall(ext_root)
assert filecmp.cmp(path, ext_path, shallow=False)
def test_read_str(tmp_path):
root, zip_root, _ = sample_root(tmp_path)
path, zip_path, _ = sample_path(sample_root(tmp_path))
ziputil.make_root(root, is_zip=False)
ziputil.make_root(zip_root, is_zip=True, compression=zipfile.ZIP_STORED)
text = "Hello! Привет こんにちは 你好\n"
ziputil.write_str_utf8(text, path)
ziputil.write_str_utf8(text, zip_path, compression=zipfile.ZIP_STORED)
assert ziputil.read_str_utf8(path) == text
assert ziputil.read_str_utf8(zip_path) == text
@pytest.mark.parametrize("mode, encoding, newline",
[["b", None, None],
["t", None, None],
["t", "utf-8", "\n"]])
def test_read_file(tmp_path, mode, encoding, newline):
root, zip_root, _ = sample_root(tmp_path)
path, zip_path, _ = sample_path(sample_root(tmp_path))
ziputil.make_root(root, is_zip=False)
ziputil.make_root(zip_root, is_zip=True, compression=zipfile.ZIP_STORED)
data = {'a': [1, 2, 3], 'b': 4}
if mode == "b":
def callback(f):
pickle.dump(data, f)
def load(f):
return pickle.load(f)
result = data
else:
def callback(f):
for k, v in data.items():
f.write("(%s, %s)\n" % (k, v))
def load(f):
return f.read()
result = "".join(["(%s, %s)\n" % (k, v) for k, v in data.items()])
ziputil.write_file(callback, path, mode)
ziputil.write_file(callback, zip_path, mode,
compression=zipfile.ZIP_STORED)
assert ziputil.read_file(load, zip_path, mode) == result
assert ziputil.read_file(load, path, mode) == result
@pytest.mark.parametrize("is_src_zip, is_dst_zip",
list(product((True, False), (True, False))))
def test_copy_file(tmp_path, is_src_zip, is_dst_zip):
src_root = tmp_path / "src"
dst_root = tmp_path / "dst"
src = src_root / "abc漢字" / "fileファイル"
dst = dst_root / "fileファイル"
ziputil.make_root(src_root, is_zip=is_src_zip,
compression=zipfile.ZIP_STORED)
ziputil.make_root(dst_root, is_zip=is_dst_zip,
compression=zipfile.ZIP_STORED)
text = "Hello! Привет こんにちは 你好\n"
ziputil.write_str_utf8(text, src,
compression=zipfile.ZIP_STORED)
ziputil.copy_file(src, dst,
compression=zipfile.ZIP_DEFLATED,
compresslevel=None)
assert ziputil.read_str_utf8(dst) == text
| gpl-3.0 |
ndingwall/scikit-learn | doc/conf.py | 5 | 17517 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import warnings
import re
from packaging.version import parse
from pathlib import Path
from io import StringIO
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
import sphinx_gallery
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'numpydoc',
'sphinx.ext.linkcode', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.imgconverter',
'sphinx_gallery.gen_gallery',
'sphinx_issues',
'add_toctree_functions',
'sphinx-prompt',
]
# this is needed for some reason...
# see https://github.com/numpy/numpydoc/issues/69
numpydoc_class_members_toctree = False
# For maths, use mathjax by default and svg if NO_MATHJAX env variable is set
# (useful for viewing the doc offline)
if os.environ.get('NO_MATHJAX'):
extensions.append('sphinx.ext.imgmath')
imgmath_image_format = 'svg'
mathjax_path = ''
else:
extensions.append('sphinx.ext.mathjax')
mathjax_path = ('https://cdn.jsdelivr.net/npm/mathjax@3/es5/'
'tex-chtml.js')
autodoc_default_options = {
'members': True,
'inherited-members': True
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = 'scikit-learn'
copyright = '2007 - 2020, scikit-learn developers (BSD License)'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
parsed_version = parse(sklearn.__version__)
version = ".".join(parsed_version.base_version.split(".")[:2])
# The full version, including alpha/beta/rc tags.
# Removes post from release name
if parsed_version.is_postrelease:
release = parsed_version.base_version
else:
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'templates', 'includes', 'themes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'literal'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn-modern'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'google_analytics': True,
'mathjax_path': mathjax_path}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {
'index': 'index.html',
'documentation': 'documentation.html'} # redirects to index
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# If true, the reST sources are included in the HTML build as _sources/name.
html_copy_source = True
# Adds variables into templates
html_context = {}
# finds latest release highlights and places it into HTML context for
# index.html
release_highlights_dir = Path("..") / "examples" / "release_highlights"
# Finds the highlight with the latest version number
latest_highlights = sorted(release_highlights_dir.glob(
"plot_release_highlights_*.py"))[-1]
latest_highlights = latest_highlights.with_suffix('').name
html_context["release_highlights"] = \
f"auto_examples/release_highlights/{latest_highlights}"
# get version from higlight name assuming highlights have the form
# plot_release_highlights_0_22_0
highlight_version = ".".join(latest_highlights.split("_")[-3:-1])
html_context["release_highlights_version"] = highlight_version
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}
\usepackage{morefloats}\usepackage{enumitem} \setlistdepth{10}
\let\oldhref\href
\renewcommand{\href}[2]{\oldhref{#1}{\hbox{#2}}}
"""
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('contents', 'user_guide.tex', 'scikit-learn user guide',
'scikit-learn developers', 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
# intersphinx configuration
intersphinx_mapping = {
'python': ('https://docs.python.org/{.major}'.format(
sys.version_info), None),
'numpy': ('https://numpy.org/doc/stable', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('https://matplotlib.org/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'joblib': ('https://joblib.readthedocs.io/en/latest/', None),
'seaborn': ('https://seaborn.pydata.org/', None),
}
v = parse(release)
if v.release is None:
raise ValueError(
'Ill-formed version: {!r}. Version should follow '
'PEP440'.format(version))
if v.is_devrelease:
binder_branch = 'master'
else:
major, minor = v.release[:2]
binder_branch = '{}.{}.X'.format(major, minor)
class SubSectionTitleOrder:
"""Sort example gallery by title of subsection.
Assumes README.txt exists for all subsections and uses the subsection with
dashes, '---', as the adornment.
"""
def __init__(self, src_dir):
self.src_dir = src_dir
self.regex = re.compile(r"^([\w ]+)\n-", re.MULTILINE)
def __repr__(self):
return '<%s>' % (self.__class__.__name__,)
def __call__(self, directory):
src_path = os.path.normpath(os.path.join(self.src_dir, directory))
# Forces Release Highlights to the top
if os.path.basename(src_path) == "release_highlights":
return "0"
readme = os.path.join(src_path, "README.txt")
try:
with open(readme, 'r') as f:
content = f.read()
except FileNotFoundError:
return directory
title_match = self.regex.search(content)
if title_match is not None:
return title_match.group(1)
return directory
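# Behaviour sketch (paths illustrative): the sorter pins the release highlights
# gallery to the front and otherwise orders subsections by the "---"-underlined
# title found in each subsection's README.txt.
#
#   order = SubSectionTitleOrder('../examples')
#   order('release_highlights')   # -> "0", always sorts first
#   order('some_subsection')      # -> its README.txt title, or the dir name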
sphinx_gallery_conf = {
'doc_module': 'sklearn',
'backreferences_dir': os.path.join('modules', 'generated'),
'show_memory': False,
'reference_url': {
'sklearn': None},
'examples_dirs': ['../examples'],
'gallery_dirs': ['auto_examples'],
'subsection_order': SubSectionTitleOrder('../examples'),
'binder': {
'org': 'scikit-learn',
'repo': 'scikit-learn',
'binderhub_url': 'https://mybinder.org',
'branch': binder_branch,
'dependencies': './binder/requirements.txt',
'use_jupyter_lab': True
},
# avoid generating too many cross links
'inspect_global_variables': False,
'remove_config_comments': True,
}
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'sphx_glr_plot_classifier_comparison_001.png': 600}
# enable experimental module so that experimental estimators can be
# discovered properly by sphinx
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.experimental import enable_iterative_imputer # noqa
from sklearn.experimental import enable_halving_search_cv # noqa
def make_carousel_thumbs(app, exception):
"""produces the final resized carousel images"""
if exception is not None:
return
print('Preparing carousel images')
image_dir = os.path.join(app.builder.outdir, '_images')
for glr_plot, max_width in carousel_thumbs.items():
image = os.path.join(image_dir, glr_plot)
if os.path.exists(image):
c_thumb = os.path.join(image_dir, glr_plot[:-4] + '_carousel.png')
sphinx_gallery.gen_rst.scale_image(image, c_thumb, max_width, 190)
def filter_search_index(app, exception):
if exception is not None:
return
# searchindex only exist when generating html
if app.builder.name != 'html':
return
print('Removing methods from search index')
searchindex_path = os.path.join(app.builder.outdir, 'searchindex.js')
with open(searchindex_path, 'r') as f:
searchindex_text = f.read()
searchindex_text = re.sub(r'{__init__.+?}', '{}', searchindex_text)
searchindex_text = re.sub(r'{__call__.+?}', '{}', searchindex_text)
with open(searchindex_path, 'w') as f:
f.write(searchindex_text)
def generate_min_dependency_table(app):
"""Generate min dependency table for docs."""
from sklearn._min_dependencies import dependent_packages
# get length of header
package_header_len = max(len(package)
for package in dependent_packages) + 4
version_header_len = len('Minimum Version') + 4
tags_header_len = max(len(tags)
for _, tags in dependent_packages.values()) + 4
output = StringIO()
output.write(' '.join(['=' * package_header_len,
'=' * version_header_len,
'=' * tags_header_len]))
output.write('\n')
dependency_title = "Dependency"
version_title = "Minimum Version"
tags_title = "Purpose"
output.write(f'{dependency_title:<{package_header_len}} '
f'{version_title:<{version_header_len}} '
f'{tags_title}\n')
output.write(' '.join(['=' * package_header_len,
'=' * version_header_len,
'=' * tags_header_len]))
output.write('\n')
for package, (version, tags) in dependent_packages.items():
output.write(f'{package:<{package_header_len}} '
f'{version:<{version_header_len}} '
f'{tags}\n')
output.write(' '.join(['=' * package_header_len,
'=' * version_header_len,
'=' * tags_header_len]))
output.write('\n')
output = output.getvalue()
with (Path('.') / 'min_dependency_table.rst').open('w') as f:
f.write(output)
def generate_min_dependency_substitutions(app):
"""Generate min dependency substitutions for docs."""
from sklearn._min_dependencies import dependent_packages
output = StringIO()
for package, (version, _) in dependent_packages.items():
package = package.capitalize()
output.write(f'.. |{package}MinVersion| replace:: {version}')
output.write('\n')
output = output.getvalue()
with (Path('.') / 'min_dependency_substitutions.rst').open('w') as f:
f.write(output)
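# Each generated line is a reStructuredText substitution of the form
#   .. |NumpyMinVersion| replace:: <minimum version>
# (package names are capitalized as above; the version shown here is only a
# placeholder, the real value comes from sklearn._min_dependencies).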
# Config for sphinx_issues
# we use the issues path for PRs since the issues URL will forward
issues_github_path = 'scikit-learn/scikit-learn'
# Hack to get kwargs to appear in docstring #18434
# TODO: Remove when https://github.com/sphinx-doc/sphinx/pull/8234 gets
# merged
from sphinx.util import inspect # noqa
from sphinx.ext.autodoc import ClassDocumenter # noqa
class PatchedClassDocumenter(ClassDocumenter):
def _get_signature(self):
old_signature = inspect.signature
def patch_signature(subject, bound_method=False, follow_wrapped=True):
# changes the default of follow_wrapped to True
return old_signature(subject, bound_method=bound_method,
follow_wrapped=follow_wrapped)
inspect.signature = patch_signature
result = super()._get_signature()
inspect.signature = old_signature
return result
def setup(app):
app.registry.documenters['class'] = PatchedClassDocumenter
app.connect('builder-inited', generate_min_dependency_table)
app.connect('builder-inited', generate_min_dependency_substitutions)
# to hide/show the prompt in code examples:
app.connect('build-finished', make_carousel_thumbs)
app.connect('build-finished', filter_search_index)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
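# For illustration, a link resolved from this template is expected to look
# roughly like
#   https://github.com/scikit-learn/scikit-learn/blob/<revision>/sklearn/<path>#L<lineno>
# with the placeholders filled in by sphinx.ext.linkcode at build time.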
warnings.filterwarnings("ignore", category=UserWarning,
message='Matplotlib is currently using agg, which is a'
' non-GUI backend, so cannot show the figure.')
# maps functions with a class name that is indistinguishable when case is
# ignored to another filename
autosummary_filename_map = {
"sklearn.cluster.dbscan": "dbscan-function",
"sklearn.covariance.oas": "oas-function",
"sklearn.decomposition.fastica": "fastica-function",
}
| bsd-3-clause |
seandavi/vcfanno | scripts/paper/chunk-gap-plot.py | 2 | 2020 | import sys
import re
import numpy as np
from collections import defaultdict
from matplotlib import pyplot as plt
import seaborn as sns
sns.set_style("white")
sns.set_palette('Set1', 8)
colors = sns.color_palette('Set1', 3)
f, axes = plt.subplots(1, figsize=(4, 2))
axes = (axes,)
# run as python chunk-gap-plot.py 1kg.times-tails.fmt.txt exac.times-tails.txt
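# The parsing below assumes whitespace-separated lines of the form
#   <gap> <chunk> <procs> <free text ending in "in <seconds> seconds">
# e.g. (illustrative line, not taken from the real timing files):
#   5000 10000 4 annotated chunk in 12.34 seconds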
for i, f in enumerate(sys.argv[1:3]):
if i == 0:
assert "1kg" in f.lower()
else:
assert "exac" in f.lower()
groups = defaultdict(list)
for line in open(f):
gap, chunk, procs, info = re.split("\s+", line, 3)
if not int(chunk) in (1000, 10000, 100000): continue
seconds = re.search("in (.+) seconds", info).groups(0)[0]
if gap == '100' or chunk == '100': continue
if int(procs) != 4: continue
groups[(int(gap), int(chunk))].append(float(seconds))
bychunk = defaultdict(list)
for gap, chunk in groups:
#if chunk != 5000: continue
m = np.mean(groups[(gap, chunk)])
bychunk[chunk].append((gap, m))
label = "ExAC" if i == 1 else "1KG"
marker = "o" if label == "ExAC" else "s"
for j, (chunk, vals) in enumerate(sorted(bychunk.items())):
vals.sort()
xs, ys = zip(*vals)
plabel = "%d : %s" % (chunk, label)
if i == 1:
plabel = label
axes[0].plot(xs, ys, color=colors[j], ls="--" if label == "ExAC" else
"-", label=plabel) #, marker=marker)
if i == 0:
axes[0].set_xlabel("Gap size")
axes[0].set_ylabel("Time (seconds)")
sns.despine()
plt.legend(ncol=2, markerfirst=False, title="Chunk size",
loc=(axes[0].get_position().x1-0.45, axes[0].get_position().y1 - 0.085))
ax = plt.gca()
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(7)
for item in ax.get_legend().get_texts():
item.set_fontsize(5)
plt.savefig('figure-5.pdf')
plt.show()
| mit |
Garrett-R/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 41 | 7742 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assess the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
print(__doc__)
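# Minimal sketch of the power-iteration idea described in the module
# docstring, shown on a tiny hand-written adjacency matrix. This helper is
# not part of the original example, is never called below, and the graph and
# function name are illustrative only.
def _toy_power_iteration(n_iter=50):
    """Approximate the principal eigenvector of a small undirected graph."""
    A = np.array([[0., 1., 1., 1.],
                  [1., 0., 1., 0.],
                  [1., 1., 0., 0.],
                  [1., 0., 0., 0.]])
    v = np.ones(A.shape[0]) / A.shape[0]
    for _ in range(n_iter):
        v = A.dot(v)            # propagate scores along the edges
        v /= np.linalg.norm(v)  # re-normalize to avoid overflow
    return v  # the hub vertex (row 0) ends up with the largest score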
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
import urllib
print("Downloading data from '%s', please wait..." % url)
opener = urllib.urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = set([source])
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in index_map.iteritems())
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia related strongest components of the
# principal singular vector which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <[email protected]>
Dan Schult <[email protected]>
Pieter Swart <[email protected]>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
| bsd-3-clause |
lkishline/expyfun | examples/level_test.py | 1 | 1979 | """
============================================
Sound level test and visual size calibration
============================================
This example tests the audio level and video size. For audio, it produces a 65
dB SPL 1000 Hz tone (note that at 1000 Hz, the frequency weighting for SPL
measurement shouldn't matter). For video, it produces a square that should be
10 degrees visual angle and tells you what the physical width should be in cm.
This of course depends on correct settings for monitor width, resolution, and
distance.
"""
# Author: Ross Maddox <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from expyfun import ExperimentController
from expyfun.visual import Rectangle
import expyfun.analyze as ea
print(__doc__)
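# Worked example of the size check described in the docstring (assumed
# numbers, for illustration only): at a viewing distance of 50 cm, a square
# spanning 10 degrees of visual angle should be about
#   width = 2 * 50 cm * tan(10 deg / 2) ~= 8.7 cm
# wide. The value printed below is instead computed from the monitor settings
# known to the ExperimentController.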
with ExperimentController('LevelTest', full_screen=True, noise_db=-np.inf,
participant='s', session='0', output_dir=None,
suppress_resamp=True, check_rms=None,
stim_db=65) as ec:
tone = (0.01 * np.sqrt(2.) *
np.sin(2 * np.pi * 1000. * np.arange(0, 10, 1. / ec.fs)))
assert np.allclose(np.sqrt(np.mean(tone * tone)), 0.01)
square = Rectangle(ec, (0, 0, 10, 10), units='deg', fill_color='r')
cm = np.diff(ec._convert_units([[0, 5], [0, 5]], 'deg', 'pix'),
axis=-1)[0] / ec.dpi / 0.39370
ec.load_buffer(tone) # RMS == 0.01
pressed = None
screenshot = None
while pressed != '8': # enable a clean quit if required
square.draw()
ec.screen_text('Width: {} cm'.format(round(2 * cm, 1)), wrap=False)
screenshot = ec.screenshot() if screenshot is None else screenshot
t1 = ec.start_stimulus(start_of_trial=False) # skip checks
pressed = ec.wait_one_press(10)[0]
ec.flip()
ec.wait_one_press(0.5)
ec.stop()
plt.ion()
ea.plot_screen(screenshot)
| bsd-3-clause |
gdetor/SI-RF-Structure | Statistics/rf-matrix.py | 1 | 4982 | # Copyright (c) 2014, Georgios Is. Detorakis ([email protected]) and
# Nicolas P. Rougier ([email protected])
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# This file is part of the source code accompany the peer-reviewed article:
# [1] "Structure of Receptive Fields in a Computational Model of Area 3b of
# Primary Sensory Cortex", Georgios Is. Detorakis and Nicolas P. Rougier,
# Frontiers in Computational Neuroscience, 2014.
#
# This script illustrates some of the ncRFs annotated by the user.
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from matplotlib.patches import Rectangle
import matplotlib.patheffects as PathEffects
if __name__=='__main__':
n, m = 32, 25
x, y = 22, 25
RFs = np.load('cleared-rfs.npy').reshape(n,n,m,m)
fg = 0.0,0.0,0.0
bg = 1.0,1.0,1.0
matplotlib.rcParams['ytick.major.size'] = 0
matplotlib.rcParams['ytick.minor.size'] = 9
matplotlib.rcParams['xtick.major.width'] = .5
matplotlib.rcParams['ytick.major.width'] = 0
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
matplotlib.rcParams['font.size'] = 12.0
matplotlib.rc('axes', facecolor = bg)
matplotlib.rc('axes', edgecolor = fg)
matplotlib.rc('xtick', color = fg)
matplotlib.rc('ytick', color = fg)
matplotlib.rc('figure', facecolor = bg)
matplotlib.rc('savefig', facecolor = bg)
plt.figure( figsize=(10,10) )
# plt.subplots_adjust( wspace=0.7, hspace=0.7 )
# indices = [ ( 3,18), ( 3,11), (29,16),
# ( 6, 1), (22,21), ( 2, 7),
# (19,26), ( 7,20), (21, 2)]
# indices = [(3, 18) , (26, 18) , (10, 7) , (25, 11) , (3, 21) , (8, 11) , (21, 14) , (20, 16) , (8, 19) , (16, 5) , (0, 9) , (17, 15) , (7, 20) , (20, 0) , (27, 19) , (4, 24) ]
# indices = [(a,b) for (a,b) in np.random.randint(0,32,(32,2))]
# indices = [(2, 12) , (5, 9) , (1, 17) , (9, 18) , (2, 14) , (31, 11) , (2, 30) , (5, 16) , (12, 2) , (9, 9) , (24, 22) , (24, 13) , (23, 29) , (30, 6) , (19, 20) , (24, 19)]
indices = [(10, 21) , (29, 16) , (28, 14) , (20, 17) , (13, 19) , (3, 15) , (23, 18) , (0, 18) , (8, 31) , (16, 11) , (0, 20) , (24, 13) , (11, 2) , (1, 1) , (19, 20) , (2, 21)]
vmin=vmax=0
for i in range(4):
for j in range(4):
index = i*4+j
y,x = indices[index]
RF = RFs[y,x]
vmin = min(vmin,RF.min())
vmax = max(vmax,RF.max())
for i in range(4):
for j in range(4):
index = i*4+j
y,x = indices[index]
RF = RFs[y,x]
# s0,s1 = np.unravel_index(np.argmax(RF),RF.shape)
# RF = np.roll(RF,12-s0,axis=0)
# RF = np.roll(RF,12-s1,axis=1)
vmin, vmax = RF.min(), RF.max()
plt.subplot2grid((4,4),(i,j),rowspan=1,colspan=1)
plt.imshow( RF, interpolation='nearest', cmap=plt.cm.gray_r, origin='lower',
vmin=vmin, vmax=vmax)
plt.axis([0,RFs.shape[2]-1,0,RFs.shape[2]-1])
plt.xticks([]), plt.yticks([])
plt.text(1,1,'%c' % (ord('A')+index), weight='bold', fontsize=20, color='w',
path_effects=[PathEffects.withStroke(linewidth=1.5, foreground="k", alpha=.5)])
print vmin,vmax
plt.savefig('matrix-rfs.pdf', dpi=100 )
plt.show()
| gpl-3.0 |
bmazin/ARCONS-pipeline | photometry/PSF_Popup.py | 1 | 5647 |
#LightCurve Popup for checking lightcurve, PSF fits, etc.
from functools import partial
import numpy as np
import matplotlib
from util.popup import *
from util.fitFunctions import model_list
from photometry.LightCurve import LightCurve
from photometry.plot3DImage import plot3DImage
def clickCanvas(self,LC,event):
if event.inaxes is self.axes and self.mpl_toolbar._active is None:
time = LC.photometry_dict['startTimes']
closest_time_ind = np.argmin(np.abs(time - event.xdata))
#print event.xdata, ' --> ', time[closest_time_ind]
pop=PopUp(parent=self,title='JD: '+str(time[closest_time_ind])+' PSF fit')
pop_image(pop,LC,closest_time_ind)
pop.show()
pop=PopUp(parent=self,title='JD: '+str(time[closest_time_ind])+' Fit Residual')
pop_residualImage(pop,LC,closest_time_ind)
pop.show()
pop=PopUp(parent=self,title='JD: '+str(time[closest_time_ind])+' 2D Image')
pop_2DImage(pop,LC,closest_time_ind)
pop.show()
print 'image:',closest_time_ind
#print 'centroid:',LC.centroids[closest_time_ind]
def pop_image(self,LC,ind,model='multiple_2d_circ_gauss_func'):
image = LC.im_dict['images'][ind]
image[np.invert(np.isfinite(image))]=0.
errs = np.sqrt(image)
errs[np.where(image==0.)]=np.inf
parameters = LC.photometry_dict['parameters'][ind]
models = model_list[model](parameters)(p=np.ones(len(parameters)),data=image,return_models=True)
guess = models[0]
for m in models[1:]:
guess+=m
plot3DImage(self.fig,self.axes,image,errs=errs,fit=guess)
def pop_residualImage(self,LC,ind,model='multiple_2d_circ_gauss_func'):
image = LC.im_dict['images'][ind]
image[np.invert(np.isfinite(image))]=0.
errs = np.sqrt(image)
errs[np.where(image==0.)]=np.inf
parameters = LC.photometry_dict['parameters'][ind]
models = model_list[model](parameters)(p=np.ones(len(parameters)),data=image,return_models=True)
guess = models[0]
for m in models[1:]:
guess+=m
residualImage = (image-guess)/np.sqrt(image)
residualImage[np.where(image==0)] = 0
residualImage[np.invert(np.isfinite(residualImage))]=0.
self.plotArray(image=residualImage, title='Image Residual',cmap=matplotlib.cm.gnuplot2)
def pop_2DImage(self,LC,ind):
image = LC.im_dict['images'][ind]
image[np.invert(np.isfinite(image))]=0.
self.plotArray(image=image, title='Image',cmap=matplotlib.cm.gnuplot2)
centroids = LC.centroids[ind]
for star_i in range(len(centroids)):
self.axes.plot(centroids[star_i][0],centroids[star_i][1],'gx')
def hoverCanvas(self,time,event):
if event.inaxes is self.axes:
closest_time_ind = np.argmin(np.abs(time - event.xdata))
self.status_text.setText(str(time[closest_time_ind]))
def plotLightCurve(self,LC):
LC.loadLightCurve(photometryType='PSF')
time = LC.photometry_dict['startTimes']
flags = LC.photometry_dict['flag']
flux=LC.photometry_dict['flux']
for star in range(len(flux[0])):
lbl='target'
if star>0: lbl='ref'+str(star-1)
star_flux = flux[:,star]
self.axes.plot(time[np.where(flags==0.)],star_flux[np.where(flags==0.)],'.',label=lbl)
self.axes.plot(time[np.where(flags==1.)],star_flux[np.where(flags==1.)],'r.')
self.axes.plot(time[np.where(flags>1.1)],star_flux[np.where(flags>1.1)],'ro')
self.axes.plot(time[np.where(flags==1.)],star_flux[np.where(flags==1.)],'r.',label='Centroid Fail')
self.axes.plot(time[np.where(flags>1.1)],star_flux[np.where(flags>1.1)],'ro',label='Fit Fail')
self.axes.legend()
self.axes.set_xlabel('Julian Date')
self.axes.set_ylabel('Integrated Stellar Flux [counts/sec]')
cid = self.fig.canvas.mpl_connect('button_press_event',partial(clickCanvas,self,LC))
cid = self.fig.canvas.mpl_connect('motion_notify_event', partial(hoverCanvas,self,time))
self.fig.canvas.draw()
def plotLightCurveRatio(self,LC):
LC.loadLightCurve(photometryType='PSF')
time = LC.photometry_dict['startTimes']
flags = LC.photometry_dict['flag']
flux=LC.photometry_dict['flux']
targetFlux=flux[:,0]
for star in range(len(flux[0])-1):
lbl=LC.targetName + '/Ref'+str(star)
ref_flux = flux[:,star+1]
self.axes.plot(time[np.where(flags==0.)],targetFlux[np.where(flags==0.)]/ref_flux[np.where(flags==0.)],'.',label=lbl)
self.axes.legend()
self.axes.set_xlabel('Julian Date')
self.axes.set_ylabel('Integrated Stellar Flux [counts/sec]')
cid = self.fig.canvas.mpl_connect('button_press_event',partial(clickCanvas,self,LC))
cid = self.fig.canvas.mpl_connect('motion_notify_event', partial(hoverCanvas,self,time))
self.fig.canvas.draw()
if __name__ == '__main__':
#path = '/Scratch/DisplayStack/PAL2014/HAT_P1'
#identifier = '1'
path = '/Scratch/DisplayStack/PAL2014/1SWASP_J2210'
identifier = '15s_4000-9000A_flat_hp_V3'
LC=LightCurve(fileID=identifier,path=path,targetName=None,run=None,verbose=True,showPlot=False)
pop(plotFunc = partial(plotLightCurve,LC=LC),title="PSF Light Curve Popup")
pop(plotFunc = partial(plotLightCurveRatio,LC=LC),title="PSF Light Curve Popup")
##path = '/Scratch/DisplayStack/PAL2014/HAT_P1'
##identifier = '1'
#path = '/Scratch/DisplayStack/PAL2014/1SWASP_J2210'
#identifier = '0'
#LC = LightCurve(path,fileID = identifier, PSF=True)
#
#pop(plotFunc = partial(plotLightCurve,LC=LC),title="Light Curve Popup")
| gpl-2.0 |
henrykironde/scikit-learn | examples/ensemble/plot_voting_decision_regions.py | 230 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count 2 times as much as the weight of the `KNeighborsClassifier`
classifier when the averaged probability is calculated.
"""
print(__doc__)
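# Worked example of the weighted soft vote described above (numbers are
# illustrative, not computed from the iris data): with weights [2, 1, 2] and
# per-classifier probabilities of 0.9 (tree), 0.3 (knn) and 0.8 (svc) for some
# class, the ensemble probability for that class is
#   (2 * 0.9 + 1 * 0.3 + 2 * 0.8) / (2 + 1 + 2) = 3.7 / 5 = 0.74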
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
| bsd-3-clause |
idlead/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 181 | 15664 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
                              err_msg="Clusters should not be centered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
SmokinCaterpillar/pypet | pypet/tests/profiling/speed_analysis/avg_runtima_as_function_of_length.py | 2 | 2266 | __author__ = 'robert'
from pypet import Environment, Trajectory
from pypet.tests.testutils.ioutils import make_temp_dir, get_log_config
import os
import matplotlib.pyplot as plt
import numpy as np
import time
def job(traj):
traj.f_ares('$set.$', 42, comment='A result')
def get_runtime(length):
filename = os.path.join('tmp', 'hdf5', 'many_runs.hdf5')
with Environment(filename = filename,
log_levels=50, report_progress=(0.0002, 'progress', 50),
overwrite_file=True, purge_duplicate_comments=False,
log_stdout=False,
multiproc=False, ncores=2, use_pool=True,
wrap_mode='PIPE', #freeze_input=True,
summary_tables=False, small_overview_tables=False) as env:
traj = env.v_traj
traj.par.f_apar('x', 0, 'parameter')
traj.f_explore({'x': range(length)})
# traj.v_full_copy = False
max_run = 1000
for idx in range(len(traj)):
if idx > max_run:
traj.f_get_run_information(idx, copy=False)['completed'] = 1
start = time.time()
env.f_run(job)
end = time.time()
# dicts = [traj.f_get_run_information(x) for x in range(min(len(traj), max_run))]
total = end - start
return total/float(min(len(traj), max_run)), total/float(min(len(traj), max_run)) * len(traj)
def main():
#lengths = [1000000, 500000, 100000, 50000, 10000, 5000, 1000, 500, 100, 50, 10, 5, 1]
lengths = [100000, 50000, 10000, 5000, 1000, 500, 100, 50, 10, 5, 1]
runtimes = [get_runtime(x) for x in lengths]
avg_runtimes = [x[0] for x in runtimes]
summed_runtime = [x[1] for x in runtimes]
plt.subplot(2, 1, 1)
plt.semilogx(list(reversed(lengths)), list(reversed(avg_runtimes)), linewidth=2)
plt.xlabel('Runs')
plt.ylabel('t[s]')
plt.title('Average Runtime per single run')
plt.grid()
plt.subplot(2, 1, 2)
plt.loglog(lengths, summed_runtime, linewidth=2)
plt.grid()
plt.xlabel('Runs')
plt.ylabel('t[s]')
plt.title('Total runtime of experiment')
plt.savefig('avg_runtime_as_func_of_lenght_1000_single_core')
plt.show()
if __name__ == '__main__':
main() | bsd-3-clause |
justincassidy/scikit-learn | sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
FernanOrtega/DAT210x | Module4/assignment1.py | 1 | 3651 | import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import datetime
from mpl_toolkits.mplot3d import Axes3D
from plyfile import PlyData, PlyElement
# Every 100 data samples, we save 1. If things run too
# slow, try increasing this number. If things run too fast,
# try decreasing it... =)
reduce_factor = 100
# Look pretty...
matplotlib.style.use('ggplot')
# Load up the scanned armadillo
plyfile = PlyData.read('Datasets/stanford_armadillo.ply')
armadillo = pd.DataFrame({
'x':plyfile['vertex']['z'][::reduce_factor],
'y':plyfile['vertex']['x'][::reduce_factor],
'z':plyfile['vertex']['y'][::reduce_factor]
})
def do_PCA(armadillo):
#
# TODO: Write code to import the libraries required for PCA.
# Then, train your PCA on the armadillo dataframe. Finally,
# drop one dimension (reduce it down to 2D) and project the
# armadillo down to the 2D principal component feature space.
#
# NOTE: Be sure to RETURN your projected armadillo!
# (This projection is actually stored in a NumPy NDArray and
# not a Pandas dataframe, which is something Pandas does for
# you automatically. =)
#
from sklearn.decomposition import PCA
pca = PCA(n_components = 2, svd_solver='full')
train = pca.fit_transform(armadillo)
return train
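# Quick sanity check (illustrative, not required by the assignment): the
# projection returned above keeps one row per sample and two columns, e.g.
#   assert do_PCA(armadillo).shape == (armadillo.shape[0], 2)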
def do_RandomizedPCA(armadillo):
#
# TODO: Write code to import the libraries required for
# RandomizedPCA. Then, train your RandomizedPCA on the armadillo
# dataframe. Finally, drop one dimension (reduce it down to 2D)
# and project the armadillo down to the 2D principal component
# feature space.
#
# NOTE: Be sure to RETURN your projected armadillo!
# (This projection is actually stored in a NumPy NDArray and
# not a Pandas dataframe, which is something Pandas does for
# you automatically. =)
#
# NOTE: SKLearn deprecated the RandomizedPCA method, but still
# has instructions on how to use randomized (truncated) method
# for the SVD solver. To find out how to use it, set `svd_solver`
# to 'randomized' and check out the full docs here
#
# http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html
#
# Deprecated Method: http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.RandomizedPCA.html
#
from sklearn.decomposition import PCA
pca = PCA(n_components = 2, svd_solver='randomized')
train = pca.fit_transform(armadillo)
return train
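# Optional check (illustrative, not required by the assignment): the share of
# variance kept by the two randomized components could be inspected with
#   pca = PCA(n_components=2, svd_solver='randomized').fit(armadillo)
#   print(pca.explained_variance_ratio_.sum())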
# Render the Original Armadillo
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_title('Armadillo 3D')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.scatter(armadillo.x, armadillo.y, armadillo.z, c='green', marker='.', alpha=0.75)
# Time the execution of PCA 5000x
# PCA is ran 5000x in order to help decrease the potential of rogue
# processes altering the speed of execution.
t1 = datetime.datetime.now()
for i in range(5000): pca = do_PCA(armadillo)
time_delta = datetime.datetime.now() - t1
# Render the newly transformed PCA armadillo!
if not pca is None:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title('PCA, build time: ' + str(time_delta))
ax.scatter(pca[:,0], pca[:,1], c='blue', marker='.', alpha=0.75)
# Time the execution of rPCA 5000x
t1 = datetime.datetime.now()
for i in range(5000): rpca = do_RandomizedPCA(armadillo)
time_delta = datetime.datetime.now() - t1
# Render the newly transformed RandomizedPCA armadillo!
if not rpca is None:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title('RandomizedPCA, build time: ' + str(time_delta))
ax.scatter(rpca[:,0], rpca[:,1], c='red', marker='.', alpha=0.75)
plt.show()
| mit |
cloud-fan/spark | python/pyspark/pandas/exceptions.py | 15 | 5003 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Exceptions/Errors used in pandas-on-Spark.
"""
from typing import Optional
class DataError(Exception):
pass
class SparkPandasIndexingError(Exception):
pass
def code_change_hint(pandas_function: Optional[str], spark_target_function: Optional[str]) -> str:
if pandas_function is not None and spark_target_function is not None:
return "You are trying to use pandas function {}, use spark function {}".format(
pandas_function, spark_target_function
)
elif pandas_function is not None and spark_target_function is None:
return (
"You are trying to use pandas function {}, checkout the spark "
"user guide to find a relevant function"
).format(pandas_function)
elif pandas_function is None and spark_target_function is not None:
return "Use spark function {}".format(spark_target_function)
else: # both none
return "Checkout the spark user guide to find a relevant function"
class SparkPandasNotImplementedError(NotImplementedError):
def __init__(
self,
pandas_function: Optional[str] = None,
spark_target_function: Optional[str] = None,
description: str = "",
):
self.pandas_source = pandas_function
self.spark_target = spark_target_function
hint = code_change_hint(pandas_function, spark_target_function)
if len(description) > 0:
description += " " + hint
else:
description = hint
super().__init__(description)
class PandasNotImplementedError(NotImplementedError):
def __init__(
self,
class_name: str,
method_name: Optional[str] = None,
arg_name: Optional[str] = None,
property_name: Optional[str] = None,
deprecated: bool = False,
reason: str = "",
):
assert (method_name is None) != (property_name is None)
self.class_name = class_name
self.method_name = method_name
self.arg_name = arg_name
if method_name is not None:
if arg_name is not None:
msg = "The method `{0}.{1}()` does not support `{2}` parameter. {3}".format(
class_name, method_name, arg_name, reason
)
else:
if deprecated:
msg = (
"The method `{0}.{1}()` is deprecated in pandas and will therefore "
+ "not be supported in pandas-on-Spark. {2}"
).format(class_name, method_name, reason)
else:
if reason == "":
reason = " yet."
else:
reason = ". " + reason
msg = "The method `{0}.{1}()` is not implemented{2}".format(
class_name, method_name, reason
)
else:
if deprecated:
msg = (
"The property `{0}.{1}()` is deprecated in pandas and will therefore "
+ "not be supported in pandas-on-Spark. {2}"
).format(class_name, property_name, reason)
else:
if reason == "":
reason = " yet."
else:
reason = ". " + reason
msg = "The property `{0}.{1}()` is not implemented{2}".format(
class_name, property_name, reason
)
super().__init__(msg)
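# Minimal usage sketch (illustrative only; ``some_method`` is a hypothetical
# name, not a real pandas API): raising the error for an unsupported method
# could look like
#   raise PandasNotImplementedError(class_name="pd.DataFrame",
#                                   method_name="some_method")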
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.exceptions
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.exceptions.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.exceptions tests")
.getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.exceptions,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
akimovmike/model_sweeper | mongo_enabled_learning.py | 1 | 11966 |
import operator
import marshal, types
import pymongo
import json
import numpy as np
import pandas as pd
sources = []
client = pymongo.MongoClient("host", 'port')
db = client.dislearn
db.authenticate("login", "password", source='login')
import os
import ml_metrics
import sklearn as sk
from sklearn import metrics
from sklearn.model_selection import KFold
from bson import ObjectId
def check_sample_exists(sample_notation):
if sample_notation[-1]=='Text File':
return os.path.exists(sample_notation[0])
def load_df_from_sample_notation(sample_notation, **kvargs):
if sample_notation[-1]=='Text File':
if type(sample_notation[1])==dict:
kvargs.update(sample_notation[1])
if not 'sep' in kvargs:
kv_args['sep'] = "\t"
return pd.read_csv(sample_notation[0] , **kvargs)
def save_df_to_sample_notation(input_df, sample_notation):
if sample_notation[-1]=='Text File':
input_df.to_csv(sample_notation[0] ,sep="\t", index=False)
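# For reference, the "sample notation" tuples handled by the two helpers above
# look like the ones used in the commented usage sketch at the bottom of this
# file, e.g.
#   ('features_test_0.txt', 'Text File')                # tab-separated by default
#   ('/path/to/train.csv', {'sep': ','}, 'Text File')   # with read_csv overrides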
def sw_compute_features(learn_data, overwrite_existing=False, worker_id=None):
# learn_data = db['learns'].find_one(learn_id)
model_data = db[learn_data['Model'][-1]].find_one(learn_data['Model'][0])
# sample_df = load_df_from_sample_notation(model_data['Initial Sample Location'])
if not check_sample_exists(model_data['Feature Sample Location']) or overwrite_existing:
feature_generating_function_code = marshal.loads(db[model_data['Feature Generation Function'][-1]]\
.find_one(model_data['Feature Generation Function'][0])['Code'])
feature_generating_function = types.FunctionType(feature_generating_function_code, globals())
# save_df_to_sample_notation(, model_data['Feature Sample Location'])
learn_data = feature_generating_function(learn_data, model_data)
learn_data['Status']['Features Computed'] = True
    db['learns'].replace_one({'_id': learn_data['_id']}, learn_data)
def sw_learn(learn_data, overwrite_existing, worker_id=None):
# learn_data = db['learns'].find_one(learn_id)
model_data = db[learn_data['Model'][-1]].find_one(learn_data['Model'][0])
if learn_data['Status']['Features Computed']:
if not 'Fitted Model' in learn_data or learn_data['Fitted Model']==None or overwrite_existing:
# sample_df = load_df_from_sample_notation(model_data['Feature Sample Location'])
learn_function_code = marshal.loads(db[model_data['Learn Function'][-1]]\
.find_one(model_data['Learn Function'][0])['Code'])
learn_function = types.FunctionType(learn_function_code, globals())
            learn_data['Fitted Model'] = learn_function(learn_data, model_data)
learn_data['Status']['Model Fitted'] = True
            db['learns'].replace_one({'_id': learn_data['_id']}, learn_data)
def sw_compute_prediction(learn_data, overwrite_existing, worker_id=None):
# learn_data = db['learns'].find_one(learn_id)
model_data = db[learn_data['Model'][-1]].find_one(learn_data['Model'][0])
if learn_data['Status']['Model Fitted']:
if not 'Prediction' in learn_data or learn_data['Prediction']==None or overwrite_existing:
# sample_df = load_df_from_sample_notation(model_data['Feature Sample Location'])
predict_function_code = marshal.loads(db[model_data['Prediction Computation Function'][-1]]\
.find_one(model_data['Prediction Computation Function'][0])['Code'])
            predict_function = types.FunctionType(predict_function_code, globals())
            learn_data['Prediction'] = predict_function(learn_data, model_data)
learn_data['Status']['Prediction Computed'] = True
            db['learns'].replace_one({'_id': learn_data['_id']}, learn_data)
def compute_metrics(metric, learn_data, model_data):
    target_label_ground_truth = load_df_from_sample_notation(model_data['Feature Sample Location'])[model_data['Target Variable']]
    prediction = learn_data['Prediction']
    if metric=='AUC':
        return 0.9 # stub value; a real implementation might use sk.metrics.roc_auc_score(target_label_ground_truth, prediction)
def sw_evalute_model(learn_data, overwrite_existing, worker_id=None):
# learn_data = db['learns'].find_one(learn_id)
model_data = db[learn_data['Model'][-1]].find_one(learn_data['Model'][0])
if learn_data['Status']['Prediction Computed']:
for metric in learn_data['Evaluation Results'].keys():
            if learn_data['Evaluation Results'][metric]==None or overwrite_existing:
                learn_data['Evaluation Results'][metric] = compute_metrics(metric, learn_data, model_data)
learn_data['Status']['Model Evaluated'] = True
        db['learns'].replace_one({'_id': learn_data['_id']}, learn_data)
stage_to_func_dict={
'Features Computed':sw_compute_features,
'Model Fitted':sw_learn,
'Prediction Computed':sw_compute_prediction,
'Model Evaluated': sw_evalute_model
}
def report_on_set_of_learns(set_of_learns):
    return pd.concat([pd.DataFrame(learn['Status'], index=[learn['_id']]) for learn in set_of_learns], axis=0).mean(),\
        pd.concat( [pd.DataFrame(learn['Evaluation Results'], index=[learn['_id']]) \
            for learn in set_of_learns], axis=0).mean()
def sw_report(task_id):
    learns_set = list( db['learns'].find(
{'Parent Task _id':(task_id, 'learn_tasks')}
) )
model_learn_dict = {}
for learn in learns_set:
if learn['Model'] in model_learn_dict.keys():
model_learn_dict[learn['Model']].append(learn)
else:
model_learn_dict[learn['Model']] = [learn]
    for model, learns_set in model_learn_dict.items():
print(str(model), report_on_set_of_learns(learns_set))
def zaglushka_compute_features(learn_data, model_data):
initial_sample_df = load_df_from_sample_notation(model_data['Initial Sample Location'])
    save_df_to_sample_notation(initial_sample_df.groupby(model_data["Feature Generation Index"]).apply(lambda x:x.iloc[0])\
.set_index(model_data["Feature Generation Index"], drop=False),
model_data['Feature Sample Location'])
return learn_data
def zaglushka_learn(learn_data, model_data):
sample_df = load_df_from_sample_notation(model_data['Feature Sample Location'])
    save_df_to_sample_notation(sample_df.groupby(model_data["Feature Generation Index"]).apply(lambda x:x.iloc[0]),
model_data['Feature Sample Location'])
return learn_data
def zaglushka_predict(learn_data, model_data):
sample_df = load_df_from_sample_notation(model_data['Feature Sample Location'])
return list(np.random.random(sample_df.shape[0]))
def sw_create_cv_task(task_data, model_data, learn_data, **kvargs):
    # serialize the functions and persist the supporting data to the database
for data in [task_data, model_data, learn_data]:
for k, object_to_serialize in data.items():
if type(object_to_serialize) == type(check_sample_exists): #just fucntion, no matter which one
lookup_in_db = db['functions'].find_one({'Name':object_to_serialize.__name__})
if lookup_in_db:
data[k] = (lookup_in_db['_id'],'functions')
else:
data[k] = (db['functions'].insert_one({
'Name': object_to_serialize.__name__,
'Type': k,
'Code': marshal.dumps(object_to_serialize.__code__),
'Serialization':'marshall',
'Libraries':['pd','np']
}).inserted_id,
'functions')
            if type(object_to_serialize) == pd.core.frame.DataFrame:
## make filename from model name
                new_filename = model_data['Model Name'].replace(" ","_")+"_df_"+k.replace(" ","_")+".txt"
## test there is no file there, no file
## save file
## save notation to smple database if insert to database
## change
save_df_to_sample_notation(object_to_serialize, (new_filename, 'Text File') )
data[k] = (new_filename, 'Text File')
## form CV_Set if not set
if 'CV Set' not in task_data:
n_folds = kvargs['n_folds'] if 'n_folds' in kvargs else 3
cv_type = kvargs['cv_type'] if 'cv_type' in kvargs else 'train_test'
unique_indexes = load_df_from_sample_notation(model_data['Initial Sample Location'])\
[model_data["Feature Generation Index"]].unique()
if cv_type == 'train_test':
task_data['CV Set'] = [{'Train Index':db['index_sets'].insert_one({'Index Set':[str(unique_indexes[i]) for i in x[0]]}).inserted_id,
'Test Index':db['index_sets'].insert_one({'Index Set':[str(unique_indexes[i]) for i in x[1]]}).inserted_id} for x in KFold( n_folds).split(unique_indexes)]
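# The KFold splits themselves are stored in the 'index_sets' collection; the task's 'CV Set'
# keeps only references (inserted ids) to those train/test index documents.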
print(task_data, model_data, learn_data)
learn_data['Model'] = (db['models'].insert_one(model_data).inserted_id,'models')
learn_data['Parent Task _id'] = (db['learn_tasks'].insert_one(task_data).inserted_id, 'learn_tasks')
task_data['_id'] = learn_data['Parent Task _id']
assert 'CV Set' in task_data
# go through CV set and save learns, adding Parent
for cv_split_data in task_data['CV Set']:
learn_data['CV Split']=cv_split_data
if '_id' in learn_data:
learn_data.pop('_id')
learn_id = db['learns'].insert_one(learn_data).inserted_id
return task_data['_id']
def sw_worker(task_id, worker_id):
task_data = db['learn_tasks'].find_one( (task_id[0] if type(task_id)==tuple else task_id) )
print(task_data)
learns_set = list( db['learns'].find({'Parent Task _id':task_id}))
for learn in learns_set:
if 'Lock' not in db['learns'].find_one(learn['_id']) or db['learns'].find_one(learn['_id'])['Lock']==worker_id:
db['learns'].update_one({'_id':learn['_id']},{'$set':{'Lock':worker_id}})
for stage in task_data['Stages']:
if not learn['Status'][stage]:
stage_to_func_dict[stage](learn, task_data['Overwrite Existed'], worker_id)
db['learns'].update_one({'_id':learn['_id']},{'$delete':'Lock'})
# sw_worker(
# sw_create_cv_task(task_data={
# 'Stages':['Features Computed','Model Fitted','Prediction Computed','Model Evaluated'],
# 'Overwrite Existed':False
# },model_data={
# "Model Name": "test_0",
# "Model Description": "test model 0 for functionality test",
# "Initial Sample Location": ('/home/jupyter/jupyter_webroot/kgl/santander/data/train.csv',{'sep':','}, 'Text File'),
# "Feature Generation Function": zaglushka_compute_features,
# "Feature Generation Index": 'ID',
# 'Target Variable': 'TARGET',
# # 'Feature Generation Params': None,
# # "Feature Evaluation Function": None,
# "Feature Sample Location": ('features_test_0.txt', 'Text File'),
# "Learn Function": zaglushka_learn,
# # "Learn Function Parameters":None,
# "Predict Function": zaglushka_predict,
# },learn_data={
# 'Status':{'Features Computed':False,'Model Fitted':False,'Prediction Computed':False,'Model Evaluated':False}
# }),
# 1) | gpl-3.0 |
great-expectations/great_expectations | tests/integration/fixtures/yellow_trip_data_pandas_fixture/one_multi_batch_request_one_validator.py | 1 | 2778 | import numpy as np
from great_expectations.core.batch import BatchRequest
from great_expectations.data_context.data_context import DataContext
from great_expectations.datasource.data_connector.batch_filter import (
BatchFilter,
build_batch_filter,
)
from great_expectations.validator.validation_graph import MetricConfiguration
context = DataContext()
suite = context.get_expectation_suite("yellow_trip_data_validations")
# This BatchRequest will retrieve all twelve batches from 2019
multi_batch_request = BatchRequest(
datasource_name="taxi_pandas",
data_connector_name="monthly",
data_asset_name="my_reports",
data_connector_query={"batch_filter_parameters": {"year": "2019"}},
)
# Instantiate the Validator
validator_multi_batch = context.get_validator(
batch_request=multi_batch_request, expectation_suite=suite
)
# The active batch should be December, as this should be the last one loaded. Confirming here.
assert validator_multi_batch.active_batch_definition.batch_identifiers["month"] == "12"
# Get the list of all batches contained by the Validator for use in the BatchFilter
total_batch_definition_list: list = [
v.batch_definition for k, v in validator_multi_batch.batches.items()
]
# Filter to all batch_definitions prior to December
pre_dec_batch_filter: BatchFilter = build_batch_filter(
data_connector_query_dict={
"custom_filter_function": lambda batch_identifiers: int(
batch_identifiers["month"]
)
< 12
and batch_identifiers["year"] == "2019"
}
)
pre_dec_batch_definition_list: list = (
pre_dec_batch_filter.select_from_data_connector_query(
batch_definition_list=total_batch_definition_list
)
)
# Get the highest max and lowest min before December
cumulative_max = 0
cumulative_min = np.Inf
for batch_definition in pre_dec_batch_definition_list:
batch_id: str = batch_definition.id
current_max = validator_multi_batch.get_metric(
MetricConfiguration(
"column.max",
metric_domain_kwargs={"column": "fare_amount", "batch_id": batch_id},
)
)
cumulative_max = current_max if current_max > cumulative_max else cumulative_max
current_min = validator_multi_batch.get_metric(
MetricConfiguration(
"column.min",
metric_domain_kwargs={"column": "fare_amount", "batch_id": batch_id},
)
)
cumulative_min = current_min if current_min < cumulative_min else cumulative_min
# Use the highest max and lowest min from before December to create an expectation which we validate against December
result = validator_multi_batch.expect_column_values_to_be_between(
"fare_amount", min_value=cumulative_min, max_value=cumulative_max
)
assert result["success"]
| apache-2.0 |
YeEmrick/learning | cs231/assignment/assignment2/experiments/FirstConvNet/conf_init_maker.py | 1 | 1588 | import os
import sys
from sklearn.externals import joblib
import json
import numpy as np
DIR_CS231n = '/Users/clement/Documents/MLearning/CS231/assignment2/'
conf = {}
# Model instance
conf['input_dim'] = (3, 32, 32)
conf['num_filters'] = [16, 32, 64, 128]
conf['filter_size'] = 3
conf['hidden_dim'] = [500, 500]
conf['num_classes'] = 10
conf['weight_scale'] = 5e-2
conf['use_batchnorm'] = True
# Solver instance
conf['update_rule'] = 'adam'
conf['lr_decay'] = 0.95
conf['batch_size'] = 50
conf['num_epochs'] = 2000
conf['print_every'] = 10
conf['verbose'] = False
conf['check_points_every'] = 1
# Helper function
def name_model(path):
''' Given the directory in which a new model will be run,
automatically choose the model name by incrementing the largest
existing model number in that directory by 1'''
existing_models = [f for f in os.listdir(
path) if f.split('_')[0] == 'model']
if len(existing_models) == 0:
model = -1
else:
model = max([int(f.split('_')[1]) for f in existing_models])
return os.path.join(path, 'model_' + str(model + 1))
name = os.listdir(DIR_CS231n)
dir_json = name_model(os.path.join(
DIR_CS231n, 'experiments', 'FirstConvNet'))
conf['path'] = dir_json
try:
    # Initialize the model tree
    os.mkdir(dir_json)
except OSError:
    raise ValueError(
        'Cannot create the directory for the model %s' % (dir_json))
with open(os.path.join(dir_json, 'conf_init.json'), 'w+') as f:
json.dump(conf,
f,
sort_keys=True,
indent=4,
ensure_ascii=False)
| apache-2.0 |
mantidproject/mantid | scripts/test/PyChopTest.py | 3 | 12284 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
"""Test suite for the PyChop package
"""
import unittest
from unittest import mock
from unittest.mock import patch
import builtins
import warnings
import numpy as np
from PyChop import PyChop2
class PyChop2Tests(unittest.TestCase):
# Tests the Fermi chopper instruments
def test_pychop_fermi(self):
instnames = ['maps', 'mari', 'merlin']
res = []
flux = []
for inc, instname in enumerate(instnames):
chopobj = PyChop2(instname)
# Code should give an error if the chopper settings and Ei have
# not been set.
self.assertRaises(ValueError, chopobj.getResolution)
chopobj.setChopper('s', 200)
chopobj.setEi(18)
rr, ff = chopobj.getResFlux(np.linspace(0, 17, 10))
res.append(rr)
flux.append(ff)
# Checks that the flux should be highest for MERLIN, MARI and MAPS in that order
self.assertGreater(flux[2], flux[1])
# Note that MAPS has been upgraded so now should have higher flux than MARI.
self.assertGreater(flux[0], flux[1])
# Checks that the resolution should be best for MAPS, MARI, and MERLIN in that order
# actually MAPS and MARI resolutions are very close (previous error in MAPS distances
# meant that MARI was calculated to have a better resolution, but it *should* be MAPS)
self.assertLess(res[0][0], res[1][0])
self.assertLess(res[1][0], res[2][0])
# Now tests the standalone function
for inc, instname in enumerate(instnames):
rr, ff = PyChop2.calculate(instname, 's', 200, 18, 0)
self.assertAlmostEqual(rr[0], res[inc][0], places=7)
self.assertAlmostEqual(ff, flux[inc], places=7)
# Tests the different variants of LET
def test_pychop_let(self):
variants = ['High flux', 'Intermediate', 'High resolution']
res = []
flux = []
for inc, variant in enumerate(variants):
chopobj = PyChop2('LET', variant)
# Checks that it instantiates the correct variant
self.assertTrue(variant in chopobj.getChopper())
# Code should give an error if the chopper settings and Ei have
# not been set.
self.assertRaises(ValueError, chopobj.getResolution)
chopobj.setFrequency(200)
chopobj.setEi(18)
rr, ff = chopobj.getResFlux(np.linspace(0, 17, 10))
res.append(rr)
flux.append(ff)
# Checks that the flux should be highest for 'High flux', then 'Intermediate', 'High resolution'
self.assertGreater(flux[0], flux[1])
self.assertGreaterEqual(flux[1], flux[2])
# Checks that the resolution should be best for 'High resolution', then 'Intermediate', 'High flux'
self.assertLessEqual(res[2][0], res[1][0])
self.assertLessEqual(res[1][0], res[0][0])
# Now tests the standalone function
for inc, variant in enumerate(variants):
rr, ff = PyChop2.calculate('LET', variant, 200, 18, 0)
self.assertAlmostEqual(rr[0], res[inc][0], places=7)
self.assertAlmostEqual(ff, flux[inc], places=7)
def test_pychop_invalid_ei(self):
chopobj = PyChop2('MARI', 'G', 400.)
chopobj.setEi(120)
with warnings.catch_warnings(record=True) as w:
res = chopobj.getResolution(130.)
assert len(w) == 1
assert issubclass(w[0].category, UserWarning)
assert "Cannot calculate for energy transfer greater than Ei" in str(w[0].message)
assert np.isnan(res[0])
class MockedModule(mock.MagicMock):
# A class which is meant to act like a module
def __init__(self, *args, mock_class=mock.MagicMock, **kwargs):
super().__init__(*args, **kwargs)
self.mock_class = mock_class
def __call__(self, *args, **kwargs):
return self.mock_class(*args, **kwargs)
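# Calling the fake "module" object itself (e.g. Figure(...)) instantiates mock_class
# instead of a real class, which lets the tests swap in the fake_* widgets defined below.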
class MockImports():
"""
Class to mock imports. Meant to be used with patch on `builtins.__import__`
Fake modules can be access using the dot notation on this object, e.g.
    >>> mockmods = MockImports(include=['numpy'])
    >>> with patch('builtins.__import__', mockmods.import_func):
    >>>     import numpy.sin
    >>> print(numpy.sin)
    >>> print(mockmods.numpy.sin)
"""
def __init__(self, include=None, replace=None):
# By default all imports will be mocked.
# Use the include list to mock only specified modules (and submodules)
# Use replace to specify a object to substitute for the real module
self.include = include
self.replace = replace
self.real_import = builtins.__import__
self.loaded_modules = {} # Stores mocks of loaded fake modules
self.loaded_from = {} # Stores mocks of "from" syntax
if replace:
for replacement_mock in replace:
if replacement_mock not in self.include:
self.include.append(replacement_mock)
def _check_dot_names(self, name, ref_list):
for ref_name in ref_list:
level = ref_name.count('.') + 1
requested = '.'.join(name.split('.')[:level])
if ref_name == requested:
return name
return None
def is_module_included(self, module_name):
if not self.include:
return True
check_includes = self._check_dot_names(module_name, self.include)
return True if check_includes is not None else False
def get_mock(self, name, fromlist):
replacement = self._check_dot_names(name, self.replace)
root_name = name.split('.')[0]
save = False
if replacement is not None:
rv = self.replace[replacement]
elif root_name in self.loaded_modules:
rv = self.loaded_modules[root_name]
elif name in self.loaded_from:
rv = self.loaded_from[name]
else:
rv = mock.MagicMock()
save = True
if fromlist and replacement is None:
for mods in fromlist:
replacement = self._check_dot_names(mods, self.replace)
if replacement is not None:
setattr(rv, mods, self.replace[replacement])
if save:
self.save_module(name, fromlist, rv)
return rv
def save_module(self, name, fromlist, mock_object):
root_name = name.split('.')[0]
self.loaded_modules[root_name] = mock_object
if fromlist:
for mods in fromlist:
self.loaded_from[mods] = mock_object
def import_func(self, name, globals=None, locals=None, fromlist=(), level=0):
prf = f'name:{name}, from:{fromlist}, level:{level}'
if self.include:
if self.is_module_included(name):
return self.get_mock(name, fromlist)
else:
return self.real_import(name, globals, locals, fromlist, level)
else:
return MockedModule()
def __getattr__(self, module_name):
if module_name in self.loaded_modules:
return self.loaded_modules[module_name]
if module_name in self.loaded_from:
return getattr(self.loaded_from[module_name], module_name)
class PyChopGuiTests(unittest.TestCase):
# Tests GUI routines
@classmethod
def setUpClass(cls):
class fake_QMainWindow():
def __init__(self, *args, **kwargs):
self.menuBar = mock.MagicMock()
self.setCentralWidget = mock.MagicMock()
self.setWindowTitle = mock.MagicMock()
def setWindowFlags(self, *args, **kwargs): # noqa: E306
pass
def show(self): # noqa: E306
pass
class fake_QCombo(mock.MagicMock): # noqa: E306
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.clear()
def clear(self): # noqa: E306
self.items = []
self.currentIndex = 0
def addItem(self, item): # noqa: E306
self.items.append(item)
def currentText(self): # noqa: E306
return self.items[self.currentIndex]
def count(self): # noqa: E306
return len(self.items)
def itemText(self, idx): # noqa: E306
return self.items[idx]
def setCurrentIndex(self, idx): # noqa: E306
self.currentIndex = idx
def __getattr__(self, attribute): # noqa: E306
if attribute not in self.__dict__:
self.__dict__[attribute] = mock.MagicMock()
return self.__dict__[attribute]
class fake_Line(mock.MagicMock): # noqa: E306
def __init__(self, parent, *args, **kwargs):
super().__init__(*args, **kwargs)
self.parent = parent
            def set_label(self, label): # noqa: E306
                self.parent.legends[self] = label
class fake_Axes(mock.MagicMock): # noqa: E306
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.legends = {}
def plot(self, *args, **kwargs): # noqa: E306
self.lines.append(fake_Line(self))
return self.lines[-1],
def get_legend_handles_labels(self): # noqa: E306
labels = [self.legends[line] for line in self.lines]
return self.lines, labels
class fake_Figure(mock.MagicMock): # noqa: E306
def add_subplot(self, *args, **kwargs):
return fake_Axes()
class fake_Slider(mock.MagicMock): # noqa: E306
def __init__(self, parent, label, valmin, valmax, **kwargs):
super().__init__(parent, label, valmin, valmax, **kwargs)
self.parent, self.label, self.valmin, self.valmax = parent, label, valmin, valmax
self.val = kwargs.pop('valinit', 0.5)
self.valtext = mock.MagicMock()
self.on_changed = mock.MagicMock()
cls.mock_modules = MockImports(include=['qtpy', 'matplotlib', 'mantidqt', 'mantid.plots'],
replace={'QMainWindow':fake_QMainWindow,
'QComboBox':MockedModule(mock_class=fake_QCombo),
'Figure':MockedModule(mock_class=fake_Figure),
'Slider':MockedModule(mock_class=fake_Slider)})
# Mess around with import mechanism _inside_ PyChopGui so GUI libs not really imported
with patch('builtins.__import__', cls.mock_modules.import_func):
from PyChop import PyChopGui
cls.window = PyChopGui.PyChopGui()
cls.window.eiPlots.isChecked = mock.MagicMock(return_value=False)
cls.mock_modules.matplotlib.__version__ = '2.1.0'
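        # Give the mocked matplotlib a concrete version string, since PyChopGui presumably inspects it.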
def test_hyspec(self):
# Tests that Hyspec routines are only called when the instrument is Hyspec
with patch.object(self.window, 'setS2') as setS2:
self.window.setInstrument('MAPS')
self.window.calc_callback()
setS2.assert_not_called()
self.window.setInstrument('HYSPEC')
self.window.calc_callback()
setS2.assert_called()
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
wnesl/gnuradio-IA | gr-digital/examples/example_timing.py | 17 | 7791 | #!/usr/bin/env python
import sys

from gnuradio import gr, digital
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
from scipy import fftpack
class example_timing(gr.top_block):
def __init__(self, N, sps, rolloff, ntaps, bw, noise,
foffset, toffset, poffset, mode=0):
gr.top_block.__init__(self)
rrc_taps = gr.firdes.root_raised_cosine(
sps, sps, 1.0, rolloff, ntaps)
gain = 2*scipy.pi/100.0
nfilts = 32
rrc_taps_rx = gr.firdes.root_raised_cosine(
nfilts, sps*nfilts, 1.0, rolloff, ntaps*nfilts)
data = 2.0*scipy.random.randint(0, 2, N) - 1.0
data = scipy.exp(1j*poffset) * data
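        # BPSK symbols (+/-1) rotated by the requested phase offset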
self.src = gr.vector_source_c(data.tolist(), False)
self.rrc = gr.interp_fir_filter_ccf(sps, rrc_taps)
self.chn = gr.channel_model(noise, foffset, toffset)
self.off = gr.fractional_interpolator_cc(0.20, 1.0)
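        # The fractional interpolator adds an extra fixed fractional-sample delay (0.2 samples)
        # on top of the timing/frequency offsets introduced by the channel model.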
if mode == 0:
self.clk = gr.pfb_clock_sync_ccf(sps, gain, rrc_taps_rx,
nfilts, nfilts//2, 3.5)
self.taps = self.clk.get_taps()
self.dtaps = self.clk.get_diff_taps()
self.vsnk_err = gr.vector_sink_f()
self.vsnk_rat = gr.vector_sink_f()
self.vsnk_phs = gr.vector_sink_f()
self.connect((self.clk,1), self.vsnk_err)
self.connect((self.clk,2), self.vsnk_rat)
self.connect((self.clk,3), self.vsnk_phs)
else: # mode == 1
mu = 0.5
gain_mu = 0.1
gain_omega = 0.25*gain_mu*gain_mu
omega_rel_lim = 0.02
self.clk = digital.clock_recovery_mm_cc(sps, gain_omega,
mu, gain_mu,
omega_rel_lim)
self.vsnk_err = gr.vector_sink_f()
self.connect((self.clk,1), self.vsnk_err)
self.vsnk_src = gr.vector_sink_c()
self.vsnk_clk = gr.vector_sink_c()
self.connect(self.src, self.rrc, self.chn, self.off, self.clk, self.vsnk_clk)
self.connect(self.off, self.vsnk_src)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=2000,
help="Set the number of samples to process [default=%default]")
parser.add_option("-S", "--sps", type="int", default=4,
help="Set the samples per symbol [default=%default]")
parser.add_option("-r", "--rolloff", type="eng_float", default=0.35,
help="Set the rolloff factor [default=%default]")
parser.add_option("-W", "--bandwidth", type="eng_float", default=2*scipy.pi/100.0,
help="Set the loop bandwidth [default=%default]")
parser.add_option("-n", "--ntaps", type="int", default=45,
help="Set the number of taps in the filters [default=%default]")
parser.add_option("", "--noise", type="eng_float", default=0.0,
help="Set the simulation noise voltage [default=%default]")
parser.add_option("-f", "--foffset", type="eng_float", default=0.0,
help="Set the simulation's normalized frequency offset (in Hz) [default=%default]")
parser.add_option("-t", "--toffset", type="eng_float", default=1.0,
help="Set the simulation's timing offset [default=%default]")
parser.add_option("-p", "--poffset", type="eng_float", default=0.0,
help="Set the simulation's phase offset [default=%default]")
parser.add_option("-M", "--mode", type="int", default=0,
help="Set the recovery mode (0: polyphase, 1: M&M) [default=%default]")
(options, args) = parser.parse_args ()
# Adjust N for the interpolation by sps
options.nsamples = options.nsamples // options.sps
# Set up the program-under-test
put = example_timing(options.nsamples, options.sps, options.rolloff,
options.ntaps, options.bandwidth, options.noise,
options.foffset, options.toffset, options.poffset,
options.mode)
put.run()
if options.mode == 0:
data_src = scipy.array(put.vsnk_src.data()[20:])
data_clk = scipy.array(put.vsnk_clk.data()[20:])
data_err = scipy.array(put.vsnk_err.data()[20:])
data_rat = scipy.array(put.vsnk_rat.data()[20:])
data_phs = scipy.array(put.vsnk_phs.data()[20:])
f1 = pylab.figure(1, figsize=(12,10), facecolor='w')
# Plot the IQ symbols
s1 = f1.add_subplot(2,2,1)
s1.plot(data_src.real, data_src.imag, "bo")
s1.plot(data_clk.real, data_clk.imag, "ro")
s1.set_title("IQ")
s1.set_xlabel("Real part")
s1.set_ylabel("Imag part")
s1.set_xlim([-2, 2])
s1.set_ylim([-2, 2])
# Plot the symbols in time
s2 = f1.add_subplot(2,2,2)
s2.plot(data_src.real, "bo-")
s2.plot(data_clk.real, "ro")
s2.set_title("Symbols")
s2.set_xlabel("Samples")
s2.set_ylabel("Real Part of Signals")
# Plot the clock recovery loop's error
s3 = f1.add_subplot(2,2,3)
s3.plot(data_err)
s3.set_title("Clock Recovery Loop Error")
s3.set_xlabel("Samples")
s3.set_ylabel("Error")
# Plot the clock recovery loop's error
s4 = f1.add_subplot(2,2,4)
s4.plot(data_phs)
s4.set_title("Clock Recovery Loop Filter Phase")
s4.set_xlabel("Samples")
s4.set_ylabel("Filter Phase")
diff_taps = put.dtaps
ntaps = len(diff_taps[0])
nfilts = len(diff_taps)
t = scipy.arange(0, ntaps*nfilts)
f3 = pylab.figure(3, figsize=(12,10), facecolor='w')
s31 = f3.add_subplot(2,1,1)
s32 = f3.add_subplot(2,1,2)
s31.set_title("Differential Filters")
s32.set_title("FFT of Differential Filters")
for i,d in enumerate(diff_taps):
D = 20.0*scipy.log10(abs(fftpack.fftshift(fftpack.fft(d, 10000))))
s31.plot(t[i::nfilts].real, d, "-o")
s32.plot(D)
# If testing the M&M clock recovery loop
else:
data_src = scipy.array(put.vsnk_src.data()[20:])
data_clk = scipy.array(put.vsnk_clk.data()[20:])
data_err = scipy.array(put.vsnk_err.data()[20:])
f1 = pylab.figure(1, figsize=(12,10), facecolor='w')
# Plot the IQ symbols
s1 = f1.add_subplot(2,2,1)
s1.plot(data_src.real, data_src.imag, "o")
s1.plot(data_clk.real, data_clk.imag, "ro")
s1.set_title("IQ")
s1.set_xlabel("Real part")
s1.set_ylabel("Imag part")
s1.set_xlim([-2, 2])
s1.set_ylim([-2, 2])
# Plot the symbols in time
s2 = f1.add_subplot(2,2,2)
s2.plot(data_src.real, "o-")
s2.plot(data_clk.real, "ro")
s2.set_title("Symbols")
s2.set_xlabel("Samples")
s2.set_ylabel("Real Part of Signals")
# Plot the clock recovery loop's error
s3 = f1.add_subplot(2,2,3)
s3.plot(data_err)
s3.set_title("Clock Recovery Loop Error")
s3.set_xlabel("Samples")
s3.set_ylabel("Error")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
anaderi/lhcb_trigger_ml | hep_ml/commonutils.py | 1 | 14778 | """
`commonutils` contains some helpful functions and classes
which are often used (by other modules)
"""
from __future__ import print_function, division, absolute_import
import math
import io
import numbers
import numpy
import pandas
from numpy.random.mtrand import RandomState
from scipy.special import expit
import sklearn.cross_validation
from sklearn.neighbors.unsupervised import NearestNeighbors
__author__ = "Alex Rogozhnikov"
def execute_notebook(filename):
"""Allows one to execute cell-by-cell some IPython notebook provided its name"""
from IPython.core.getipython import get_ipython
from IPython.nbformat import current
with io.open(filename) as f:
notebook = current.read(f, 'json')
ip = get_ipython()
for cell in notebook.worksheets[0].cells:
if cell.cell_type == 'code':
ip.run_cell(cell.input)
def map_on_cluster(ipc_profile, *args, **kw_args):
"""The same as map, but the first argument is ipc_profile. Distributes the task over IPython cluster.
Important: this function is not lazy!
:param str|None ipc_profile: the IPython cluster profile to use.
:return: the result of mapping
"""
if ipc_profile is None:
return list(map(*args, **kw_args))
else:
from IPython.parallel import Client
return Client(profile=ipc_profile).load_balanced_view().map_sync(*args, **kw_args)
def sigmoid_function(x, width):
""" Sigmoid function is smoothing of Heaviside function,
the less width, the closer we are to Heaviside function
:type x: array-like with floats, arbitrary shape
:type width: float, if width == 0, this is simply Heaviside function
"""
assert width >= 0, 'the width should be non-negative'
if abs(width) > 0.0001:
return expit(x / width)
else:
return (x > 0) * 1.0
def generate_sample(n_samples, n_features, distance=2.0):
"""Generates some test distribution,
signal and background distributions are gaussian with same dispersion and different centers,
all variables are independent (gaussian correlation matrix is identity)"""
from sklearn.datasets import make_blobs
centers = numpy.zeros((2, n_features))
centers[0, :] = - distance / 2
centers[1, :] = distance / 2
X, y = make_blobs(n_samples=n_samples, n_features=n_features, centers=centers)
columns = ["column" + str(x) for x in range(n_features)]
X = pandas.DataFrame(X, columns=columns)
return X, y
def check_uniform_label(uniform_label):
""" Convert to numpy.array
:param uniform_label: label or list of labels (examples: 0, 1, [0], [1], [0, 1])
:return: numpy.array (with [0], [1] or [0, 1])
"""
if isinstance(uniform_label, numbers.Number):
return numpy.array([uniform_label])
else:
return numpy.array(uniform_label)
def reorder_by_first(*arrays):
""" Applies the same permutation to all passed arrays,
permutation sorts the first passed array """
arrays = check_arrays(*arrays)
order = numpy.argsort(arrays[0])
return [arr[order] for arr in arrays]
def reorder_by_first_inverse(*arrays):
"""The same as reorder, but the first array is ordered by descending"""
arrays = check_arrays(*arrays)
order = numpy.argsort(-arrays[0])
return [arr[order] for arr in arrays]
def train_test_split(*arrays, **kw_args):
"""Does the same thing as train_test_split, but preserves columns in DataFrames.
Uses the same parameters: test_size, train_size, random_state, and has the same interface
:type list[numpy.array|pandas.DataFrame] arrays: arrays to split
"""
assert len(arrays) > 0, "at least one array should be passed"
length = len(arrays[0])
for array in arrays:
assert len(array) == length, "different size"
train_indices, test_indices = sklearn.cross_validation.train_test_split(range(length), **kw_args)
result = []
for array in arrays:
if isinstance(array, pandas.DataFrame):
result.append(array.iloc[train_indices, :])
result.append(array.iloc[test_indices, :])
else:
result.append(array[train_indices])
result.append(array[test_indices])
return result
def weighted_percentile(array, percentiles, sample_weight=None, array_sorted=False, old_style=False):
""" Very close to numpy.precentile, but supports weights.
NOTE: percentiles should be in [0, 1]!
:param array: numpy.array with data
:param percentiles: array-like with many percentiles
:param sample_weight: array-like of the same length as `array`
:param array_sorted: bool, if True, then will avoid sorting
:param old_style: if True, will correct output to be consistent with numpy.percentile.
:return: numpy.array with computed percentiles.
"""
array = numpy.array(array)
percentiles = numpy.array(percentiles)
sample_weight = check_sample_weight(array, sample_weight)
assert numpy.all(percentiles >= 0) and numpy.all(percentiles <= 1), 'Percentiles should be in [0, 1]'
if not array_sorted:
array, sample_weight = reorder_by_first(array, sample_weight)
weighted_quantiles = numpy.cumsum(sample_weight) - 0.5 * sample_weight
if old_style:
        # To be consistent with numpy.percentile
weighted_quantiles -= weighted_quantiles[0]
weighted_quantiles /= weighted_quantiles[-1]
else:
weighted_quantiles /= numpy.sum(sample_weight)
return numpy.interp(percentiles, weighted_quantiles, array)
def build_normalizer(signal, sample_weight=None):
"""Prepares normalization function for some set of values
transforms it to uniform distribution from [0, 1]. Example of usage:
>>>normalizer = build_normalizer(signal)
>>>pylab.hist(normalizer(background))
>>># this one should be uniform in [0,1]
>>>pylab.hist(normalizer(signal))
Parameters:
:param numpy.array signal: shape = [n_samples] with floats
:param numpy.array sample_weight: shape = [n_samples], non-negative weights associated to events.
"""
sample_weight = check_sample_weight(signal, sample_weight)
assert numpy.all(sample_weight >= 0.), 'sample weight must be non-negative'
signal, sample_weight = reorder_by_first(signal, sample_weight)
predictions = numpy.cumsum(sample_weight) / numpy.sum(sample_weight)
def normalizing_function(data):
return numpy.interp(data, signal, predictions)
return normalizing_function
def compute_cut_for_efficiency(efficiency, mask, y_pred, sample_weight=None):
""" Computes such cut(s), that provide given signal efficiency.
:type efficiency: float or numpy.array with target efficiencies, shape = [n_effs]
:type mask: array-like, shape = [n_samples], True for needed classes
:type y_pred: array-like, shape = [n_samples], predictions or scores (float)
:type sample_weight: None | array-like, shape = [n_samples]
:return: float or numpy.array, shape = [n_effs]
"""
sample_weight = check_sample_weight(mask, sample_weight)
assert len(mask) == len(y_pred), 'lengths are different'
efficiency = numpy.array(efficiency)
is_signal = mask > 0.5
y_pred, sample_weight = y_pred[is_signal], sample_weight[is_signal]
return weighted_percentile(y_pred, 1. - efficiency, sample_weight=sample_weight)
def compute_bdt_cut(target_efficiency, y_true, y_pred, sample_weight=None):
"""Computes cut which gives fixed efficiency.
:type target_efficiency: float from 0 to 1 or numpy.array with floats in [0,1]
:type y_true: numpy.array, of zeros and ones, shape = [n_samples]
:type y_pred: numpy.array, prediction probabilities returned by classifier, shape = [n_samples]
"""
assert len(y_true) == len(y_pred), "different size"
signal_proba = y_pred[y_true > 0.5]
percentiles = 1. - target_efficiency
sig_weights = None if sample_weight is None else sample_weight[y_true > 0.5]
return weighted_percentile(signal_proba, percentiles, sample_weight=sig_weights)
# region Knn-related things
# TODO update interface here and in all other places to work
# without columns
def computeSignalKnnIndices(uniform_variables, dataframe, is_signal, n_neighbors=50):
"""For each event returns the knn closest signal(!) events. No matter of what class the event is.
:type uniform_variables: list of names of variables, using which we want to compute the distance
:type dataframe: pandas.DataFrame, should contain these variables
:type is_signal: numpy.array, shape = [n_samples] with booleans
:rtype numpy.array, shape [len(dataframe), knn], each row contains indices of closest signal events
"""
assert len(dataframe) == len(is_signal), "Different lengths"
signal_indices = numpy.where(is_signal)[0]
for variable in uniform_variables:
assert variable in dataframe.columns, "Dataframe is missing %s column" % variable
uniforming_features_of_signal = numpy.array(dataframe.ix[is_signal, uniform_variables])
neighbours = NearestNeighbors(n_neighbors=n_neighbors, algorithm='kd_tree').fit(uniforming_features_of_signal)
_, knn_signal_indices = neighbours.kneighbors(dataframe[uniform_variables])
return numpy.take(signal_indices, knn_signal_indices)
def computeKnnIndicesOfSameClass(uniform_variables, X, y, n_neighbours=50):
"""Works as previous function, but returns the neighbours of the same class as element
:param list[str] uniform_variables: the names of columns"""
assert len(X) == len(y), "different size"
result = numpy.zeros([len(X), n_neighbours], dtype=numpy.int)
for label in set(y):
is_signal = y == label
label_knn = computeSignalKnnIndices(uniform_variables, X, is_signal, n_neighbours)
result[is_signal, :] = label_knn[is_signal, :]
return result
# endregion
def smear_dataset(testX, smeared_variables=None, smearing_factor=0.1):
"""For the selected features 'smears' them in dataset,
pay attention, that only float feature can be smeared by now.
If smeared variables is None, all the features are smeared"""
assert isinstance(testX, pandas.DataFrame), "the passed object is not of type pandas.DataFrame"
testX = pandas.DataFrame.copy(testX)
if smeared_variables is None:
smeared_variables = testX.columns
for var in smeared_variables:
        assert var in testX.columns, "The variable %s was not found in dataframe" % var
result = pandas.DataFrame.copy(testX)
for var in smeared_variables:
sigma = math.sqrt(numpy.var(result[var]))
result[var] += RandomState().normal(0, smearing_factor * sigma, size=len(result))
return result
def memory_usage():
"""Memory usage of the current process in bytes. Created for notebooks.
This will only work on systems with a /proc file system (like Linux)."""
result = {'peak': 0, 'rss': 0}
with open('/proc/self/status') as status:
for line in status:
parts = line.split()
key = parts[0][2:-1].lower()
if key in result:
result[key] = "{:,} kB".format(int(parts[1]))
return result
def indices_of_values(array):
"""For each value in array returns indices with this value
:param array: numpy.array with 1-dimensional initial data
:return: sequence of tuples (value, indices_with_this_value), sequence is ordered by value
"""
indices = numpy.argsort(array)
sorted_array = array[indices]
diff = numpy.nonzero(numpy.ediff1d(sorted_array))[0]
limits = [0] + list(diff + 1) + [len(array)]
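    # `limits` marks the boundaries between runs of equal values in the sorted array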
for i in range(len(limits) - 1):
yield sorted_array[limits[i]], indices[limits[i]: limits[i + 1]]
def print_header(text, level=3):
"""
Function to be used in notebooks to display headers not just plain text
:param text: str or object to print its __repr__
:param level: int, from 1 to 6 (1st, 2nd, 3rd order header)
"""
from IPython.display import display_html
display_html("<h{level}>{header}</h{level}>".format(header=text, level=level), raw=True)
def take_features(X, features):
"""
Takes features from dataset.
:param X: numpy.array or pandas.DataFrame
:param features: list of strings (if pandas.DataFrame) or list of ints
:return: pandas.DataFrame or numpy.array with the same length.
NOTE: may return view to original data!
"""
from numbers import Number
are_strings = all([isinstance(feature, str) for feature in features])
are_numbers = all([isinstance(feature, Number) for feature in features])
if are_strings and isinstance(X, pandas.DataFrame):
return X.ix[:, features]
elif are_numbers:
return numpy.array(X)[:, features]
else:
raise NotImplementedError("Can't take features {} from object of type {}".format(features, type(X)))
def check_sample_weight(y_true, sample_weight):
"""
Checks the weights, returns normalized version
:param y_true: numpy.array of shape [n_samples]
:param sample_weight: array-like of shape [n_samples] or None
:returns: numpy.array with weights of shape [n_samples]"""
if sample_weight is None:
return numpy.ones(len(y_true), dtype=numpy.float)
else:
sample_weight = numpy.array(sample_weight, dtype=numpy.float)
assert len(y_true) == len(sample_weight), \
"The length of weights is different: not {0}, but {1}".format(len(y_true), len(sample_weight))
return sample_weight
def check_xyw(X, y, sample_weight=None):
"""
Checks parameters of classifier / loss / metrics
:param X: array-like of shape [n_samples, n_features] (numpy.array or pandas.DataFrame)
:param y: array-like of shape [n_samples]
:param sample_weight: None or array-like of shape [n_samples]
:return:
"""
from sklearn.utils.validation import column_or_1d
y = column_or_1d(y)
sample_weight = check_sample_weight(y, sample_weight=sample_weight)
assert len(X) == len(y), 'Lengths are different'
if not (isinstance(X, pandas.DataFrame) or (isinstance(X, numpy.ndarray))):
X = numpy.array(X)
return X, y, sample_weight
def check_arrays(*arrays):
"""
Minor lazy substitution for sklearn.check_arrays
:param arrays:
:return:
"""
assert len(arrays) > 0, 'The number of array must be greater than zero'
checked_arrays = []
shapes = []
for arr in arrays:
if arr is not None:
checked_arrays.append(numpy.array(arr))
shapes.append(checked_arrays[-1].shape[0])
else:
checked_arrays.append(arr)
assert numpy.sum(numpy.array(shapes) == shapes[0]) == len(shapes), 'Different shapes of the arrays {}'.format(shapes)
return checked_arrays
| mit |
bradleyhd/netsim | speedup_params_graph.py | 1 | 1883 | import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import math
from scipy.optimize import curve_fit
def linear(x, a, b):
return a * x + b
def quadratic(x, a, b, c):
return a * x**2 + b * x + c
def exponential(x, a, b, c):
return a * x**b + c
#plt.figure(num=None, figsize=(16, 12), dpi=300, facecolor='w', edgecolor='k')
data = [[0.1,0.1,-7.434018514593847],[0.1,0.25,-2.7369569138303933],[0.1,0.5,-1.079596449962587],[0.1,0.75,4.596619461976396],[0.1,0.9,10.830481631205592],[0.25,0.1,0.21121155599113534],[0.25,0.25,7.481430746270853],[0.25,0.5,2.8832030633129158],[0.25,0.75,1.9925524276597761],[0.25,0.9,2.9447124715510937],[0.5,0.1,4.757563383369764],[0.5,0.25,-3.1984266947325426],[0.5,0.5,6.347890828747816],[0.5,0.75,6.6237753260623515],[0.5,0.9,2.858226562113666],[0.75,0.1,1.2363156675448093],[0.75,0.25,-3.2559043644701],[0.75,0.5,-2.800127094158841],[0.75,0.75,6.053966184836968],[0.75,0.9,-2.7392444750793308],[0.9,0.1,0.7360892695013829],[0.9,0.25,-2.905678488062344],[0.9,0.5,0.6593192258260506],[0.9,0.75,-0.7692772111599379],[0.9,0.9,-3.3453158431961576]]
data = np.array(data)
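# columns: smoothing factor, decay factor, mean speedup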
# popt, pcov = curve_fit(exponential, x, y)
fig = plt.figure()
ax = fig.gca(projection='3d')
plt.scatter(data[:, 0], data[:, 1], zs=data[:, 2], c=data[:, 2], cmap=plt.get_cmap('jet'))
# plt.plot(xl, exponential(xl, *popt), ln_fmt)
# graph(0, 'EDS5 - regular', 'ro', 'r--')
# graph(1, 'D5 - regular', 'bs', 'b--')
# graph(2, 'EDS5 - decision', 'go', 'g--')
# graph(3, 'D5 - decision', 'ms', 'm--')
plt.title('Effects of Weight Smoothing and Decay on Mean Speedup')
plt.xlabel('Smoothing')
plt.ylabel('Decay')
ax.set_zlabel('Mean Speedup')
plt.legend(loc=0, numpoints=1)
axes = plt.gca()
# axes.set_xscale('symlog')
# plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
# plt.savefig('nodes_vs_edges_added.png')
plt.show() | gpl-3.0 |
klim-/pyplane | gui/Ui_PyPlane_about.py | 1 | 10621 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'gui/Ui_PyPlane_about.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_DlgAbout(object):
def setupUi(self, DlgAbout):
DlgAbout.setObjectName(_fromUtf8("DlgAbout"))
DlgAbout.resize(470, 429)
self.buttonBox = QtGui.QDialogButtonBox(DlgAbout)
self.buttonBox.setGeometry(QtCore.QRect(120, 397, 341, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.grpInfo = QtGui.QGroupBox(DlgAbout)
self.grpInfo.setGeometry(QtCore.QRect(10, 3, 451, 391))
self.grpInfo.setObjectName(_fromUtf8("grpInfo"))
self.verticalLayoutWidget = QtGui.QWidget(self.grpInfo)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 20, 431, 361))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.formLayout_2 = QtGui.QFormLayout()
self.formLayout_2.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout_2.setContentsMargins(-1, 12, -1, -1)
self.formLayout_2.setObjectName(_fromUtf8("formLayout_2"))
self.label_pyplane_version = QtGui.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.label_pyplane_version.setFont(font)
self.label_pyplane_version.setObjectName(_fromUtf8("label_pyplane_version"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.LabelRole, self.label_pyplane_version)
self.pyplane_version_info = QtGui.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.pyplane_version_info.setFont(font)
self.pyplane_version_info.setObjectName(_fromUtf8("pyplane_version_info"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.FieldRole, self.pyplane_version_info)
self.label_pyplane_date = QtGui.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.label_pyplane_date.setFont(font)
self.label_pyplane_date.setObjectName(_fromUtf8("label_pyplane_date"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_pyplane_date)
self.pyplane_date = QtGui.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.pyplane_date.setFont(font)
self.pyplane_date.setObjectName(_fromUtf8("pyplane_date"))
self.formLayout_2.setWidget(2, QtGui.QFormLayout.LabelRole, self.pyplane_date)
self.label_platform = QtGui.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.label_platform.setFont(font)
self.label_platform.setObjectName(_fromUtf8("label_platform"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.FieldRole, self.label_platform)
self.pyplane_platform = QtGui.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.pyplane_platform.setFont(font)
self.pyplane_platform.setObjectName(_fromUtf8("pyplane_platform"))
self.formLayout_2.setWidget(2, QtGui.QFormLayout.FieldRole, self.pyplane_platform)
self.verticalLayout_2.addLayout(self.formLayout_2)
self.txtCopyright = QtGui.QLabel(self.verticalLayoutWidget)
self.txtCopyright.setAlignment(QtCore.Qt.AlignCenter)
self.txtCopyright.setWordWrap(True)
self.txtCopyright.setOpenExternalLinks(False)
self.txtCopyright.setObjectName(_fromUtf8("txtCopyright"))
self.verticalLayout_2.addWidget(self.txtCopyright)
self.horizontalLayout_2.addLayout(self.verticalLayout_2)
self.label = QtGui.QLabel(self.verticalLayoutWidget)
self.label.setMaximumSize(QtCore.QSize(200, 200))
self.label.setText(_fromUtf8(""))
self.label.setPixmap(QtGui.QPixmap(_fromUtf8(":/icons/pyplane_logo.png")))
self.label.setScaledContents(True)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout_2.addWidget(self.label)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.txtGPL = QtGui.QLabel(self.verticalLayoutWidget)
self.txtGPL.setAlignment(QtCore.Qt.AlignCenter)
self.txtGPL.setWordWrap(True)
self.txtGPL.setOpenExternalLinks(False)
self.txtGPL.setObjectName(_fromUtf8("txtGPL"))
self.verticalLayout.addWidget(self.txtGPL)
self.label_2 = QtGui.QLabel(self.verticalLayoutWidget)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setWordWrap(True)
self.label_2.setOpenExternalLinks(False)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.verticalLayout.addWidget(self.label_2)
self.formLayout = QtGui.QFormLayout()
self.formLayout.setObjectName(_fromUtf8("formLayout"))
self.label_python_version = QtGui.QLabel(self.verticalLayoutWidget)
self.label_python_version.setObjectName(_fromUtf8("label_python_version"))
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.label_python_version)
self.python_version_info = QtGui.QLabel(self.verticalLayoutWidget)
self.python_version_info.setObjectName(_fromUtf8("python_version_info"))
self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.python_version_info)
self.label_qt_version = QtGui.QLabel(self.verticalLayoutWidget)
self.label_qt_version.setObjectName(_fromUtf8("label_qt_version"))
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_qt_version)
self.label_pyqt_version = QtGui.QLabel(self.verticalLayoutWidget)
self.label_pyqt_version.setObjectName(_fromUtf8("label_pyqt_version"))
self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_pyqt_version)
self.label_matplotlib_version = QtGui.QLabel(self.verticalLayoutWidget)
self.label_matplotlib_version.setObjectName(_fromUtf8("label_matplotlib_version"))
self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_matplotlib_version)
self.qt_version_info = QtGui.QLabel(self.verticalLayoutWidget)
self.qt_version_info.setObjectName(_fromUtf8("qt_version_info"))
self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.qt_version_info)
self.pyqt_version_info = QtGui.QLabel(self.verticalLayoutWidget)
self.pyqt_version_info.setObjectName(_fromUtf8("pyqt_version_info"))
self.formLayout.setWidget(2, QtGui.QFormLayout.FieldRole, self.pyqt_version_info)
self.matplotlib_version_info = QtGui.QLabel(self.verticalLayoutWidget)
self.matplotlib_version_info.setObjectName(_fromUtf8("matplotlib_version_info"))
self.formLayout.setWidget(3, QtGui.QFormLayout.FieldRole, self.matplotlib_version_info)
self.verticalLayout.addLayout(self.formLayout)
self.retranslateUi(DlgAbout)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), DlgAbout.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), DlgAbout.reject)
QtCore.QMetaObject.connectSlotsByName(DlgAbout)
def retranslateUi(self, DlgAbout):
DlgAbout.setWindowTitle(_translate("DlgAbout", "About", None))
self.grpInfo.setTitle(_translate("DlgAbout", "Information", None))
self.label_pyplane_version.setText(_translate("DlgAbout", "PyPlane Version:", None))
self.pyplane_version_info.setText(_translate("DlgAbout", "TextLabel", None))
self.label_pyplane_date.setText(_translate("DlgAbout", "Date:", None))
self.pyplane_date.setText(_translate("DlgAbout", "TextLabel", None))
self.label_platform.setText(_translate("DlgAbout", "Platform:", None))
self.pyplane_platform.setText(_translate("DlgAbout", "TextLabel", None))
self.txtCopyright.setText(_translate("DlgAbout", "Copyright (C) 2013-2016\n"
"by Klemens Fritzsche, Carsten Knoll, \n"
"Jan Winkler\n"
"Technische Universität Dresden\n"
"Institut für Regelungs- und Steuerungstheorie\n"
"http://www.et.tu-dresden.de/rst/", None))
self.txtGPL.setText(_translate("DlgAbout", "This code is free software, licensed under the terms of the GNU General Public License, Version 3\n"
"http://www.gnu.org/license/", None))
self.label_2.setText(_translate("DlgAbout", "Please consult\n"
"<https://github.com/TUD-RST/pyplane.git>\n"
" for updated versions of this program!", None))
self.label_python_version.setText(_translate("DlgAbout", "Python-Version:", None))
self.python_version_info.setText(_translate("DlgAbout", "TextLabel", None))
self.label_qt_version.setText(_translate("DlgAbout", "QT-Version:", None))
self.label_pyqt_version.setText(_translate("DlgAbout", "PyQT-Version:", None))
self.label_matplotlib_version.setText(_translate("DlgAbout", "Matplotlib-Version:", None))
self.qt_version_info.setText(_translate("DlgAbout", "TextLabel", None))
self.pyqt_version_info.setText(_translate("DlgAbout", "TextLabel", None))
self.matplotlib_version_info.setText(_translate("DlgAbout", "TextLabel", None))
import icons_rc
| gpl-3.0 |
Winand/pandas | pandas/tests/indexes/timedeltas/test_timedelta_range.py | 6 | 2680 | import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.tseries.offsets import Day, Second
from pandas import to_timedelta, timedelta_range
from pandas.util.testing import assert_frame_equal
class TestTimedeltas(object):
_multiprocess_can_split_ = True
def test_timedelta_range(self):
expected = to_timedelta(np.arange(5), unit='D')
result = timedelta_range('0 days', periods=5, freq='D')
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(11), unit='D')
result = timedelta_range('0 days', '10 days', freq='D')
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(5), unit='D') + Second(2) + Day()
result = timedelta_range('1 days, 00:00:02', '5 days, 00:00:02',
freq='D')
tm.assert_index_equal(result, expected)
expected = to_timedelta([1, 3, 5, 7, 9], unit='D') + Second(2)
result = timedelta_range('1 days, 00:00:02', periods=5, freq='2D')
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(50), unit='T') * 30
result = timedelta_range('0 days', freq='30T', periods=50)
tm.assert_index_equal(result, expected)
# GH 11776
arr = np.arange(10).reshape(2, 5)
df = pd.DataFrame(np.arange(10).reshape(2, 5))
for arg in (arr, df):
with tm.assert_raises_regex(TypeError, "1-d array"):
to_timedelta(arg)
for errors in ['ignore', 'raise', 'coerce']:
with tm.assert_raises_regex(TypeError, "1-d array"):
to_timedelta(arg, errors=errors)
# issue10583
df = pd.DataFrame(np.random.normal(size=(10, 4)))
df.index = pd.timedelta_range(start='0s', periods=10, freq='s')
expected = df.loc[pd.Timedelta('0s'):, :]
result = df.loc['0s':, :]
assert_frame_equal(expected, result)
def test_errors(self):
# not enough params
msg = ('Of the three parameters: start, end, and periods, '
'exactly two must be specified')
with tm.assert_raises_regex(ValueError, msg):
timedelta_range(start='0 days')
with tm.assert_raises_regex(ValueError, msg):
timedelta_range(end='5 days')
with tm.assert_raises_regex(ValueError, msg):
timedelta_range(periods=2)
with tm.assert_raises_regex(ValueError, msg):
timedelta_range()
# too many params
with tm.assert_raises_regex(ValueError, msg):
timedelta_range(start='0 days', end='5 days', periods=10)
| bsd-3-clause |
daniaki/ppi_wrangler | clf_br.py | 1 | 19645 | #!/usr/bin/python
"""
Created on 2016-01-2016
@author: Daniel Esposito
@contact: [email protected]
"""
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import sys
from datetime import datetime
import pickle
import tempfile
from predict.utils import su_make_dir
import predict.preprocess as prep
from predict.evaluation import Statistics
from predict.cross_validation import DataFrameStratifiedKFold
from predict.utils import parallel_map, pretty_print_dict, create_seeds
from predict.preprocess import get_labels_from_file, generate_selectors
from predict.learning import evaluate_model, multi_label_evaluate
from predict.learning import HybridFeatureVotingClassifier
from predict.ontology import load_go_dag
from sklearn.base import clone
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import scale as mean_center
from scipy.stats import randint as sp_randint
from scipy.stats import uniform as sp_unif
from scipy.stats import rv_continuous
# ---------------------------------- CLASSIFIER ------------------------------- #
class uniform_gen_5(rv_continuous):
"""A uniform continuous random variable.
This distribution is constant between `loc` and ``loc + scale``.
%(before_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.uniform(0.0, 1.0, size=(5,))
def _pdf(self, x):
return 1.0*(x == x)
def _cdf(self, x):
return x
def _ppf(self, q):
return q
def _stats(self):
return 0.5, 1.0/12, 0, -1.2
def _entropy(self):
return 0.0
def sk_generate_params(method, columns=None):
param_dist = {}
if 'rf' in method:
param_dist = {
"max_features": sp_unif(0.01, 1.0),
"n_estimators": sp_randint(32, 256),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"],
"min_samples_split": sp_randint(2, 10),
"min_samples_leaf": sp_randint(2, 10),
"min_weight_fraction_leaf": sp_unif(0., 0.5),
"class_weight": ['balanced', 'balanced_subsample']
}
elif 'svm' in method:
param_dist = {
"C": sp_unif(0.01, 20.),
"kernel": ["linear"]
}
elif 'lr' in method:
param_dist = {
"C": sp_unif(0.01, 20.),
"penalty": ["l1", "l2"],
}
if 'bagged' in method:
_param_dist = {}
for c in columns:
for k, v in param_dist.items():
_param_dist['{}__{}'.format(c, k)] = v
_param_dist['weights'] = uniform_gen_5(0., 1.)
return _param_dist
else:
return param_dist
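# In 'bagged' mode every per-column estimator gets its own copy of the hyper-parameter
# space (prefixed with the column name), plus the shared voting 'weights' distribution.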
def make_classifiers(method, balanced, labels, selectors=None, columns=None, random_state=None):
estimators = {}
class_weight = None
if balanced:
class_weight = 'balanced'
# Make appropriate delegatation
if 'lr' in method:
estimator = LogisticRegression(n_jobs=1)
elif 'svm' in method:
estimator = SVC(probability=False)
elif 'rf' in method:
estimator = RandomForestClassifier(n_jobs=1)
else:
raise ValueError("Not implemented for method {}".format(method))
estimator = estimator.set_params(**{'class_weight': class_weight, 'random_state': random_state})
if hasattr(estimator, 'n_jobs'):
estimator.set_params(**{'n_jobs': 1})
if 'bagged' in method:
for l in labels:
named_estimators = zip(columns, [clone(estimator) for _ in columns])
weights = [1] * len(columns)
estimators[l] = HybridFeatureVotingClassifier(
named_estimators, selectors, voting='soft', weights=weights, n_jobs=4
)
else:
for l in labels:
estimators[l] = clone(estimator)
return estimators
# ---------------------------------- MAIN ------------------------------- #
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--method", help="Classification method", type=str, default='lr')
parser.add_argument("-j", "--iterations", help="Number of bootstraps.", type=int, default=25)
parser.add_argument("-f", "--folds", help="Number of cross-val folds.", type=int, default=5)
parser.add_argument("-n", "--jobs", help="Number of jobs to spawn.", type=int, default=1)
parser.add_argument("-i", "--induce", help="Use GO term induction.", action='store_true', default=False)
parser.add_argument("-v", "--vectoriser", help="Vectoriser method to use.", type=str, default='count')
parser.add_argument("-b", "--binary", help="Use binary count vectors.", action='store_true', default=False)
parser.add_argument("-w", "--balanced", help="Use balanced from sklearn.", action='store_true', default=False)
parser.add_argument("-go", "--go", help="Use GO terms in features.", action='store_true', default=False)
parser.add_argument("-pf", "--pfam", help="Use pfam terms in fetures.", action='store_true', default=False)
parser.add_argument("-ipr", "--interpro", help="Use interpro terms in fetures.", action='store_true', default=False)
parser.add_argument("-bp", "--biological_process", action='store_true', default=False)
parser.add_argument("-mf", "--molecular_function", action='store_true', default=False)
parser.add_argument("-cc", "--cellular_component", action='store_true', default=False)
parser.add_argument("-p", "--permute", action='store_true', default=False)
parser.add_argument("-s", "--scale", action='store_true', default=False)
parser.add_argument("-o1", "--output_folder", type=str, default='')
parser.add_argument("-o2", "--output_file_suffix", type=str, default='')
args = parser.parse_args()
method = args.method.lower()
iterations = args.iterations
cv_folds = args.folds
induce = args.induce
vectorizer_method = args.vectoriser
go = args.go
pfam = args.pfam
ipr = args.interpro
binary = args.binary
n_jobs = args.jobs
balanced = args.balanced
bp = args.biological_process
mf = args.molecular_function
cc = args.cellular_component
permute = args.permute
scale = args.scale
folder = args.output_folder
file_suffix = args.output_file_suffix
# ----------------------------- SETUP ----------------------------------- #
date = str(datetime.now()).replace(" ", "-").replace(":", '-').replace('.', '-')
log = open("tmp/training_log.txt", "w")
dag = load_go_dag('data/gene_ontology.1_2.obo')
if vectorizer_method not in ['count', 'tf-idf']:
print('Vectorizer Method must select from: count | tf-idf')
sys.exit(1)
if folder:
direc = 'results/{}-{}'.format(folder, date)
su_make_dir(direc)
else:
direc = tempfile.mkdtemp(prefix='{}-{}-'.format(method, date), dir='results/')
selection = []
ontologies = []
if pfam:
selection.append('pfam')
if ipr:
selection.append('ipr')
go_type = None
if induce and go:
go_type = 'induced_go'
elif go and not induce:
go_type = 'go'
if go_type and not (cc or bp or mf):
print("Must select at least one ontology if using GO annotations.")
sys.exit(1)
if go_type and (cc or bp or mf):
if cc:
selection.append(go_type + '_cc')
ontologies.append('cc')
if bp:
selection.append(go_type + '_bp')
ontologies.append('bp')
if mf:
selection.append(go_type + '_mf')
ontologies.append('mf')
if len(selection) == 0:
print("Please select some features using the command line args. Use --help or -h for help.")
sys.exit(1)
config = {
'date': date,
'method': method,
'binary': binary,
'balanced': balanced,
'induce': induce,
'iteration': iterations,
'cv_folds': cv_folds,
'selection': selection,
'ontologies': ontologies,
'vectorizer_method': vectorizer_method,
'permuted': permute,
'scale': scale
}
pretty_print_dict(config)
# ----------------------------- LOAD DATA ----------------------------------- #
np.random.seed(42)
developement_df, testing_df = prep.prep_data_frames(selection, load_interactome=False)
labels = get_labels_from_file('data/labels.tsv')
n = len(labels)
split_train = {l:0 for l in labels}
for l in labels:
split_train[l] = sum(developement_df[l].values)
split_test = {l:0 for l in labels}
for l in labels:
split_test[l] = sum(testing_df[l].values)
n_samples_train = len(developement_df)
n_samples_test = len(testing_df)
# Create the appropriate statistics container for the whole experiment.
training_stats = Statistics()
validation_stats = Statistics()
testing_stats = Statistics()
seeds = create_seeds(iterations)
min_class_freq = min(split_train.values())
cv_folds = min([min_class_freq, cv_folds])
statistics_objects = []
best_params = {l: {'score': 0.0, 'params': {}} for l in labels}
print("Running Supervised Ensemble Classification...")
# def do_iteration(i):
for i in range(iterations):
print("Iteration " + str(i+1))
rng = np.random.RandomState()
rng.seed(seeds[i])
dev_df_i = developement_df.copy(deep=True)
test_df_i = testing_df.copy(deep=True)
folds_i = list(DataFrameStratifiedKFold(
n_splits=cv_folds, shuffle=True, random_state=rng
).split(dev_df_i, y=dev_df_i['label'].values))
# Create the appropriate statistics container for this iteration.
validation_stats_i = Statistics()
training_stats_i = Statistics()
testing_stats_i = Statistics()
def do_fold(j):
print("\tFold " + str(j+1))
train_idx = folds_i[j][0]
valid_idx = folds_i[j][1]
training_fold = developement_df.loc[train_idx, ]
training_fold = training_fold.reset_index(drop=True)
validation_fold = developement_df.loc[valid_idx, ]
validation_fold = validation_fold.reset_index(drop=True)
# shuffle the folds
training_stats_i_f = Statistics()
validation_stats_i_f = Statistics()
testing_stats_i_f = Statistics()
# Init the label ranking lists.
label_pred_proba_train = []
label_pred_proba_valid = []
label_pred_proba_test = []
label_y_train = []
label_y_valid = []
label_y_test = []
# Set up the vectorizer for the bag-of-words representation
if vectorizer_method == 'tf-idf':
vectorizer = TfidfVectorizer(
stop_words=['go', '', ' '], binary=binary, lowercase=True,
sublinear_tf=True, max_df=1.0, min_df=0
)
vectorizer.fit(training_fold['terms'].values)
alpha = None
percentile = 100
elif vectorizer_method == 'count':
vectorizer = CountVectorizer(
stop_words=['go', '', ' '], binary=binary, lowercase=True
)
vectorizer.fit(training_fold['terms'].values)
alpha = None
percentile = 100
else:
raise TypeError("Vectorizer_method has type {}.".format(type(vectorizer_method)))
selectors = generate_selectors(selection, vectorizer.get_feature_names(), dag)
base_estimators = make_classifiers(method, balanced, labels, selectors, selection, rng)
for label in sorted(labels):
print("\t\tFitting for label {}...".format(label))
# SVMs assume standardised features, so we scale the features while avoiding
# mean-centering in order to preserve the sparsity structure of the count
# vectors. Scaling may also speed up convergence of the linear models.
x_train_l = vectorizer.transform(training_fold['terms'].values)
y_train_l = np.asarray(training_fold[label].values, dtype=int)
x_valid_l = vectorizer.transform(validation_fold['terms'].values)
y_valid_l = np.asarray(validation_fold[label].values, dtype=int)
x_test_l = vectorizer.transform(testing_df['terms'].values)
y_test_l = np.asarray(test_df_i[label].values, dtype=int)
if scale:
x_train_l = mean_center(x_train_l, with_mean=False)
x_valid_l = mean_center(x_valid_l, with_mean=False)
x_test_l = mean_center(x_test_l, with_mean=False)
# We generate the folds for randomised search up-front. We hold out one of the folds for
# Probability calibration so each sampled param set gets calibrated on the same data.
# This leaves cv_folds-2 folds for randomised search cross-validation.
# cv_rand = StratifiedKFold(n_splits=3, shuffle=True, random_state=rng)
base_estimator_l = base_estimators[label]
fresh_estimator = clone(base_estimator_l)
# Find the best params, then do a final proper calibration.
params = sk_generate_params(method, selection)
estimator_l = RandomizedSearchCV(
estimator=base_estimator_l, param_distributions=params,
n_iter=60, scoring='f1', cv=3, random_state=rng,
error_score=0.0, n_jobs=1, pre_dispatch='2*n_jobs',
refit=True
)
# Test if there's any signal if we permute the labels.
# Classifier should do poorly if we do so.
if permute:
y_train_l = rng.permutation(y_train_l)
threshold = 0.5
estimator_l.fit(x_train_l, y_train_l)
best_params_l = estimator_l.best_params_
# Calibrate the tuned classifier with the best hyperparameters (skipped for
# logistic regression, which already produces probability estimates).
if method not in ['lr']:
estimator_l = CalibratedClassifierCV(fresh_estimator.set_params(**best_params_l),
cv=3, method='sigmoid')
estimator_l.fit(x_train_l, y_train_l)
# Evaluate Performance characteristics and test on training to check overfitting.
y_train_prob_l = estimator_l.predict_proba(x_train_l)
y_valid_prob_l = estimator_l.predict_proba(x_valid_l)
y_test_prob_l = estimator_l.predict_proba(x_test_l)
training_stats_i_f.merge(evaluate_model(y_train_l, y_train_prob_l, label, threshold))
validation_stats_i_f.merge(evaluate_model(y_valid_l, y_valid_prob_l, label, threshold))
# Compute independent test data performance
testing_stats_i_f.merge(evaluate_model(y_test_l, y_test_prob_l, label, threshold))
# Get label ranking info
label_pred_proba_train.append([p[1] for p in y_train_prob_l])
label_pred_proba_valid.append([p[1] for p in y_valid_prob_l])
label_pred_proba_test.append([p[1] for p in y_test_prob_l])
label_y_train.append(y_train_l)
label_y_valid.append(y_valid_l)
label_y_test.append(y_test_l)
print(validation_stats_i_f.frame())
# Compute multi-label performance statistics
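# Stack the per-label prediction/target vectors into (n_samples, n_labels)
# matrices so the multi-label metrics see one row per sample and one column per label.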
y = np.vstack(zip(*label_y_train))
y_prob = np.vstack(zip(*label_pred_proba_train))
training_stats_i_f.merge(multi_label_evaluate(y, y_prob, threshold))
y = np.vstack(zip(*label_y_valid))
y_prob = np.vstack(zip(*label_pred_proba_valid))
validation_stats_i_f.merge(multi_label_evaluate(y, y_prob, threshold))
y = np.vstack(zip(*label_y_test))
y_prob = np.vstack(zip(*label_pred_proba_test))
testing_stats_i_f.merge(multi_label_evaluate(y, y_prob, threshold))
return training_stats_i_f, validation_stats_i_f, testing_stats_i_f
# For each iteration, batch the folds into parallel jobs
statistics_objects_i = parallel_map(do_fold, range(cv_folds), n_jobs)
for (train, val, test) in statistics_objects_i:
training_stats_i.merge(train)
validation_stats_i.merge(val)
testing_stats_i.merge(test)
log.write('Iteration {}\n'.format(i))
log.write('Training {}\n'.format(i))
training_stats_i.write(log, 'a')
log.write('Validation {}\n'.format(i))
validation_stats_i.write(log, 'a')
log.write('Testing {}\n'.format(i))
testing_stats_i.write(log, 'a')
statistics_objects.append([training_stats_i, validation_stats_i, testing_stats_i])
# return training_stats_i, validation_stats_i, testing_stats_i
# containers = parallel_map(do_iteration, range(iterations), n_jobs=n_jobs)
train_containers = [statistics_objects[i][0] for i in range(iterations)]
valid_containers = [statistics_objects[i][1] for i in range(iterations)]
test_containers = [statistics_objects[i][2] for i in range(iterations)]
for train in train_containers:
training_stats.merge(train)
for valid in valid_containers:
validation_stats.merge(valid)
for test in test_containers:
testing_stats.merge(test)
# --------------------- FINAL RESULTS ---------------------------- #
method = method.upper()
config['cv_folds'] = cv_folds
training_stats.frame().to_csv("{}/training-{}.tsv".format(direc, file_suffix), index=False)
validation_stats.frame().to_csv("{}/validation-{}.tsv".format(direc, file_suffix), index=False)
testing_stats.frame().to_csv("{}/testing-{}.tsv".format(direc, file_suffix), index=False)
pickle.dump(config, open('{}/{}-configuration.pkl'.format(direc, method), 'w'))
results = open('{}/{}-results.txt'.format(direc, method), 'w')
results.write("\nRun Settings: \n")
results.write("\tDate: \t\t\t\t{0}\n".format(date))
results.write("\tMethod: \t\t\t{0}\n".format(method))
results.write("\tBinary: \t\t\t{0}\n".format(binary))
results.write("\tBalanced: \t\t\t{0}\n".format(balanced))
results.write("\tInduced: \t\t\t{0}\n".format(induce))
results.write("\tPermuted:\t\t\t{0}\n".format(permute))
results.write("\tScaled:\t\t\t{0}\n".format(scale))
results.write("\tIterations:\t\t\t{0}\n".format(iterations))
results.write("\tFolds:\t\t\t\t{0}\n".format(cv_folds))
results.write("\tSelection:\t\t\t{0}\n".format(selection))
results.write("\tOntologies:\t\t\t{0}\n".format(ontologies))
results.write("\tVectorizer:\t\t\t{0}\n".format(vectorizer_method))
results.write("\nValidation performance:\n")
validation_stats.write(results, mode='a')
results.write("\nTest performance:\n")
testing_stats.write(results, mode='a')
results.write("\nTraining performance:\n")
training_stats.write(results, mode='a')
results.close()
log.close()
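# Example invocation (illustrative only; the script name below is a placeholder
# and the --method/--iterations spellings are assumed from args.method and
# args.iterations above -- all other flags are defined in this file):
#   python <this_script>.py --method lr --iterations 5 -f 5 -go -bp -mf -cc -v count -o1 run1 -o2 trial1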
| gpl-2.0 |
fyears/tmpy | tmpy/weight.py | 1 | 2201 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function, unicode_literals)
"""all the package built-in weight functions"""
from collections import Counter
from numpy import log
import pandas as pd
def get_weight_functions():
return ["weight_tf", "weight_tf_idf"]
def weight_tf(x, dictionary=None):
"""generate the term frequency based on the input documents tokens list.
Parameters
==========
x: list of the documents tokens
for example, [["i", "think", "i", "am", "ok"], ["yes", "you", "are", "ok"]]
dictionary: None (default) or list of the tokens to be built in the tf matrix
for example, None or ["ok","yes"]
Returns
==========
a sparse DataFrame of the term document matrix
"""
#TODO try to find out a memory friendly solution
if dictionary is None:
y = [Counter(item) for item in x]
else:
y = [Counter({k:v for k,v in Counter(item).iteritems() if k in dictionary}) for item in x]
z = [pd.Series(item.values(), index=item.keys(), dtype='int64') for item in y]
result = pd.concat(z, axis=1).fillna(0).to_sparse(fill_value=0)
return result
def weight_tf_idf(x, dictionary=None, smooth=False):
"""generate the term frequency–inverse document frequency based on the input documents tokens list.
Parameters
==========
x: list of the documents tokens
for example, [["i", "think", "i", "am", "ok"], ["yes", "you", "are", "ok"]]
dictionary: None (default) or list of the tokens to be built in the tf-idf matrix
for example, None or ["ok","yes"]
Returns
==========
a sparse DataFrame of the term frequency–inverse document frequency matrix
"""
#TODO try to find out a memory friendly solution
docs_counts = len(x)
smoothness = 1 if smooth else 0
tf_dtm = (weight_tf(x, dictionary)).transpose().to_dense()
terms_occured_docs_count = docs_counts - (tf_dtm==0).sum()
# optional add-one smoothing of the document counts (controlled by `smooth`)
idf_terms = log((docs_counts + smoothness) / (terms_occured_docs_count + smoothness))
idf_dtm = tf_dtm * idf_terms
idf_tdm = idf_dtm.transpose().to_sparse(fill_value=0)
return idf_tdm #return the tdm, not the dtm
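# Illustrative usage sketch (not part of the original module): builds the term
# frequency and tf-idf matrices for two tiny documents. Assumes a Python 2 /
# older-pandas environment matching the code above (Counter.iteritems,
# DataFrame.to_sparse).
if __name__ == "__main__":
    docs = [["i", "think", "i", "am", "ok"], ["yes", "you", "are", "ok"]]

    # full-vocabulary term-document matrix
    print(weight_tf(docs))

    # restrict the matrix to a fixed dictionary
    print(weight_tf(docs, dictionary=["ok", "yes"]))

    # tf-idf weighting of the same documents
    print(weight_tf_idf(docs))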
| bsd-3-clause |
COHRINT/cops_and_robots | src/cops_and_robots/fusion/grid.py | 1 | 14555 | #!/usr/bin/env python
from __future__ import division
"""MODULE_DESCRIPTION"""
__author__ = "Nick Sweet"
__copyright__ = "Copyright 2015, Cohrint"
__credits__ = ["Nick Sweet", "Nisar Ahmed"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Nick Sweet"
__email__ = "[email protected]"
__status__ = "Development"
import os
import sys
import logging
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
from scipy.sparse import csr_matrix
from shapely.geometry import Point
from cops_and_robots.fusion.probability import Probability
from cops_and_robots.fusion.gaussian_mixture import fleming_prior
class Grid(Probability):
"""short description of Grid
long description of Grid
Parameters
----------
param : param_type, optional
param_description
Attributes
----------
attr : attr_type
attr_description
Methods
----------
attr : attr_type
attr_description
"""
def __init__(self, bounds=[-10, -10, 10, 10], res=0.1, prior='fleming',
all_dims=False, is_dynamic=True, max_range=1.0, var=1.0,
feasible_region=None, use_STM=True):
if prior == 'fleming':
bounds = [-9.5, -3.33, 4, 3.68]
super(Grid, self).__init__(bounds=bounds, res=res)
self._discretize(all_dims)
if feasible_region is not None:
self.identify_feasible_region(feasible_region)
self.is_dynamic = is_dynamic
if is_dynamic:
self.max_range = max_range
self.var = var
self.use_STM = use_STM
if use_STM:
self._create_STM()
else:
self._create_distance_matrix()
self.update_transition_probs = True
if prior == 'fleming':
self.prob = fleming_prior().pdf(self.pos, dims=[0,1])
self.prob = np.reshape(self.prob, self.X.shape)
else:
self.prob = np.ones_like(self.X)
self.prob /= self.prob.sum()
# self.keep_feasible_region()
def __str__(self):
try:
num_states = 1
for shape in self.pos.shape:
num_states *= shape
num_states /= self.pos.shape[-1]
except:
num_states = '?'
return 'Gridded probability ({} states)'.format(int(num_states))
def measurement_update(self, likelihood, measurement=None, **kwargs):
"""Bayesian update of a prior probability with a sensor likelihood.
Provide likelihood as either a discretized numpy array or as a softmax
model with an associated measurement class.
"""
# Discretize likelihood if given as a softmax object
if type(likelihood) != np.ndarray:
likelihood = likelihood.probability(class_=measurement,
state=self.pos)
# Perform Bayes' update
posterior = likelihood * self.prob.flatten()
posterior /= posterior.sum()
self.prob = np.reshape(posterior, self.X.shape)
def dynamics_update(self, n_steps=1, velocity_state=None):
if self.use_STM is False and self.update_transition_probs:
logging.info('Updating transition probabilities...')
self.transition_probs = velocity_state.pdf(self.distance_matrix)
self.update_transition_probs = False
if self.is_dynamic:
posterior = self.prob.flatten()
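# Each pass through the loop propagates the belief forward one time step by
# multiplying it with the transition matrix (p' = T.dot(p)) and renormalising.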
for step in range(n_steps):
# self.state_transition_matrix += 0.1 * np.eye(self.state_transition_matrix.shape[1])
if self.use_STM:
posterior = self.state_transition_matrix.dot(posterior)
else:
posterior = self.transition_probs.dot(posterior)
posterior /= posterior.sum()
self.prob = posterior.reshape(self.X.shape)
# print
def find_MAP(self, dims=[0,1]):
"""formerly 'max_point_by_grid'
Assume 2D MAP for now
"""
pt = np.unravel_index(self.prob.argmax(), self.X.shape)
MAP_point = np.array([self.X[pt[0],0], self.Y[0,pt[1]]])
MAP_value = self.prob[pt]
return MAP_point, MAP_value
def pdf(self, x=None):
#<>TODO: specify dimensions for evaluation
x = np.asarray(x)
if x.shape[0] < self.ndims: # evaluating at lower dimensionality
prob = self.prob
# http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html#scipy.interpolate.griddata
# <>TODO: If x doesn't align with grid points, interpolate
def as_grid(self, all_dims=False):
return self.prob
def identify_feasible_region(self, feasible_region):
self.infeasible_states = []
for state_i, pos in enumerate(self.pos):
pt = Point(pos)
if not feasible_region.contains(pt):
self.infeasible_states.append(state_i)
def keep_feasible_region(self):
prob = self.prob.copy().flatten()
prob[self.infeasible_states] = 0
prob /= prob.sum()
self.prob = prob.reshape(self.X.shape)
def _discretize(self, bounds=None, res=None, all_dims=False):
if res is not None:
self.res = res
if bounds is None and self.bounds is None:
b = [-10, 10] # bounds in any dimension
bounds = [[d] * self.ndims for d in b] # apply bounds to each dim
self.bounds = [d for dim in bounds for d in dim] # flatten bounds
elif self.bounds is None:
self.bounds = bounds
# Create grid
if self.ndims == 1:
x = np.arange(self.bounds[0], self.bounds[1], res)
self.x = x
self.pos = x
elif self.ndims >= 2:
logging.debug('Using first two variables as x and y')
X, Y = np.mgrid[self.bounds[0]:self.bounds[2] + self.res:self.res,
self.bounds[1]:self.bounds[3] + self.res:self.res]
pos = np.empty(X.shape + (2,))
self.X = X; self.Y = Y
pos = np.dstack((self.X, self.Y))
self.pos = np.reshape(pos, (self.X.size, 2))
if all_dims:
#<>TODO: use more than the ndims == 4 case
full_bounds = self.bounds[0:2] + [-0.5, -0.5] \
+ self.bounds[2:] + [0.5, 0.5]
v_spacing = 0.1
grid = np.mgrid[full_bounds[0]:full_bounds[4] + res:res,
full_bounds[1]:full_bounds[5] + res:res,
full_bounds[2]:full_bounds[6] + v_spacing:v_spacing,
full_bounds[3]:full_bounds[7] + v_spacing:v_spacing,
]
pos = np.empty(grid[0].shape + (4,))
pos[:, :, :, :, 0] = grid[0]
pos[:, :, :, :, 1] = grid[1]
pos[:, :, :, :, 2] = grid[2]
pos[:, :, :, :, 3] = grid[3]
self.pos_all = pos
else:
logging.error('This should be impossible, a gauss mixture with no variables')
raise ValueError
def _create_distance_matrix(self):
n = self.pos.shape[0]
directory = os.path.dirname(__file__) + '/STMs'
if not os.path.exists(directory):
os.makedirs(directory)
# Try to load a precomputed distance matrix
if hasattr(self, 'infeasible_states'):
feas_str = '_feasible'
else:
feas_str = ''
filename = '{}/DM_n{}_r{}_{}.npy'.format(directory, n, self.res,
feas_str)
try:
self.distance_matrix = np.load(filename)
logging.info('Loaded distance matrix {}.'.format(filename))
return
except:
logging.info('No distance matrix to load for {}, creating... '
.format({'resolution': self.res,
'feasible': hasattr(self,'infeasible_states')
}))
logging.info('\n (takes a while for large state spaces)')
# Create a Distance matrix
self.distance_matrix = np.empty((n,n,2))
for state_i, p in enumerate(self.pos):
# Identify distance components
dist = p - self.pos
# Knock out infeasible cells
if hasattr(self, 'infeasible_states'):
# dist[self.infeasible_states] = np.inf
dist[self.infeasible_states] = 1000
# Save state transition probabilities from state_i
self.distance_matrix[:, state_i] = dist
progress = state_i/n * 100
if state_i % 100 == 0:
logging.info('Progress: {:.0f}% complete of {} by {} state transition matrix'
.format(progress, n, n))
# Save
logging.info('Saved distance matrix as {}.'.format(filename))
np.save(filename, self.distance_matrix)
def _create_STM(self):
n = self.pos.shape[0]
directory = os.path.dirname(__file__) + '/STMs'
if not os.path.exists(directory):
os.makedirs(directory)
# Try to load a precomputed STM
if hasattr(self, 'infeasible_states'):
feas_str = '_feasible'
else:
feas_str = ''
filename = '{}/STM_n{}_r{}_v{}{}.npy'.format(directory, n, self.res,
self.var, feas_str)
try:
self.state_transition_matrix = np.load(filename).item()
logging.info('Loaded STM {}.'.format(filename))
return
except:
logging.info('No state transition matrix to load for {}, creating... '
.format({'resolution': self.res,
'var': self.var,
'feasible': hasattr(self,'infeasible_states')
}))
logging.info('\n (takes a while for large state spaces)')
# Create a STM
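# Each column i of the STM holds P(next cell | current cell i): a Gaussian
# centred on cell i (variance self.var), evaluated at the nearby feasible
# cells and normalised so the column sums to one.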
state_transition_matrix = np.empty((n,n))
covariance = np.eye(self.pos.shape[-1]) * self.var
for state_i, p in enumerate(self.pos):
# Identify nearby cells
norm = np.linalg.norm(p - self.pos, ord=2, axis=1)
nearby_cells = np.where(norm < self.max_range)[0]
# Knock out infeasible cells
if hasattr(self, 'infeasible_states'):
nearby_cells = [c for c in nearby_cells
if c not in self.infeasible_states]
# Sample a gaussian for the state transition probability
mean = p
cell_trans = np.zeros(n)
for nearby_cell in nearby_cells:
X = self.pos[nearby_cell]
cell_trans[nearby_cell] = multivariate_normal.pdf(X, mean, covariance)
cell_trans /= cell_trans.sum()
# Save state transition probabilities from state_i
state_transition_matrix[:, state_i] = cell_trans
progress = state_i/n * 100
if state_i % 100 == 0:
logging.info('Progress: {:.0f}% complete of {} by {} state transition matrix'
.format(progress, n, n))
# Sparsify and save
self.state_transition_matrix = csr_matrix(state_transition_matrix)
logging.info('Saved STM as {}.'.format(filename))
np.save(filename, self.state_transition_matrix)
def test_dynamics_update(use_STM=True, res=0.2, speed=0.5, vel_var=0.01):
import matplotlib.animation as animation
from cops_and_robots.fusion.gaussian_mixture import velocity_prior
probability = Grid(use_STM=use_STM, res=res)
vp = velocity_prior(speed=speed, var=vel_var)
fig = plt.figure()
ax = fig.add_subplot(111)
def dm_update(i):
probability.dynamics_update(velocity_state=vp)
title = probability.__str__() + '@ time {}'.format(i)
probability.update_plot(i, title=title)
ani = animation.FuncAnimation(fig, dm_update,
frames=xrange(100),
interval=100,
repeat=True,
blit=False
)
# ani.save('demoanimation.gif', writer='imagemagick', fps=10);
plt.show()
def test_measurement_update():
import itertools
import time
from cops_and_robots.fusion.camera import Camera
from descartes.patch import PolygonPatch
import matplotlib.animation as animation
probability = Grid()
camera = Camera()
camera.viewcone.alpha = 0.8
camera.viewcone.color = 'none'
poses = [[0,0,-180],
[-1,0,-180],
[-1.5,0,-160],
[-1.6,0,-120],
[-1.8,-0.5,-120],
[-2.4,-0.8,-140],
[-3.0,-0.8,-180],
]
poses = itertools.cycle(poses)
measurement = 'No Detection'
fig = plt.figure()
ax = fig.add_subplot(111)
def tm_update(i, poses):
pose = next(poses)
camera.update_viewcone(pose)
poly = camera.detection_model.poly # for plotting
likelihood = camera.detection_model
probability.measurement_update(likelihood, measurement)
probability.update_plot(i)
patch = camera.viewcone.get_patch()
ax.add_patch(patch)
time.sleep(0.5)
# patch.remove()
ani = animation.FuncAnimation(fig, tm_update,
frames=xrange(100),
fargs=[poses],
interval=5,
repeat=True,
blit=False
)
plt.show()
def uniform_prior(feasible_region=None, use_STM=True):
bounds = [-9.5, -3.33, 4, 3.68]
probability = Grid(prior='uniform', bounds=bounds, use_STM=use_STM,
feasible_region=feasible_region)
return probability
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
# grid = Grid()
# grid.plot()
# plt.show()
# test_measurement_update()
test_dynamics_update(use_STM=False, res=0.4, speed=0.2, vel_var=0.01) | apache-2.0 |
Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/core/plot/transmission.py | 1 | 6624 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.plotting.transmission Contains the TransmissionPlotter class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
from textwrap import wrap
import matplotlib.pyplot as plt
from collections import OrderedDict
import matplotlib.colors as colors
import matplotlib.cm as cmx
# Import the relevant PTS classes and modules
from ..tools.logging import log
# -----------------------------------------------------------------
line_styles = ['-', '--', '-.', ':']
filled_markers = ['o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h', 'H', 'D', 'd']
pretty_colors = ["r", "dodgerblue", "purple", "darkorange", "lawngreen", "yellow", "darkblue", "teal", "darkgreen", "lightcoral", "crimson", "saddlebrown"]
# -----------------------------------------------------------------
class TransmissionPlotter(object):
"""
This class ...
"""
def __init__(self, title=None):
"""
This function ...
:return:
"""
# Set the title
self.title = title
# The different curves
self.curves = OrderedDict()
# The wavelengths
self.wavelengths = []
# The axis limits
self.min_wavelength = None
self.max_wavelength = None
self.min_transmission = None
self.max_transmission = None
# The figure
self._figure = None
# Properties
self.size = (17,4)
self.colormap = "rainbow" # or "nipy_spectral"
self.format = None
self.transparent = False
# -----------------------------------------------------------------
def set_title(self, title):
"""
This function ...
:param title:
:return:
"""
self.title = title
# -----------------------------------------------------------------
def add_transmission_curve(self, transmission_curve, label):
"""
This function ...
:param transmission_curve:
:param label:
:return:
"""
# Add the transmission curve
self.curves[label] = transmission_curve
# -----------------------------------------------------------------
def add_wavelength(self, wavelength):
"""
This function ...
:param wavelength:
:return:
"""
# Add the wavelength
self.wavelengths.append(wavelength)
# -----------------------------------------------------------------
def run(self, output_path, min_wavelength=None, max_wavelength=None, min_transmission=None, max_transmission=None):
"""
This function ...
:param output_path:
:param min_wavelength:
:param max_wavelength:
:param min_transmission:
:param max_transmission:
:return:
"""
# Set the axis limits
self.min_wavelength = min_wavelength
self.max_wavelength = max_wavelength
self.min_transmission = min_transmission
self.max_transmission = max_transmission
# Make the plot
self.plot(output_path)
# -----------------------------------------------------------------
def clear(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Clearing the transmission plotter ...")
# Set default values for all attributes
self.title = None
self.curves = OrderedDict()
self.wavelengths = []
self.min_wavelength = None
self.max_wavelength = None
self.min_transmission = None
self.max_transmission = None
self._figure = None
self.colormap = "rainbow"
self.format = None
self.transparent = False
# -----------------------------------------------------------------
def plot(self, path):
"""
This function ...
:param path:
:return:
"""
# Inform the user
log.info("Plotting the transmission plot ...")
# Create the figure
self._figure = plt.figure(figsize=self.size)
plt.ylabel(r'$T_\lambda$', fontsize=28)
plt.xlabel(r'$\lambda/\mu$m', fontsize=28)
# Set axes limits
plt.xlim(self.min_wavelength, self.max_wavelength)
plt.ylim(self.min_transmission, self.max_transmission)
plt.xscale('log')
plt.tick_params(labelsize=17)
# Get the color map
cm = plt.get_cmap(self.colormap)
cNorm = colors.Normalize(vmin=0, vmax=len(self.curves) - 1.)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
# Sort the labels based on the peak wavelength
sorted_labels = sorted(self.curves.keys(), key=lambda label: self.curves[label].peak_wavelength)
# Plot the transmission curves
counter = 0
for label in sorted_labels:
curve = self.curves[label]
wavelengths = curve.wavelengths(unit="micron", add_unit=False)
transmissions = curve.transmissions()
colorVal = scalarMap.to_rgba(counter)
# Plot the curve
#plt.plot(wavelengths, transmissions, label=label, linewidth=2, color=colorVal)
plt.fill(wavelengths, transmissions, label=label, linewidth=2, color=colorVal, alpha=0.5)
counter += 1
# Plot the wavelengths
for wavelength in self.wavelengths:
plt.axvline(wavelength.to("micron").value, color="0.8")
# Set the title
if self.title is not None: self._figure.suptitle("\n".join(wrap(self.title, 60)))
# plt.tight_layout()
# Debugging
if type(path).__name__ == "BytesIO": log.debug("Saving the transmission plot to a buffer ...")
elif path is None: log.debug("Showing the transmission plot ...")
else: log.debug("Saving the transmission plot to " + str(path) + " ...")
if path is not None:
# Save the figure
plt.savefig(path, bbox_inches='tight', pad_inches=0.25, transparent=self.transparent, format=self.format)
else: plt.show()
plt.close()
# -----------------------------------------------------------------
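# Minimal usage sketch (illustrative only; the _GaussianBandpass class below is
# a hypothetical stand-in for a PTS transmission curve, assumed to expose the
# wavelengths(unit=..., add_unit=...), transmissions() and peak_wavelength API
# that TransmissionPlotter.plot() relies on). It can only run in a context where
# the pts package is importable, e.g. via ``python -m pts.core.plot.transmission``.
if __name__ == "__main__":

    import numpy as np

    class _GaussianBandpass(object):

        def __init__(self, center, width):
            # simple Gaussian-shaped transmission profile around `center` (in micron)
            self._wavelengths = np.linspace(center - 3 * width, center + 3 * width, 200)
            self._transmissions = np.exp(-0.5 * ((self._wavelengths - center) / width) ** 2)
            self.peak_wavelength = center

        def wavelengths(self, unit="micron", add_unit=False):
            return self._wavelengths

        def transmissions(self):
            return self._transmissions

    plotter = TransmissionPlotter(title="Illustrative transmission curves")
    plotter.add_transmission_curve(_GaussianBandpass(0.55, 0.05), "V-like band")
    plotter.add_transmission_curve(_GaussianBandpass(3.6, 0.4), "IRAC-like band")
    plotter.run(None)  # a None path shows the figure instead of saving it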
| mit |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/doc/mpl_examples/api/line_with_text.py | 6 | 1649 | """
Show how to override basic methods so an artist can contain another
artist. In this case, the line contains a Text instance to label it.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as lines
import matplotlib.transforms as mtransforms
import matplotlib.text as mtext
class MyLine(lines.Line2D):
def __init__(self, *args, **kwargs):
# we'll update the position when the line data is set
self.text = mtext.Text(0, 0, '')
lines.Line2D.__init__(self, *args, **kwargs)
# we can't access the label attr until *after* the line is
# inited
self.text.set_text(self.get_label())
def set_figure(self, figure):
self.text.set_figure(figure)
lines.Line2D.set_figure(self, figure)
def set_axes(self, axes):
self.text.set_axes(axes)
lines.Line2D.set_axes(self, axes)
def set_transform(self, transform):
# 2 pixel offset
texttrans = transform + mtransforms.Affine2D().translate(2, 2)
self.text.set_transform(texttrans)
lines.Line2D.set_transform(self, transform)
def set_data(self, x, y):
if len(x):
self.text.set_position((x[-1], y[-1]))
lines.Line2D.set_data(self, x, y)
def draw(self, renderer):
# draw my label at the end of the line with 2 pixel offset
lines.Line2D.draw(self, renderer)
self.text.draw(renderer)
fig = plt.figure()
ax = fig.add_subplot(111)
x, y = np.random.rand(2, 20)
line = MyLine(x, y, mfc='red', ms=12, label='line label')
#line.text.set_text('line label')
line.text.set_color('red')
line.text.set_fontsize(16)
ax.add_line(line)
plt.show()
| gpl-2.0 |
hlin117/scikit-learn | sklearn/gaussian_process/gaussian_process.py | 17 | 34869 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
from ..utils import deprecated
MACHINE_EPSILON = np.finfo(np.double).eps
@deprecated("l1_cross_distances was deprecated in version 0.18 "
"and will be removed in 0.20.")
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X : array_like
An array with shape (n_samples, n_features)
Returns
-------
D : array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij : arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
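Examples
--------
Illustrative call on a toy one-dimensional input:

>>> import numpy as np
>>> D, ij = l1_cross_distances(np.array([[0.], [1.], [3.]]))
>>> D.ravel().tolist()
[1.0, 3.0, 2.0]
>>> ij.tolist()
[[0, 1], [0, 2], [1, 2]]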
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
@deprecated("GaussianProcess was deprecated in version 0.18 and will be "
"removed in 0.20. Use the GaussianProcessRegressor instead.")
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The legacy Gaussian Process model class.
.. deprecated:: 0.18
This class will be removed in 0.20.
Use the :class:`GaussianProcessRegressor` instead.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state : int, RandomState instance or None, optional (default=None)
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If int, random_state is the seed used by the
random number generator; If RandomState instance, random_state is the
random number generator; If None, the random number generator is the
RandomState instance used by `np.random`.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
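Predictions and (optionally) their mean squared error can then be obtained
with ``predict``; the call below is purely illustrative:

>>> x_new = np.atleast_2d(np.linspace(0, 9, 20)).T
>>> y_pred, sigma2_pred = gp.predict(x_new, eval_MSE=True)  # doctest: +SKIP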
Notes
-----
The presentation implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://imedea.uib-csic.es/master/cambioglobal/Modulo_V_cod101615/Lab/lab_maps/krigging/DACE-krigingsoft/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/stable/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, int(n_eval / batch_size))):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, int(n_eval / batch_size))):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
Q, G = linalg.qr(Ft, mode='economic')
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
# Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
# Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget) < 0.:
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
| bsd-3-clause |
kaichogami/scikit-learn | sklearn/tests/test_cross_validation.py | 7 | 47033 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be 3d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
# Check that the union of train an test split cover all the indices
assert_equal(train.union(test), set(range(n_samples)))
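# For example, check_valid_split([0, 1], [2, 3], n_samples=4) passes, while an
# overlapping split such as ([0, 1], [1, 2]) would trip the first assertion.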
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
# Check that a all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 3]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
# though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Check that errors are raised if all n_labels for individual
# classes are less than n_folds.
y = [3, 3, -1, -1, 2]
assert_raises(ValueError, cval.StratifiedKFold, y, 3)
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
error_string = ("k-fold cross validation requires at least one"
" train / test split")
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 0)
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that KFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
assert_true(np.any(np.arange(100) != ind[test]))
assert_true(np.any(np.arange(100, 200) != ind[test]))
assert_true(np.any(np.arange(200, 300) != ind[test]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the group segment locations
# for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
# estimates a much higher accuracy (around 0.96) than the non-shuffling
# variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
labels = np.asarray(labels, dtype=object)
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
# Test that the StratifiedShuffleSplit draws indices with an
# equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by KFold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1. / 3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
# test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
# check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (classes are balanced so f1_score should be equal to the
# zero/one score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
# test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
# check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
vdods/heisenberg | attic/action.py | 1 | 28209 | import sys
sys.path.append('library')
import fractions
import math
import numpy as np
import realified_fourier
import symbolic
import tensor
import time
def call_func_and_print_timing_info (func_name, func, *args, **kwargs):
print 'calling {0} ...'.format(func_name)
start = time.time()
retval = func(*args, **kwargs)
print '{0} took {1} s.'.format(func_name, time.time() - start)
return retval
# This and einsum_for_two is from http://stackoverflow.com/questions/15606937/how-do-i-get-numpy-einsum-to-play-well-with-sympy
def alt_einsum(string, *args):
index_groups = map(list, string.split(','))
assert len(index_groups) == len(args)
tensor_indices_tuples = zip(index_groups, args)
return reduce(einsum_for_two, tensor_indices_tuples)[1]
def einsum_for_two(tensor_indices1, tensor_indices2):
string1, tensor1 = tensor_indices1
string2, tensor2 = tensor_indices2
sum_over_indices = set(string1).intersection(set(string2))
new_string = string1 + string2
axes = ([], [])
for i in sum_over_indices:
new_string.remove(i)
new_string.remove(i)
axes[0].append(string1.index(i))
axes[1].append(string2.index(i))
return new_string, np.tensordot(tensor1, tensor2, axes)
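# A quick sanity check: for plain float arrays alt_einsum should agree with
# np.einsum; the point of alt_einsum is that it also works for dtype=object
# (e.g. sympy) arrays, where np.einsum does not.
def _alt_einsum_matches_np_einsum_example():
    A = np.arange(6.0).reshape(2, 3)
    v = np.array([1.0, 2.0, 3.0])
    # Both contractions compute the matrix-vector product A.dot(v).
    assert np.allclose(np.einsum('ij,j', A, v), alt_einsum('ij,j', A, v))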
def generate_zeta (M, contraction):
# TODO: get rid of M_inv if possible
M_inv = {m:i for i,m in enumerate(M)}
L = list(frozenset(m1-m2 for m1 in M for m2 in M))
L.sort()
L_inv = {l:i for i,l in enumerate(L)}
# T is the 3-tensor defined by T:(w \otimes z) = \bar{w}z, where w and z are
# complex numbers identified as points in \mathbb{R}^2.
T = np.zeros((2,2,2))
T[0,0,0] = 1.0
T[0,1,1] = 1.0
T[1,0,1] = 1.0
T[1,1,0] = -1.0
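# Quick check of the identification: with w = a+bi ~ (a,b) and z = c+di ~ (c,d),
# contracting T against w and z gives (a*c + b*d, a*d - b*c), which are exactly
# the real and imaginary parts of conj(w)*z.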
# zeta_tensor is the 3-tensor defining the quadratic function zeta_M.
zeta_tensor = np.zeros((2*len(L), 2*len(M), 2*len(M)))
for l in L:
if l == 0:
continue
i = L_inv[l]
for m in M:
if l+m not in M:
continue
j = M_inv[m]
k = M_inv[l+m]
zeta_tensor[2*i:2*(i+1),2*j:2*(j+1),2*k:2*(k+1)] += T*(l+m)/(2*l)
def zeta (R):
assert len(R) == 2*len(M), 'not enough input params.'
if contraction == 'np.einsum':
return np.einsum('ijk,j,k', zeta_tensor, R, R)
elif contraction == 'tensor.contract':
return tensor.contract('ijk,j,k', zeta_tensor, R, R, dtype=R.dtype)
else:
return alt_einsum('ijk,j,k', zeta_tensor, R, R)
return zeta,L,zeta_tensor
def generate_imag_Q (M, period):
# This is the imaginary part of 0.5*cmath.exp(2.0j*math.pi/period).
half_imag_rotation = 0.5*math.sin(2.0*math.pi/period)
def imag_Q (R):
assert len(R.shape) == 1
assert len(R) == 2*len(M), 'not enough input params.'
return half_imag_rotation*sum(m*(R[2*i]**2 + R[2*i+1]**2) for i,m in enumerate(M))
return imag_Q
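# The integration below is a left-endpoint Riemann sum over one period: each
# sample f(t_i) is weighted by the width of its interval [t_i, t_{i+1}), with
# the last interval closed off by the period.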
def generate_integrate_over_sample_times (sample_times, period, contraction):
assert len(sample_times) > 0, 'sample_times must be nonempty.'
assert all(sample_times[i+1] > sample_times[i] for i in range(len(sample_times)-1)), 'sample_times must be a strictly increasing sequence.'
assert period > sample_times[-1], 'period must be greater than last element of sample_times.'
L = period - sample_times[0]
integral_covector = np.array([sample_times[i+1] - sample_times[i] for i in range(len(sample_times)-1)] + [period - sample_times[-1]])
assert len(integral_covector) == len(sample_times)
def integrate_over_sample_times (X):
if contraction == 'np.einsum':
return np.einsum('j,j', integral_covector, X)
elif contraction == 'tensor.contract':
return tensor.contract('j,j', integral_covector, X, dtype=X.dtype)
else:
return alt_einsum('j,j', integral_covector, X)
return integrate_over_sample_times
def generate_imag_projection_over_sample_times (sample_times):
imag_projection_matrix = np.ndarray((len(sample_times),2*len(sample_times)), \
dtype=float, \
buffer=np.array([[1.0 if (c%2==1 and c//2==r) else 0.0 for c in range(2*len(sample_times))] \
for r in range(len(sample_times))]))
def imag_projection_over_sample_times (wz_samples):
return imag_projection_matrix.dot(wz_samples)
return imag_projection_over_sample_times,imag_projection_matrix
def chi ((R2_vector,R_vector)):
"""Interleave (R^2)^n with R^n to make (R^3)^n."""
assert len(R2_vector) == 2*len(R_vector)
n = len(R_vector)
return np.array([[R2_vector[2*i],R2_vector[2*i+1],R_vector[i]] for i in range(n)])
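# Illustration: chi((np.array([x0, y0, x1, y1]), np.array([z0, z1]))) returns
# np.array([[x0, y0, z0], [x1, y1, z1]]).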
def eta ((U,V)):
assert len(U.shape) == 2
assert U.shape[1] == 3
assert U.shape == V.shape
n = U.shape[1]
retval = np.ndarray((U.shape[0],2*n), dtype=U[0].dtype)
retval[:,:n] = U
retval[:,n:] = V
return retval
def generate_hamiltonian (alpha, beta):
def hamiltonian (pv):
assert len(pv) == 6
mu = (pv[0]**2 + pv[1]**2)**2 + beta*pv[2]**2
# First term is kinetic energy, second is potential.
return 0.5*(pv[3]**2 + pv[4]**2) - alpha*mu**(-0.5)
return hamiltonian
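# Worked example: for the 5-fold initial condition used further below,
# (x, y, z, p_x, p_y, p_z) = (1, 0, sqrt(3)/4, 0, 1, 0) with alpha = 1 and beta = 16,
# mu = (x**2 + y**2)**2 + beta*z**2 = 1 + 3 = 4, so the energy is
# 0.5*(p_x**2 + p_y**2) - alpha*mu**-0.5 = 0.5 - 0.5 = 0, e.g.:
#   generate_hamiltonian(1.0, 16.0)([1.0, 0.0, math.sqrt(3.0)/4.0, 0.0, 1.0, 0.0])  # -> 0.0 (up to rounding)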
def generate_hamiltonian_v (hamiltonian):
def hamiltonian_v (PV):
assert PV.shape[1] == 6
return np.array([hamiltonian(pv) for pv in PV])
return hamiltonian_v
def generate_hamiltonian_vector_field (alpha, beta):
# -\omega*dH is the hamiltonian vector field for this system
# X is the list of coordinates [x, y, z, p_x, p_y, p_z]
# t is the time at which to evaluate the flow. This particular vector field is independent of time.
def hamiltonian_vector_field (X, t):
assert len(X) == 6, "must have 6 coordinates"
x = X[0]
y = X[1]
z = X[2]
p_x = X[3]
p_y = X[4]
p_z = X[5]
P_x = p_x - 0.5*y*p_z
P_y = p_y + 0.5*x*p_z
r = x**2 + y**2
mu = r**2 + beta*z**2
# alpha = 2.0/math.pi
# alpha = 1.0
alpha_times_mu_to_neg_three_halves = alpha*mu**(-1.5)
return np.array([P_x, \
P_y, \
0.5*x*P_y - 0.5*y*P_x, \
-0.5*P_y*p_z - alpha_times_mu_to_neg_three_halves*r*2.0*x, \
0.5*P_x*p_z - alpha_times_mu_to_neg_three_halves*r*2.0*y, \
-beta*alpha_times_mu_to_neg_three_halves*z],
dtype=float)
return hamiltonian_vector_field
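# Usage sketch: the (X, t) signature matches scipy.integrate.odeint, so a flow
# curve for the 5-fold parameters could, for instance, be computed as
#   from scipy.integrate import odeint
#   f = generate_hamiltonian_vector_field(1.0, 16.0)
#   Xs = odeint(f, [1.0, 0.0, math.sqrt(3.0)/4.0, 0.0, 1.0, 0.0], np.linspace(0.0, 46.5, 1000))
# (the code below uses the project's own vector_field.compute_flow_curve instead).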
def generate_lagrangian (alpha, beta):
def lagrangian (pv):
# NOTE that this doesn't use pv[5] (i.e. dz/dt) at all.
assert len(pv) == 6
mu = (pv[0]**2 + pv[1]**2)**2 + beta*pv[2]**2
# First term is kinetic energy, second is potential.
return 0.5*(pv[3]**2 + pv[4]**2) + alpha*mu**(-0.5)
return lagrangian
def generate_lagrangian_v (lagrangian):
def lagrangian_v (PV):
assert PV.shape[1] == 6
return np.array([lagrangian(pv) for pv in PV])
return lagrangian_v
def load_cache_or_compute (cache_filename, computation, *args, **kwargs):
import pickle
try:
print 'attempting to unpickle from file \'{0}\'.'.format(cache_filename)
cached_value = pickle.load(open(cache_filename, 'r'))
print 'unpickling succeeded -- returning unpickled value.'
return cached_value
except:
print 'unpickling failed -- computing value.'
start = time.time()
computed_value = computation(*args, **kwargs)
print 'value computed in {0} s.'.format(time.time() - start)
try:
print 'attempting to pickle computed value to file \'{0}\'.'.format(cache_filename)
pickle.dump(computed_value, open(cache_filename, 'w'))
print 'pickling succeeded -- returning computed value.'
except:
print 'WARNING: Could not pickle data to file \'{0}\' -- returning computed value.'.format(cache_filename)
return computed_value
class ProblemContext:
def __init__ (self, **kwargs):
"""
Required keyword arguments:
symmetry_degree : Positive integer indicating the degree of symmetry (e.g. 3-fold, 5-fold).
symmetry_class : Positive integer, coprime to symmetry_degree, indicating the particular
class of symmetry. TODO: describe this
xy_mode_count : The number of modes used in the Fourier sum for the xy curve.
sample_count : A positive integer indicating how many samples will be used to represent the xy curve.
period : A positive float indicating the period of the curve.
contraction : Specifies the method used to contract tensors. One of: 'np.einsum', 'tensor.contract',
'alt_einsum'. Note that dtype=object can't use np.einsum (for some dumb reason).
Default is 'np.einsum', because it is the fastest.
alpha : The alpha parameter of the Hamiltonian.
beta : The beta parameter of the Hamiltonian.
"""
self.parameter_string = repr(sorted(kwargs.iteritems()))
self.symmetry_degree = kwargs['symmetry_degree']
self.symmetry_class = kwargs['symmetry_class']
assert fractions.gcd(self.symmetry_class, self.symmetry_degree) == 1, 'symmetry_class and symmetry_degree kwargs must be coprime.'
assert 0 < self.symmetry_class < self.symmetry_degree, 'symmetry_class must be between 0 and symmetry_degree.'
self.xy_mode_count = kwargs['xy_mode_count']
assert self.xy_mode_count > 0, 'xy_mode_count must be positive.'
# Make floor(self.xy_mode_count/2) positive modes and ceiling(self.xy_mode_count/2) negative modes.
xy_mode_lower_bound = self.symmetry_class - self.symmetry_degree*((self.xy_mode_count+1)//2)
xy_mode_upper_bound = self.symmetry_class + self.symmetry_degree*(self.xy_mode_count//2)
self.xy_modes = range(xy_mode_lower_bound, xy_mode_upper_bound, self.symmetry_degree)
assert len(self.xy_modes) == self.xy_mode_count
self.sample_count = kwargs['sample_count']
self.period = kwargs['period']
self.sample_times = np.linspace(0.0, self.period, self.sample_count+1)[:-1] # Take all but the last element.
assert len(self.sample_times) > 0, 'sample_times must be nonempty.'
assert all(self.sample_times[i+1] > self.sample_times[i] for i in range(len(self.sample_times)-1)), 'sample_times must be a strictly increasing sequence.'
assert self.period > self.sample_times[-1], 'period must be greater than last element of sample_times.'
self.contraction = kwargs.get('contraction', 'np.einsum')
assert self.contraction in ['np.einsum', 'tensor.contract', 'alt_einsum']
self.alpha = kwargs['alpha']
self.beta = kwargs['beta']
start = time.time(); self.zeta_M,self.wz_modes,self.zeta_tensor = generate_zeta(self.xy_modes, self.contraction)#; print 'generate_zeta: {0} s'.format(time.time() - start)
print 'len(sample_times) = {0}, len(xy_modes) = {1}, len(wz_modes) = {2}'.format(len(self.sample_times), len(self.xy_modes), len(self.wz_modes))
start = time.time(); self.F_xy = realified_fourier.Transform(self.xy_modes, self.sample_times, self.period)#; print 'F_xy: {0} s'.format(time.time() - start)
start = time.time(); self.F_wz = realified_fourier.Transform(self.wz_modes, self.sample_times, self.period)#; print 'F_wz: {0} s'.format(time.time() - start)
start = time.time(); self.imag_projection_over_sample_times,self.imag_projection_matrix = generate_imag_projection_over_sample_times(self.sample_times)#; print 'generate_imag_projection_over_sample_times: {0} s'.format(time.time() - start)
start = time.time(); self.imag_Q = generate_imag_Q(self.xy_modes, self.F_xy.omega)#; print 'generate_imag_Q: {0} s'.format(time.time() - start)
start = time.time(); self.integrate_over_sample_times = generate_integrate_over_sample_times(self.sample_times, self.period, self.contraction)#; print 'generate_integrate_over_sample_times: {0} s'.format(time.time() - start)
def xy_curve (R):
if self.contraction == 'np.einsum':
return np.einsum('ij,j', self.F_xy.samples_from_coeffs_matrix, R)
elif self.contraction == 'tensor.contract':
return tensor.contract('ij,j', self.F_xy.samples_from_coeffs_matrix, R, dtype=R.dtype)
else:
return alt_einsum('ij,j', self.F_xy.samples_from_coeffs_matrix, R)
# start = time.time(); z_curve_tensor = np.einsum('ij,jk,klm', self.imag_projection_matrix, self.F_wz.samples_from_coeffs_matrix, self.zeta_tensor); print 'z_curve_tensor (with shape {0}): {1} s'.format(z_curve_tensor.shape, time.time() - start)
start = time.time(); z_curve_tensor = np.einsum('ij,jkl', np.einsum('ij,jk', self.imag_projection_matrix, self.F_wz.samples_from_coeffs_matrix), self.zeta_tensor); print 'z_curve_tensor (with shape {0}): {1} s'.format(z_curve_tensor.shape, time.time() - start)
def z_curve (R):
if self.contraction == 'np.einsum':
return np.einsum('ijk,j,k', z_curve_tensor, R, R)# + self.imag_Q(R)*self.sample_times
elif self.contraction == 'tensor.contract':
return tensor.contract('ijk,j,k', z_curve_tensor, R, R, dtype=R.dtype)# + self.imag_Q(R)*self.sample_times
else:
return alt_einsum('ijk,j,k', z_curve_tensor, R, R)# + self.imag_Q(R)*self.sample_times
start = time.time(); xy_prime_curve_matrix = np.einsum('ij,jk', self.F_xy.samples_from_coeffs_matrix, self.F_xy.time_derivative_matrix); print 'xy_prime_curve_matrix (with shape {0}): {1} s'.format(xy_prime_curve_matrix.shape, time.time() - start)
def xy_prime_curve (R):
if self.contraction == 'np.einsum':
return np.einsum('ij,j', xy_prime_curve_matrix, R)
elif self.contraction == 'tensor.contract':
return tensor.contract('ij,j', xy_prime_curve_matrix, R, dtype=R.dtype)
else:
return alt_einsum('ij,j', xy_prime_curve_matrix, R)
# start = time.time(); z_prime_curve_tensor = np.einsum('ij,jk,kl,lmn', self.imag_projection_matrix, self.F_wz.samples_from_coeffs_matrix, self.F_wz.time_derivative_matrix, self.zeta_tensor); print 'z_prime_curve_tensor (with shape {0}): {1} s'.format(z_prime_curve_tensor.shape, time.time() - start)
start = time.time(); z_prime_curve_tensor = np.einsum('ij,jkl', np.einsum('ij,jk', np.einsum('ij,jk', self.imag_projection_matrix, self.F_wz.samples_from_coeffs_matrix), self.F_wz.time_derivative_matrix), self.zeta_tensor); print 'z_prime_curve_tensor (with shape {0}): {1} s'.format(z_prime_curve_tensor.shape, time.time() - start)
vector_of_ones = np.array([1.0 for _ in self.sample_times])
def z_prime_curve (R):
if self.contraction == 'np.einsum':
return np.einsum('ijk,j,k', z_prime_curve_tensor, R, R)# + self.imag_Q(R)*vector_of_ones
elif self.contraction == 'tensor.contract':
return tensor.contract('ijk,j,k', z_prime_curve_tensor, R, R, dtype=R.dtype)# + self.imag_Q(R)*vector_of_ones
else:
return alt_einsum('ijk,j,k', z_prime_curve_tensor, R, R)# + self.imag_Q(R)*vector_of_ones
z_prime_curve_dummy_tensor = np.zeros(self.sample_count)
def z_prime_curve_dummy (R):
return z_prime_curve_dummy_tensor
self.position = lambda R : chi((xy_curve(R), z_curve(R)))
# self.velocity = lambda R : chi((xy_prime_curve(R), z_prime_curve(R)))
self.velocity = lambda R : chi((xy_prime_curve(R), z_prime_curve_dummy(R)))
self.position_and_velocity = lambda R : eta((self.position(R), self.velocity(R)))
self.lagrangian = generate_lagrangian(self.alpha, self.beta)
self.lagrangian_v = generate_lagrangian_v(self.lagrangian)
self.hamiltonian = generate_hamiltonian(self.alpha, self.beta)
self.hamiltonian_v = generate_hamiltonian_v(self.hamiltonian)
self.action = lambda R : self.integrate_over_sample_times(self.lagrangian_v(self.position_and_velocity(R)))
self.Lambda = lambda R_lagmult : self.action(R_lagmult[:-1]) + R_lagmult[-1]*self.imag_Q(R_lagmult[:-1])
def generate_symbolic_functions (self):
def generate_variables ():
R_lagmult_vars = np.ndarray((2*self.xy_mode_count+1,), dtype=object)
R_lagmult_vars[:-1] = symbolic.tensor('R', (2*self.xy_mode_count,))
R_lagmult_vars[-1] = symbolic.variable('lambduh')
return R_lagmult_vars
def compute_diff_and_print_progress (f, var, i, out_of):
# sys.stdout.write('computing {0}th derivative out of {1} ... '.format(i, out_of))
retval = load_cache_or_compute('cache/D_Lambda_{0}.{1}.pickle'.format(i, self.parameter_string), f.diff, var)
# sys.stdout.write('complete.\n')
return retval
self.R_lagmult_vars = load_cache_or_compute('cache/R_lagmult_vars.{0}.pickle'.format(self.parameter_string), generate_variables)
self.symbolic_Lambda = load_cache_or_compute('cache/Lambda.{0}.pickle'.format(self.parameter_string), lambda : self.Lambda(self.R_lagmult_vars))
self.symbolic_D_Lambda = load_cache_or_compute('cache/D_Lambda.{0}.pickle'.format(self.parameter_string), lambda : np.array([compute_diff_and_print_progress(self.symbolic_Lambda, var, i, len(self.R_lagmult_vars)) for i,var in enumerate(self.R_lagmult_vars)]))
self.symbolic_squared_L2_norm_D_Lambda = load_cache_or_compute('cache/objective_function.{0}.pickle'.format(self.parameter_string), lambda : sum(self.symbolic_Lambda.diff(var)**2 for var in self.R_lagmult_vars) / len(self.R_lagmult_vars))
self.symbolic_constraint_function = load_cache_or_compute('cache/constraint_function.{0}.pickle'.format(self.parameter_string), lambda : self.imag_Q(self.R_lagmult_vars[:-1])**2)
def generate_autowrapped_functions (self):
import sympy.utilities.autowrap
start = time.time()
self.constraint_function = sympy.utilities.autowrap.autowrap(self.symbolic_constraint_function, args=self.R_lagmult_vars[:-1], backend='cython')
print 'generating constraint_function (via autowrap) took {0} s.'.format(time.time() - start)
start = time.time()
self.objective_function = sympy.utilities.autowrap.autowrap(self.symbolic_squared_L2_norm_D_Lambda, args=self.R_lagmult_vars, backend='cython')
print 'generating objective_function (via autowrap) took {0} s.'.format(time.time() - start)
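# Example construction (cf. the parameter ranges in profile_ProblemContext below):
#   pc = ProblemContext(symmetry_degree=5, symmetry_class=2, xy_mode_count=3,
#                       sample_count=21, period=46.5, alpha=1.0, beta=16.0,
#                       contraction='np.einsum')
#   R = np.einsum('ij,j', pc.F_xy.coeffs_from_samples_matrix, XY)  # XY from one of the curve generators below
#   pc.action(R)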
def profile_ProblemContext ():
symmetry_degree = 5
symmetry_class = 2
xy_mode_count_range = range(2,3+1)#range(2,10)
sample_count_range = range(21,22+1)#range(16,48,8)
period = 46.5
alpha = 1.0
beta = 16.0
contractions = ['tensor.contract', 'alt_einsum']
for contraction in contractions:
for sample_count in sample_count_range:
for xy_mode_count in xy_mode_count_range:
try:
print '****************** contraction = {0}, sample_count = {1}, xy_mode_count = {2}'.format(contraction, sample_count, xy_mode_count)
pc = call_func_and_print_timing_info('ProblemContext', ProblemContext, symmetry_degree=symmetry_degree, symmetry_class=symmetry_class, xy_mode_count=xy_mode_count, sample_count=sample_count, period=period, alpha=alpha, beta=beta, contraction=contraction)
call_func_and_print_timing_info('pc.generate_symbolic_functions', pc.generate_symbolic_functions)
call_func_and_print_timing_info('pc.generate_autowrapped_functions', pc.generate_autowrapped_functions)
except Exception as e:
print 'caught exception {0}'.format(repr(e))
print ''
print ''
# Initial conditions in form [alpha, beta, time, x, y, z, px, py, pz]:
# 3-Fold:
# [1, 1/16, 273.5, 1, 0, 4*sqrt(3), 0, 1, 0]
def coreys_3_fold_curve ():
import cmath
import matplotlib.pyplot as plt
import vector_field
# Corey's 3-fold
# initial_condition = [1.0, 0.0, 4.0*math.sqrt(3.0), 0.0, 1.0, 0.0]
# period = 273.5
# omega = cmath.exp(2.0j*math.pi/period)
# print 'initial_condition = {0}'.format(initial_condition)
alpha = 1.0
beta = 1.0/16.0
# H = generate_hamiltonian(alpha, beta)
# print 'H(initial_condition) = {0}'.format(H(initial_condition))
# hamiltonian_vector_field = generate_hamiltonian_vector_field(alpha, beta)
# Xs,Ts = vector_field.compute_flow_curve(hamiltonian_vector_field, initial_condition, 0.0, period, sample_count)
import heisenberg_dynamics
Xs,Ts,period,sample_count = heisenberg_dynamics.compute_coreys_flow_curve()
Xs = np.array(Xs)
XY = np.ndarray((2*len(Xs),), dtype=float)
XY[0::2] = Xs[:,0]
XY[1::2] = Xs[:,1]
X = Xs[:,0]
Y = Xs[:,1]
Z = Xs[:,2]
import matplotlib.pyplot as plt
plt.figure(1, figsize=(30,15))
sp = plt.subplot(1,2,1)
sp.set_title('(x,y) curve image')
# plt.axes().set_aspect('equal')
plt.plot(X,Y)
sp = plt.subplot(1,2,2)
sp.set_title('z(t)')
plt.plot(Ts,Z)
plt.savefig('3fold.png')
return XY,period,sample_count,alpha,beta
def coreys_5_fold_curve (sample_count):
import cmath
import matplotlib.pyplot as plt
import vector_field
# Corey's 5-fold
initial_condition = [1.0, 0.0, math.sqrt(3.0)/4.0, 0.0, 1.0, 0.0]
period = 46.5
omega = cmath.exp(2.0j*math.pi/period)
print 'initial_condition = {0}'.format(initial_condition)
alpha = 1.0
beta = 16.0
hamiltonian_vector_field = generate_hamiltonian_vector_field(alpha, beta)
Xs,Ts = vector_field.compute_flow_curve(hamiltonian_vector_field, initial_condition, 0.0, period, sample_count)
Xs = np.array(Xs)
XY = np.ndarray((2*len(Xs),), dtype=float)
XY[0::2] = Xs[:,0]
XY[1::2] = Xs[:,1]
# X = Xs[:,0]
# Y = Xs[:,1]
# Z = Xs[:,2]
return XY,period,alpha,beta
# # print Xs
# X = [x for (x,_,_,_,_,_) in Xs]
# Y = [y for (_,y,_,_,_,_) in Xs]
# Z = [z for (_,_,z,_,_,_) in Xs]
# plt.figure(1, figsize=(30,15))
# sp = plt.subplot(1,2,1)
# sp.set_title('(x,y) curve for RK4-solved dynamics')
# # plt.axes().set_aspect('equal')
# plt.plot(X,Y)
# sp = plt.subplot(1,2,2)
# sp.set_title('z(t) for RK4-solved dynamics')
# plt.plot(Ts,Z)
# # sp = plt.subplot(1,3,3)
# # sp.set_title('H(t) for RK4-solved dynamics')
# # plt.plot(Ts, [hamiltonian(X) for X in Xs])
# plt.savefig('5fold.png')
#Initial conditions in form [alpha, beta, time, x,y,z, px, py, pz]:
#7-Fold:
#[2/pi, 16, 57.9, 1, 0, z, 0, 1, 0] with z=sqrt{ pi^(-2) - 1/16 }
def coreys_7_fold_curve (sample_count):
import cmath
import matplotlib.pyplot as plt
import vector_field
# Corey's 7-fold
c = math.sqrt(math.pi**-2 - 1.0/16.0)
initial_condition = [1.0, 0.0, c, 0.0, 1.0, 0.0]
period = 57.9
omega = cmath.exp(2.0j*math.pi/period)
print 'initial_condition = {0}'.format(initial_condition)
alpha = 2.0/math.pi
beta = 16.0
hamiltonian_vector_field = generate_hamiltonian_vector_field(alpha, beta)
Xs,Ts = vector_field.compute_flow_curve(hamiltonian_vector_field, initial_condition, 0.0, period, sample_count)
Xs = np.array(Xs)
XY = np.ndarray((2*len(Xs),), dtype=float)
XY[0::2] = Xs[:,0]
XY[1::2] = Xs[:,1]
# X = Xs[:,0]
# Y = Xs[:,1]
# Z = Xs[:,2]
# import matplotlib.pyplot as plt
# plt.figure(1, figsize=(30,15))
# sp = plt.subplot(1,2,1)
# sp.set_title('(x,y) curve image')
# # plt.axes().set_aspect('equal')
# plt.plot(X,Y)
# sp = plt.subplot(1,2,2)
# sp.set_title('z(t)')
# plt.plot(Ts,Z)
# plt.savefig('7fold.png')
return XY,period,alpha,beta
def main ():
XY,period,sample_count,alpha,beta = coreys_3_fold_curve()
# pc = ProblemContext(symmetry_degree=3, symmetry_class=2, xy_mode_count=60, sample_count=sample_count, period=period, alpha=alpha, beta=beta, contraction='np.einsum')
# # pc = ProblemContext(symmetry_degree=5, symmetry_class=2, xy_mode_count=60, sample_count=sample_count, period=period, alpha=alpha, beta=beta, contraction='np.einsum')
# # pc = ProblemContext(symmetry_degree=7, symmetry_class=2, xy_mode_count=60, sample_count=sample_count, period=period, alpha=alpha, beta=beta, contraction='np.einsum')
# R = np.einsum('ij,j', pc.F_xy.coeffs_from_samples_matrix, XY)
# # import scipy.optimize
# # R = scipy.optimize.fmin(pc.imag_Q, R, maxfun=1000000)
# P = pc.position(R)
# import matplotlib.pyplot as plt
# plt.figure(1, figsize=(30,15))
# sp = plt.subplot(1,2,1)
# sp.set_title('(x,y) curve image')
# # plt.axes().set_aspect('equal')
# plt.plot(P[:,0],P[:,1])
# sp = plt.subplot(1,2,2)
# sp.set_title('z(t)')
# plt.plot(pc.sample_times,P[:,2])
# plt.savefig('7fold.png')
return 0
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import scipy.optimize
import sympy.utilities.autowrap
# pc = ProblemContext(symmetry_degree=3, symmetry_class=1, xy_mode_count=7, sample_count=100, period=10.0, contraction='alt_einsum')
# pc = ProblemContext(symmetry_degree=5, symmetry_class=2, xy_mode_count=10, sample_count=200, period=46.5, alpha=1.0, beta=16.0, contraction='alt_einsum')
pc = call_func_and_print_timing_info('ProblemContext', ProblemContext, symmetry_degree=5, symmetry_class=2, xy_mode_count=15, sample_count=150, period=46.5, alpha=1.0, beta=16.0, contraction='tensor.contract')
call_func_and_print_timing_info('pc.generate_symbolic_functions', pc.generate_symbolic_functions)
call_func_and_print_timing_info('pc.generate_autowrapped_functions', pc.generate_autowrapped_functions)
# Start with the 5-fold curve.
    XY,period,alpha,beta = coreys_5_fold_curve(pc.sample_count) # unpack; only XY is used below
R = pc.F_xy.coeffs_from_samples_matrix.dot(XY)
# R = np.random.randn(2*pc.xy_mode_count)
# start = time.time()
# R = scipy.optimize.fmin(lambda R : pc.constraint_function(*R), R, maxfun=100000000, disp=True)
# print 'constraint optimization took {0} s.'.format(time.time() - start)
start = time.time()
R_lagmult = np.ndarray((2*pc.xy_mode_count+1,), dtype=float)
R_lagmult[:-1] = R
R_lagmult[-1] = 1.0 # Sort of arbitrary.
R_lagmult = call_func_and_print_timing_info('optimize objective function', scipy.optimize.fmin, lambda R_lagmult : pc.objective_function(*R_lagmult), R_lagmult, disp=True, maxfun=100000000)#, callback=print_R)
R = R_lagmult[:-1] # Extract all but the Lagrange multiplier.
PV = call_func_and_print_timing_info('pc.position_and_velocity', pc.position_and_velocity, R)
print 'action(R) = {0}'.format(pc.action(R))
print 'imag_Q(R) = {0}'.format(pc.imag_Q(R))
H = pc.hamiltonian_v(PV)
L = pc.lagrangian_v(PV)
import matplotlib.pyplot as plt
plt.figure(1, figsize=(30,20))
plt.subplot(2,3,1)
plt.title('(x,y)')
plt.plot(PV[:,0], PV[:,1])
plt.subplot(2,3,2)
plt.title('z(t), imag(Q(R)) = {0}'.format(pc.imag_Q(R)))
plt.plot(pc.sample_times, PV[:,2])
plt.subplot(2,3,3)
plt.title('H(t)')
plt.plot(pc.sample_times, H)
plt.subplot(2,3,4)
plt.title('(x\',y\')')
plt.plot(PV[:,3], PV[:,4])
plt.subplot(2,3,5)
plt.title('z\'(t)')
plt.plot(pc.sample_times, PV[:,5])
plt.subplot(2,3,6)
plt.title('L(t)')
plt.plot(pc.sample_times, L)
plt.savefig('dino.png')
if __name__ == '__main__':
main()
| mit |
MohammedWasim/scikit-learn | examples/cluster/plot_mean_shift.py | 351 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
adamgreenhall/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
rhyolight/nupic | examples/opf/tools/sp_plotter.py | 10 | 14845 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import csv
import matplotlib
import numpy as np
import os
import sys
import time
from nupic.bindings.math import GetNTAReal
from nupic.algorithms.spatial_pooler import SpatialPooler
matplotlib.use('Agg')
import matplotlib.pyplot as plt
realDType = GetNTAReal()
def generatePlot(outputs, origData):
""" Generates a table where each cell represent a frequency of pairs
as described below.
x coordinate is the % difference between input records (origData list),
y coordinate is the % difference between corresponding output records.
"""
PLOT_PRECISION = 100
distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
outputSize = len(outputs)
for i in range(0,outputSize):
for j in range(i+1,outputSize):
in1 = outputs[i]
in2 = outputs[j]
dist = (abs(in1-in2) > 0.1)
intDist = int(dist.sum()/2+0.1)
orig1 = origData[i]
orig2 = origData[j]
origDist = (abs(orig1-orig2) > 0.1)
intOrigDist = int(origDist.sum()/2+0.1)
if intDist < 2 and intOrigDist > 10:
print 'Elements %d,%d has very small SP distance: %d' % (i, j, intDist)
print 'Input elements distance is %d' % intOrigDist
x = int(PLOT_PRECISION*intDist/40.0)
y = int(PLOT_PRECISION*intOrigDist/42.0)
if distribMatrix[x, y] < 0.1:
distribMatrix[x, y] = 3
else:
if distribMatrix[x, y] < 10:
distribMatrix[x, y] += 1
# Add some elements for the scale drawing
distribMatrix[4, 50] = 3
distribMatrix[4, 52] = 4
distribMatrix[4, 54] = 5
distribMatrix[4, 56] = 6
distribMatrix[4, 58] = 7
distribMatrix[4, 60] = 8
distribMatrix[4, 62] = 9
distribMatrix[4, 64] = 10
return distribMatrix
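# Minimal sketch of the binning used in generatePlot above: map a single
# (SP output distance, input distance) pair to its cell in the
# (PLOT_PRECISION+1) x (PLOT_PRECISION+1) frequency table.  The constants 40
# and 42 mirror the maximum SP / input distances assumed there.
def _distance_pair_to_cell(intDist, intOrigDist, plot_precision=100):
  x = int(plot_precision * intDist / 40.0)
  y = int(plot_precision * intOrigDist / 42.0)
  return x, y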
def generateRandomInput(numRecords, elemSize = 400, numSet = 42):
""" Generates a set of input record
Params:
numRecords - how many records to generate
elemSize - the size of each record (num 0s or 1s)
numSet - how many 1s in each record
Returns: a list of inputs
"""
inputs = []
for _ in xrange(numRecords):
input = np.zeros(elemSize, dtype=realDType)
for _ in range(0,numSet):
ind = np.random.random_integers(0, elemSize-1, 1)[0]
input[ind] = 1
while abs(input.sum() - numSet) > 0.1:
ind = np.random.random_integers(0, elemSize-1, 1)[0]
input[ind] = 1
inputs.append(input)
return inputs
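# Small self-check (illustrative only, never called): each record returned by
# generateRandomInput should contain exactly numSet ones out of elemSize bits.
def _checkRandomInputDensity(numRecords=5, elemSize=400, numSet=42):
  recs = generateRandomInput(numRecords, elemSize, numSet)
  return all(int(r.sum() + 0.1) == numSet for r in recs)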
def appendInputWithSimilarValues(inputs):
""" Creates an 'one-off' record for each record in the inputs. Appends new
records to the same inputs list.
"""
numInputs = len(inputs)
for i in xrange(numInputs):
input = inputs[i]
for j in xrange(len(input)-1):
if input[j] == 1 and input[j+1] == 0:
newInput = copy.deepcopy(input)
newInput[j] = 0
newInput[j+1] = 1
inputs.append(newInput)
break
def appendInputWithNSimilarValues(inputs, numNear = 10):
""" Creates a neighboring record for each record in the inputs and adds
new records at the end of the inputs list
"""
numInputs = len(inputs)
skipOne = False
for i in xrange(numInputs):
input = inputs[i]
numChanged = 0
newInput = copy.deepcopy(input)
for j in xrange(len(input)-1):
if skipOne:
skipOne = False
continue
if input[j] == 1 and input[j+1] == 0:
newInput[j] = 0
newInput[j+1] = 1
inputs.append(newInput)
newInput = copy.deepcopy(newInput)
#print input
#print newInput
numChanged += 1
skipOne = True
if numChanged == numNear:
break
def modifyBits(inputVal, maxChanges):
""" Modifies up to maxChanges number of bits in the inputVal
"""
changes = np.random.random_integers(0, maxChanges, 1)[0]
if changes == 0:
return inputVal
inputWidth = len(inputVal)
whatToChange = np.random.random_integers(0, 41, changes)
runningIndex = -1
numModsDone = 0
for i in xrange(inputWidth):
if numModsDone >= changes:
break
if inputVal[i] == 1:
runningIndex += 1
if runningIndex in whatToChange:
if i != 0 and inputVal[i-1] == 0:
inputVal[i-1] = 1
inputVal[i] = 0
return inputVal
def getRandomWithMods(inputSpace, maxChanges):
""" Returns a random selection from the inputSpace with randomly modified
up to maxChanges number of bits.
"""
size = len(inputSpace)
ind = np.random.random_integers(0, size-1, 1)[0]
value = copy.deepcopy(inputSpace[ind])
if maxChanges == 0:
return value
return modifyBits(value, maxChanges)
def testSP():
""" Run a SP test
"""
elemSize = 400
numSet = 42
addNear = True
numRecords = 2
wantPlot = True
poolPct = 0.5
itr = 1
doLearn = True
while numRecords < 3:
# Setup a SP
sp = SpatialPooler(
columnDimensions=(2048, 1),
inputDimensions=(1, elemSize),
potentialRadius=elemSize/2,
numActiveColumnsPerInhArea=40,
spVerbosity=0,
stimulusThreshold=0,
seed=1,
potentialPct=poolPct,
globalInhibition=True
)
# Generate inputs using rand()
inputs = generateRandomInput(numRecords, elemSize, numSet)
if addNear:
# Append similar entries (distance of 1)
appendInputWithNSimilarValues(inputs, 42)
inputSize = len(inputs)
print 'Num random records = %d, inputs to process %d' % (numRecords, inputSize)
# Run a number of iterations, with learning on or off,
# retrieve results from the last iteration only
outputs = np.zeros((inputSize,2048))
numIter = 1
if doLearn:
numIter = itr
for iter in xrange(numIter):
for i in xrange(inputSize):
time.sleep(0.001)
if iter == numIter - 1:
# TODO: See https://github.com/numenta/nupic/issues/2072
sp.compute(inputs[i], learn=doLearn, activeArray=outputs[i])
#print outputs[i].sum(), outputs[i]
else:
# TODO: See https://github.com/numenta/nupic/issues/2072
output = np.zeros(2048)
sp.compute(inputs[i], learn=doLearn, activeArray=output)
# Build a plot from the generated input and output and display it
distribMatrix = generatePlot(outputs, inputs)
# If we don't want a plot, just continue
if wantPlot:
plt.imshow(distribMatrix, origin='lower', interpolation = "nearest")
plt.ylabel('SP (2048/40) distance in %')
plt.xlabel('Input (400/42) distance in %')
title = 'SP distribution'
if doLearn:
title += ', learning ON'
else:
title += ', learning OFF'
title += ', inputs = %d' % len(inputs)
title += ', iterations = %d' % numIter
title += ', poolPct =%f' % poolPct
plt.suptitle(title, fontsize=12)
plt.show()
#plt.savefig(os.path.join('~/Desktop/ExperimentResults/videos5', '%s' % numRecords))
#plt.clf()
numRecords += 1
return
def testSPNew():
""" New version of the test"""
elemSize = 400
numSet = 42
addNear = True
numRecords = 1000
wantPlot = False
poolPct = 0.5
itr = 5
pattern = [60, 1000]
doLearn = True
start = 1
learnIter = 0
noLearnIter = 0
numLearns = 0
numTests = 0
numIter = 1
numGroups = 1000
PLOT_PRECISION = 100.0
distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
inputs = generateRandomInput(numGroups, elemSize, numSet)
# Setup a SP
sp = SpatialPooler(
columnDimensions=(2048, 1),
inputDimensions=(1, elemSize),
potentialRadius=elemSize/2,
numActiveColumnsPerInhArea=40,
spVerbosity=0,
stimulusThreshold=0,
synPermConnected=0.12,
seed=1,
potentialPct=poolPct,
globalInhibition=True
)
cleanPlot = False
for i in xrange(numRecords):
input1 = getRandomWithMods(inputs, 4)
if i % 2 == 0:
input2 = getRandomWithMods(inputs, 4)
else:
input2 = input1.copy()
input2 = modifyBits(input2, 21)
inDist = (abs(input1-input2) > 0.1)
intInDist = int(inDist.sum()/2+0.1)
#print intInDist
if start == 0:
doLearn = True
learnIter += 1
if learnIter == pattern[start]:
numLearns += 1
start = 1
noLearnIter = 0
elif start == 1:
doLearn = False
noLearnIter += 1
if noLearnIter == pattern[start]:
numTests += 1
start = 0
learnIter = 0
cleanPlot = True
    # TODO: See https://github.com/numenta/nupic/issues/2072
    output1 = np.zeros(2048)
    output2 = np.zeros(2048)
    sp.compute(input1, learn=doLearn, activeArray=output1)
    sp.compute(input2, learn=doLearn, activeArray=output2)
time.sleep(0.001)
outDist = (abs(output1-output2) > 0.1)
intOutDist = int(outDist.sum()/2+0.1)
if not doLearn and intOutDist < 2 and intInDist > 10:
"""
sp.spVerbosity = 10
# TODO: See https://github.com/numenta/nupic/issues/2072
sp.compute(input1, learn=doLearn, activeArray=output1)
sp.compute(input2, learn=doLearn, activeArray=output2)
sp.spVerbosity = 0
print 'Elements has very small SP distance: %d' % intOutDist
print output1.nonzero()
print output2.nonzero()
print sp._firingBoostFactors[output1.nonzero()[0]]
print sp._synPermBoostFactors[output1.nonzero()[0]]
print 'Input elements distance is %d' % intInDist
print input1.nonzero()
print input2.nonzero()
sys.stdin.readline()
"""
if not doLearn:
x = int(PLOT_PRECISION*intOutDist/40.0)
y = int(PLOT_PRECISION*intInDist/42.0)
if distribMatrix[x, y] < 0.1:
distribMatrix[x, y] = 3
else:
if distribMatrix[x, y] < 10:
distribMatrix[x, y] += 1
#print i
# If we don't want a plot, just continue
if wantPlot and cleanPlot:
plt.imshow(distribMatrix, origin='lower', interpolation = "nearest")
plt.ylabel('SP (2048/40) distance in %')
plt.xlabel('Input (400/42) distance in %')
title = 'SP distribution'
#if doLearn:
# title += ', learning ON'
#else:
# title += ', learning OFF'
title += ', learn sets = %d' % numLearns
title += ', test sets = %d' % numTests
title += ', iter = %d' % numIter
title += ', groups = %d' % numGroups
title += ', Pct =%f' % poolPct
plt.suptitle(title, fontsize=12)
#plt.show()
plt.savefig(os.path.join('~/Desktop/ExperimentResults/videosNew', '%s' % i))
plt.clf()
distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
cleanPlot = False
def testSPFile():
""" Run test on the data file - the file has records previously encoded.
"""
spSize = 2048
spSet = 40
poolPct = 0.5
pattern = [50, 1000]
doLearn = True
PLOT_PRECISION = 100.0
distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
inputs = []
#file = open('~/Desktop/ExperimentResults/sampleArtificial.csv', 'rb')
#elemSize = 400
#numSet = 42
#file = open('~/Desktop/ExperimentResults/sampleDataBasilOneField.csv', 'rb')
#elemSize = 499
#numSet = 7
outdir = '~/Desktop/ExperimentResults/Basil100x21'
inputFile = outdir+'.csv'
file = open(inputFile, 'rb')
elemSize = 100
numSet = 21
reader = csv.reader(file)
for row in reader:
input = np.array(map(float, row), dtype=realDType)
if len(input.nonzero()[0]) != numSet:
continue
inputs.append(input.copy())
file.close()
# Setup a SP
sp = SpatialPooler(
columnDimensions=(spSize, 1),
inputDimensions=(1, elemSize),
potentialRadius=elemSize/2,
numActiveColumnsPerInhArea=spSet,
spVerbosity=0,
stimulusThreshold=0,
synPermConnected=0.10,
seed=1,
potentialPct=poolPct,
globalInhibition=True
)
cleanPlot = False
doLearn = False
print 'Finished reading file, inputs/outputs to process =', len(inputs)
  size = len(inputs)
  outputs = np.zeros((size, spSize))
  output1 = np.zeros(spSize)
  output2 = np.zeros(spSize)
for iter in xrange(100):
print 'Iteration', iter
# Learn
if iter != 0:
for learnRecs in xrange(pattern[0]):
# TODO: See https://github.com/numenta/nupic/issues/2072
ind = np.random.random_integers(0, size-1, 1)[0]
sp.compute(inputs[ind], learn=True, activeArray=outputs[ind])
# Test
for _ in xrange(pattern[1]):
rand1 = np.random.random_integers(0, size-1, 1)[0]
rand2 = np.random.random_integers(0, size-1, 1)[0]
sp.compute(inputs[rand1], learn=False, activeArray=output1)
sp.compute(inputs[rand2], learn=False, activeArray=output2)
outDist = (abs(output1-output2) > 0.1)
intOutDist = int(outDist.sum()/2+0.1)
inDist = (abs(inputs[rand1]-inputs[rand2]) > 0.1)
intInDist = int(inDist.sum()/2+0.1)
if intInDist != numSet or intOutDist != spSet:
print rand1, rand2, '-', intInDist, intOutDist
x = int(PLOT_PRECISION*intOutDist/spSet)
y = int(PLOT_PRECISION*intInDist/numSet)
if distribMatrix[x, y] < 0.1:
distribMatrix[x, y] = 3
else:
if distribMatrix[x, y] < 10:
distribMatrix[x, y] += 1
if True:
plt.imshow(distribMatrix, origin='lower', interpolation = "nearest")
plt.ylabel('SP (%d/%d) distance in pct' % (spSize, spSet))
plt.xlabel('Input (%d/%d) distance in pct' % (elemSize, numSet))
title = 'SP distribution'
title += ', iter = %d' % iter
title += ', Pct =%f' % poolPct
plt.suptitle(title, fontsize=12)
#plt.savefig(os.path.join('~/Desktop/ExperimentResults/videosArtData', '%s' % iter))
plt.savefig(os.path.join(outdir, '%s' % iter))
plt.clf()
distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
if __name__ == '__main__':
np.random.seed(83)
#testSP()
#testSPNew()
testSPFile()
| agpl-3.0 |
murali-munna/Data-Science-45min-Intros | support-vector-machines-101/svm-example.py | 26 | 2219 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__="Josh Montague"
__license__="MIT License"
import sys
import pandas as pd
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.svm import SVC
import matplotlib.pyplot as plt
try:
import seaborn as sns
except ImportError as e:
sys.stderr.write("seaborn not installed. Using default matplotlib templates.")
# cobbled together from refs:
# http://scikit-learn.org/stable/auto_examples/svm/plot_iris.html
# http://scikit-learn.org/stable/auto_examples/svm/plot_separating_hyperplane.html
if len(sys.argv) > 1:
samples = int( sys.argv[1] )
c_std=2.0
else:
samples = 10
c_std=1.0
X, y = make_blobs(n_samples=samples, cluster_std=c_std, centers=2)
# make a plotting grid
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# svm
clf = SVC(kernel='linear').fit(X, y)
# predict all points in grid
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# separating plane and margins
w = clf.coef_[0]
a = -w[0] / w[1]
xxx = np.linspace(x_min, x_max)
yyy = a * xxx - (clf.intercept_[0]) / w[1]
# calculate the large margin boundaries defined by the support vectors
b = clf.support_vectors_[0]
yyy_down = a * xxx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yyy_up = a * xxx + (b[1] - a * b[0])
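# Side note (not used in the plot): for a linear SVM the width of the margin
# between the two dashed lines is 2 / ||w||.
margin_width = 2.0 / np.linalg.norm(w)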
# plot margins
plt.figure(figsize=(8,6))
plt.plot(xxx, yyy, 'k-', linewidth=1)
plt.plot(xxx, yyy_down, 'k--', linewidth=1)
plt.plot(xxx, yyy_up, 'k--', linewidth=1)
# plot decision contours
Z = Z.reshape(xx.shape)
#plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
plt.contourf(xx, yy, Z, alpha=0.25)
# plot data
plt.scatter(X[:, 0], X[:, 1],
s=100,
c=y,
alpha=0.8,
cmap=plt.cm.Paired
)
# plot support vectors
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=300,
facecolors='none'
)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xlabel('x')
plt.ylabel('y')
# SHOW ALL THE THINGS
plt.show()
| unlicense |
CDNoyes/EDL-Py | EntryGuidance/test_vectorize.py | 1 | 18834 | import numpy as np
import pyaudi as da
from pyaudi import gdual_vdouble as gd
import time
from EntryEquations import EDL
from InitialState import InitialState
from Uncertainty import getUncertainty
from Utils.RK4 import RK4
from ParametrizedPlanner import profile
from NMPC import NMPC
from Converge import Bootstrap
N = 250
applyUncertainty = 1
def test_planet():
from Planet import Planet
rho0 = 0.05 * np.random.randn(N) * applyUncertainty
hs = 0.02 * np.random.randn(N) /3 * applyUncertainty
mars = Planet(rho0=rho0,scaleHeight=hs)
h = np.ones_like(hs)*70e3
rho,Vs = mars.atmosphere(h)
rho = rho.squeeze()
if rho.shape == Vs.shape and rho.shape == hs.shape:
print "Planet is vectorized"
return mars
def test_vehicle():
from EntryVehicle import EntryVehicle
dCL = 0.05 * np.random.randn(N) * applyUncertainty
dCD = 0.05 * np.random.randn(N) * applyUncertainty
ev = EntryVehicle(CL=dCL,CD=dCD)
M = np.ones_like(dCL)*5
Cd,Cl = ev.aerodynamic_coefficients(M)
print "Vehicle is vectorized"
return ev
def test_dynamics():
mars = test_planet()
ev = test_vehicle()
from EntryEquations import Entry
from InitialState import InitialState
from Utils.RK4 import RK4
edl = Entry(PlanetModel=mars,VehicleModel=ev)
# u = np.zeros((3,N)) # same length as the vectorized components
x = InitialState()
x = np.tile(x,(N,1)).T
print "IC shape = {}".format(x.shape)
X = [x]
vprofile = vectorProfile()
npc = generateController()
t0 = time.time()
for t in np.linspace(1,400,400):
if 0: # Open Loop
u = vprofile(t)
else:
Xc = X[-1]
energy = edl.energy(Xc[0],Xc[3],False)
lift,drag = edl.aeroforces(Xc[0],Xc[3],Xc[7])
u = npc.controller(energy=energy, current_state=Xc,lift=lift,drag=drag,rangeToGo=None,planet=edl.planet)
u.shape = (1,N)
u = np.vstack((u,np.zeros((2,N))))
eom = edl.dynamics(u)
X.append(RK4(eom, X[-1], np.linspace(t,t+1,10),())[-1])
tMC = time.time() - t0
print "MC w/ vectorization of {} samples took {} s".format(N,tMC)
X = np.array(X)
Xf = X[-1]
print Xf.shape
X = np.transpose(X,(2,1,0))
print X.shape
# J = -(Xf[0]-3397e3)/1000 + Xf[3]/25#+ np.abs(Xf[2]*3397) # alt maximization
# iopt = np.argmin(J)
# print "Optimal switch = {}".format(np.linspace(40,340,N)[iopt])
import matplotlib.pyplot as plt
for Xi in X:
plt.figure(1)
plt.plot(Xi[1]*3397,Xi[2]*3397)
plt.figure(2)
plt.plot(Xi[3],(Xi[0]-3397e3)/1000)
# X = np.transpose(X,(2,1,0))
# plt.figure(1)
# plt.plot(X[iopt][1]*3397,X[iopt][2]*3397,'k')
# plt.figure(2)
# plt.plot(X[iopt][3],(X[iopt][0]-3397e3)/1000,'k')
plt.show()
def vectorProfile():
from ParametrizedPlanner import profile
bank = [1.4,0]
switches = np.linspace(40,340,N)
return lambda t: np.array([(profile(t,switch=[switch], bank=bank,order=0),0,0) for switch in switches]).T
def generateController():
from Simulation import Simulation, Cycle, EntrySim
from Triggers import SRPTrigger, AccelerationTrigger
from InitialState import InitialState
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# ######################################################
# Reference data generation
# ######################################################
reference_sim = Simulation(cycle=Cycle(1),output=False,**EntrySim())
banks = [-np.radians(30),np.radians(75),-np.radians(75),np.radians(30)]
bankProfile = lambda **d: profile(d['time'],[62.30687581, 116.77385384, 165.94954234], banks, order=2)
x0 = InitialState()
output_ref = reference_sim.run(x0,[bankProfile],StepsPerCycle=10)
refs = reference_sim.getFBL()
# ######################################################
# Closed-loop entry
# ######################################################
nmpc = NMPC(fbl_ref=refs, debug=False)
return nmpc
def Optimize():
"""
Full EDL Optimization including
- Reference trajectory
3 bank reversal switch times
4 constant bank angle segments
- Controller parameters
1 Prediction horizon
2 Gains
for a total of 10 optimization parameters.
Or a reduced version with 2 switch times, 3 bank angles + 3 controller parameters, for 8 total.
"""
from scipy.optimize import differential_evolution, minimize
from numpy import pi, radians as rad
from Simulation import Simulation, Cycle, EntrySim
import Parachute
sim = Simulation(cycle=Cycle(1),output=False,**EntrySim(Vf=460))
perturb = getUncertainty()['parametric']
optSize = 1000
samples = perturb.sample(optSize,'S')
edl = EDL(samples,Energy=True)
heading_alignment = False
# Differential Evolution Global Optimization
bounds = [(50,140),(100,165)] + [(rad(70),rad(90)),(rad(70),rad(90)),(0,rad(30))] + [(0,10),(0,10),(0,10)] # general bounds
# bounds = [(100,120),(135,160)] + [(rad(70),rad(90)),(rad(70),rad(90)),(0,rad(30))] + [(2,3),(0,10),(0,10)] # tightened bounds
if heading_alignment:
bounds.extend([(800,1400),(0.1,5)])
# sol = differential_evolution(Cost,args = (sim,samples,optSize,edl,True,True,heading_alignment), bounds=bounds, tol=1e-2, disp=True, polish=False)
# print "Optimized parameters (N={}) are:".format(optSize)
# print sol.x
# print sol
if 0:
# Particle Swarm Optimization
import pyswarms as ps
# Initialize swarm
options = {'c1': 0.5, 'c2': 0.3, 'w':0.9}
# higher c1 -> trajectories following their personal best
# higher c2 -> trajectories follow the global best
# Call instance of PSO with bounds argument
bounds_pso = np.array(bounds).T
bounds_pso = (bounds_pso[0],bounds_pso[1])
# print bounds.shape
pop_size = 200
optimizer = ps.single.GlobalBestPSO(n_particles=pop_size, dimensions=len(bounds), options=options, bounds=bounds_pso)
# hack to generate my own initial population
import chaospy as cp
U = [cp.Uniform(b[0],b[1]) for b in bounds]
# replace with a dependent bound for t2:
U[1] = cp.Uniform(lo=U[0], up=165)
init_pop = cp.J(*U).sample(size=pop_size-1, rule="S").T
sol = [ 1.12985487e+02, 1.61527467e+02, 1.53352287e+00, 1.02508346e+00,
4.79475355e-01, 2.47739391e+00, 1.14726959e-01, 6.88822448e+00]
if heading_alignment:
sol.extend([800,0.1])
sol = np.array(sol,ndmin=2)
init_pop = np.concatenate((init_pop,sol), axis=0)
optimizer.pos = init_pop
optimizer.personal_best_pos = init_pop
# Perform optimization
cost, sol = optimizer.optimize(lambda x: SwarmCost(x,sim,samples,optSize,edl,True,True,heading_alignment), print_step=1, iters=20, verbose=3)
# pso sol
sol = [ 112.98548700000001, 161.527467,
1.5335228700000001, 1.0250834600000001, 0.47947535499999999,
2.47739391, 0.114726959, 6.8882244799999999]
# sol = [ 103.53150718, 127.2118855, rad(84.95268405), rad(84.95268), rad(11.97228525), 2.31450607, 4.48346113, 8.30596081]
# sol =[ 1.12985487e+02, 1.61527467e+02, 1.53352287e+00, 1.02508346e+00,
# 4.79475355e-01, 2.47739391e+00, 1.14726959e-01, 6.88822448e+00]
# Parachute.Draw(figure=2)
Cost(sol, sim, samples, optSize, edl, True, True, heading_alignment)
# new_sol = minimize(Cost, sol, args = (sim,samples,optSize,edl), tol=1e-2, method='Nelder-Mead')
return
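# Hypothetical helper, not called by Optimize() itself: unpack the reduced
# 8-parameter vector documented in Optimize() into named pieces, mirroring how
# Cost() consumes it (two optional trailing entries are used for heading alignment).
def unpack_reduced_inputs(inputs):
    switches = inputs[0:2]   # bank reversal switch times
    banks = inputs[2:5]      # bank angle magnitudes (signs applied in Cost)
    horizon = inputs[5]      # NMPC prediction horizon dt
    gains = inputs[6:8]      # NMPC gain matrix Q diagonal
    extra = inputs[8:]       # optional (V_lim, K) for heading alignment
    return switches, banks, horizon, gains, extra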
def SwarmCost(inputs, reference_sim, samples, optSize, edl, reduced, msl, align_heading):
# print inputs.shape
return np.array([Cost(inp, reference_sim, samples, optSize, edl, reduced=True, msl=msl, align_heading=align_heading) for inp in inputs])
def Cost(inputs, reference_sim, samples, optSize, edl, reduced=True, msl=False, align_heading=False):
# Reference Trajectory
if reduced:
switches = inputs[0:2]
banks = inputs[2:5]*np.array([1,-1,1])
else:
switches = inputs[0:3]
banks = inputs[3:7]*np.array([-1,1,-1,1])
if np.any(np.diff(switches) < 0) or np.any(inputs<0):
return 2e5 # arbitrary high cost for bad switch times or negative gains, prediction horizon
bankProfile = lambda **d: profile(d['time'], switch=switches, bank=banks,order=2)
x = InitialState()
output = reference_sim.run(x,[bankProfile])
Xf = output[-1,:]
hf = Xf[3]
lonTarget = np.radians(Xf[5])
# fpaf = Xf[8]
dr = Xf[10]
cr = Xf[11]
high_crossrange = np.abs(cr) > 3
low_altitude = hf <= 9
if high_crossrange or low_altitude:
return 300 + 500*np.abs(cr) - 25*hf # arbitrary high cost for bad reference trajectory
# Otherwise, we have a suitable reference and we can run the QMC
# Closed loop statistics generation
refs = reference_sim.getFBL()
nmpc = NMPC(fbl_ref=refs, debug=False)
if reduced:
nmpc.dt = inputs[5]
nmpc.Q = np.array([[inputs[6],0],[0,inputs[7]]])
else:
nmpc.dt = inputs[7]
nmpc.Q = np.array([[inputs[8],0],[0,inputs[9]]])
if align_heading:
V_lim = inputs[-2]
K = inputs[-1]
x = np.tile(x,(optSize,1)).T
X = [x]
energy0 = edl.energy(x[0],x[3],False)[0]
energyf = Xf[1]*0.5
energy = energy0
E = [energy]
temp = []
while energy > energyf:
Xc = X[-1]
energys = edl.energy(Xc[0],Xc[3],False)
lift,drag = edl.aeroforces(Xc[0],Xc[3],Xc[7])
# Range control
u = nmpc.controller(energy=energys, current_state=Xc, lift=lift, drag=drag, rangeToGo=None, planet=edl.planet)
if align_heading:
# Heading alignment
rtg = lonTarget - Xc[1]
crtg = -Xc[2]
u_heading = np.clip(np.arctan2(crtg,rtg)*K,np.radians(-30),np.radians(30))
heading_cases = np.where(Xc[3]<V_lim)[0]
if heading_cases.shape[0]:
u[heading_cases] = u_heading[heading_cases]
# Shape the control
u.shape = (1,optSize)
u = np.vstack((u,np.zeros((2,optSize))))
de = -np.mean(drag)*np.mean(Xc[3])
if (energy + de) < energyf:
de = energyf - energy
eom = edl.dynamics(u)
X.append(RK4(eom, X[-1], np.linspace(energy,energy+de,10),())[-1])
energy += de
E.append(energy)
if energy < Xf[1]:
temp.append(energy)
if len(E)>600:
break
X = np.array(X)
# Xf = X[-1]
if msl:
Xf = np.array([Trigger(traj, lonTarget, minAlt=6e3, maxVel=485) for traj in X.transpose((2,0,1))]).T # Parachute deployment
else:
Xf = np.array([Trigger(traj,lonTarget) for traj in X.transpose((2,0,1))]).T
Xf_energy = X[-len(temp)]
# X = X.transpose((2,1,0))
# print X.shape
import matplotlib.pyplot as plt
# Xi = Xf
# # ######### for Xi in X:
# plt.figure(1)
# plt.hist2d(Xi[2]*3397,Xi[1]*3397,bins=30,cmap="binary")
# plt.xlabel('Crossrange (km)')
# plt.ylabel('Downrange (km)')
# plt.colorbar()
#
# h = edl.altitude(Xf[0], km=True) # altitude, km
# import pdb
# pdb.set_trace()
for Xi in [Xf,Xf_energy]:
h = edl.altitude(Xi[0], km=True)
plt.figure()
# plt.scatter(Xi[2]*3397,Xi[1]*3397,c=h)
plt.plot(Xi[2]*3397,Xi[1]*3397,'o')
theta = np.linspace(0,2*np.pi,100)
x = np.cos(theta)
y = np.sin(theta)
# for fig in [1,3]:
# plt.figure(fig)
for r in [1,2,8]:
plt.plot(x*r,lonTarget*3397 + y*r,label="{} km".format(r))
plt.legend()
plt.xlabel('Crossrange (km)')
plt.ylabel('Downrange (km)')
# plt.colorbar()
plt.axis('equal')
# plt.figure(2)
#
# plt.plot(Xi[3],h,'o')
plt.show()
h = edl.altitude(Xf[0], km=True) # altitude, km
DR = Xf[1]*edl.planet.radius/1000 # downrange, km
CR = Xf[2]*edl.planet.radius/1000 # -crossrange, km
# Previous cost function
# J = -np.percentile(h,1) + 0.1* (np.percentile(lon,99)-np.percentile(lon,1) + np.percentile(lat,99)-np.percentile(lat,1)) + np.abs(lat.mean())
# Perhaps more theoretically sound?
# J = (np.abs(DR-lonTarget*3397) + np.abs(CR)) #idea: altitude is handled by the trigger, i.e. too low altitude and many undershoots arise which raises the DR/CR errors
Jnorm = np.sqrt((DR-lonTarget*3397)**2+CR**2) # Norm squared, to be differentiable for finite differencing
J = Jnorm
# print "{}% of {} samples landed with combined DR/CR (norm) error < 1 km".format(np.sum(Jnorm<=1)/float(J.size )*100,optSize)
# print "{}% of {} samples landed with combined DR/CR (norm) error < 2 km".format(np.sum(Jnorm<=2)/float(J.size )*100,optSize)
# print "{}% of {} samples landed with combined DR/CR (norm) error < 10 km".format(np.sum(Jnorm<=10)/float(J.size )*100,optSize)
J = Bootstrap(np.mean, J, [J.size], resamples=20)[0][0] # Boostrapped estimate of the mean cost
# plt.hist(J, bins=optSize/10, range=None, normed=False, weights=None, cumulative=False, bottom=None, histtype='bar', align='mid', orientation='vertical', rwidth=None, log=False, color=None, label=None, stacked=False, hold=None, data=None)
# plt.show()
print "input = {}\ncost = {}\n".format(inputs,J)
return J
def Trigger(traj, targetLon, minAlt=0e3, maxVel=600):
for state in traj:
alt = state[0]-3397e3
vel = state[3]
longitude = state[1]
if alt < minAlt or (vel<maxVel and longitude>=targetLon):
return state
return traj[-1]# No better trigger point so final point is used
def OptimizeController():
from scipy.optimize import differential_evolution
perturb = getUncertainty()['parametric']
optSize = 500
samples = perturb.sample(optSize,'S')
nmpc = generateController()
edl = EDL(samples,Energy=True)
bounds = [(0,10),(0,10),(1,30)]
# Cost([0.1,2,2.8],nmpc,samples,optSize,edl)
sol = differential_evolution(CostController,args = (nmpc,samples,optSize,edl), bounds=bounds, tol=1e-2, disp=True, polish=False)
print "Optimized parameters (N={}) are:".format(optSize)
print sol.x
print sol
return
def CostController(inputs, nmpc, samples, optSize, edl):
nmpc.dt = inputs[2]
nmpc.Q = np.array([[inputs[0],0],[0,inputs[1]]])
x = InitialState()
x = np.tile(x,(optSize,1)).T
X = [x]
energy0 = edl.energy(x[0],x[3],False)[0]
energyf = edl.energy(edl.planet.radius + 1.5e3, 500, False)
energy = energy0
E = [energy]
while energy > energyf:
Xc = X[-1]
energys = edl.energy(Xc[0],Xc[3],False)
lift,drag = edl.aeroforces(Xc[0],Xc[3],Xc[7])
u = nmpc.controller(energy=energys, current_state=Xc,lift=lift,drag=drag,rangeToGo=None,planet=edl.planet)
u.shape = (1,optSize)
u = np.vstack((u,np.zeros((2,optSize))))
de = -np.mean(drag)*np.mean(Xc[3])
if (energy + de) < energyf:
# print "Final step"
de = energyf - energy
eom = edl.dynamics(u)
X.append(RK4(eom, X[-1], np.linspace(energy,energy+de,10),())[-1])
energy += de
E.append(energy)
# print "Finished integration step {}".format(len(E)-1)
if len(E)>600:
break
X = np.array(X)
# X = X.transpose((2,1,0))
# print X.shape
# import matplotlib.pyplot as plt
#
# for Xi in X:
# plt.figure(1)
# plt.plot(Xi[1]*3397,Xi[2]*3397,'o')
#
# plt.figure(2)
# plt.plot(Xi[3],(Xi[0]-3397e3)/1000,'o')
# plt.show()
Xf = X[-1]
h = edl.altitude(Xf[0], km=True) # km
lon = Xf[1]*edl.planet.radius/1000
lat = Xf[2]*edl.planet.radius/1000
J = -np.percentile(h,1) + 0.1* (np.percentile(lon,99)-np.percentile(lon,1) + np.percentile(lat,99)-np.percentile(lat,1))
# print J
# print np.percentile(h,1)
# print np.percentile(lon,99)-np.percentile(lon,1)
# print np.percentile(lat,99)-np.percentile(lat,1)
return J
def test():
test_dynamics()
def FD():
from scipy.optimize import differential_evolution, minimize
from numpy import pi, radians as rad
from Simulation import Simulation, Cycle, EntrySim
import matplotlib.pyplot as plt
sim = Simulation(cycle=Cycle(1),output=False,**EntrySim(Vf=460))
perturb = getUncertainty()['parametric']
optSize = 500
samples = perturb.sample(optSize,'S')
edl = EDL(samples,Energy=True)
# MSL, reduced input set
# sol = [ 113.82395707, 170.07337194, # Reversal times
# 1.40719634, 0.97780072, 0.45524235, # Bank angles
# 1.66167718, 0.20598009, 7.78547546] # Controller
# Heavy, full input set
sol = np.array([ 26.06441256, 115.16979593, 167.14750033,
0.37717073, 1.494434, 1.06315079, 0.54208874,
2.31450607, 4.48346113, 8.30596081])
I = np.eye(sol.size)
# J0 = Cost(sol, sim, samples, optSize,edl,False)
J = []
deltas = np.array([1e-5,1e-4,1e-3,1e-2,0.1,1])
labels = ['Switch 1','Switch 2','Switch 3','Bank 1','Bank 2','Bank 3','Bank 4','h','G1','G2']
for delta in deltas:
Ji = []
for vector in I:
delta_vector = delta*vector
Jp = (Cost(sol+delta_vector, sim, samples, optSize,edl,False))
Jn = (Cost(sol-delta_vector, sim, samples, optSize,edl,False))
Ji.append((Jp-Jn)/(2*delta)) # Central difference
J.append(Ji)
J = np.array(J).T
plt.figure(1)
for label,ji in zip(labels,J):
plt.semilogx(deltas, ji, label=label)
plt.xlabel('$\Delta$Input used in central differencing')
plt.ylabel('$\Delta$J$/\Delta Input}$')
plt.title('MC Size = {}'.format(optSize))
plt.legend()
plt.figure(2)
for label,ji in zip(labels[:3],J[:3]):
plt.semilogx(deltas, ji, label=label)
plt.xlabel('$\Delta$Input (s) used in central differencing')
plt.ylabel('$\Delta$J$/\Delta Input}$')
plt.title('Switch Times, MC Size = {}'.format(optSize))
plt.legend()
plt.figure(3)
for label,ji in zip(labels[3:7],J[3:7]):
plt.semilogx(deltas, ji, label=label)
plt.xlabel('$\Delta$Input (rad) used in central differencing')
plt.ylabel('$\Delta$J$/\Delta Input}$')
plt.title('Bank Angles, MC Size = {}'.format(optSize))
plt.legend()
plt.show()
if __name__ == "__main__":
# test()
# OptimizeController()
Optimize()
# FD()
| gpl-3.0 |
scienceopen/ledtime | cmostimeout.py | 2 | 2909 | #!/usr/bin/env python
"""
Reads Calgary sCMOS .out timing files
tk0: FPGA tick when frame was taken. In this test configuration of internal trigger,
it basically tells you, yes, the FPGA is running and knows how to count. The FPGA
timebase could have large error (yielding large absolute time error) and yet
this column would be exactly the same.
tk1: FPGA tick after frame was retrieved, to compare with 'elapsed' column
elapsed: PC clock relative time since acquisition start, when frame was retrieved vis-a-vis tk1
Michael Hirsch
"""
from scipy.stats import linregress
from numpy import arange
from pathlib import Path
from pandas import read_csv
from matplotlib.pyplot import figure,subplots
import seaborn as sns
sns.set_context('talk',font_scale=1.5)
#%% user parameters
fps = 20
fn = Path('~/Dropbox/CMOScalgary/test_clock2.out').expanduser()
dtExpected = 1/fps
tick_sec = 1/40e6 # we suppose the FPGA clock cycle is 40MHz. tick_sec is the period of the tick, assuming zero timebase error (real life timebase has substantial error)
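# Minimal sketch, assuming the nominal timebase above: one frame at fps=20
# should span dtExpected = 0.05 s, i.e. 2,000,000 ticks of a 40 MHz clock, so a
# pair of consecutive tk1 readings maps to a per-frame error in seconds as:
def frame_error_sec(tk1_prev, tk1_curr, tick=tick_sec, expected=dtExpected):
    return (tk1_curr - tk1_prev) * tick - expected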
#%% parse data
# sep uses regex for "one or more spaces"
data = read_csv(fn,sep='\s{1,}',skiprows=14,skipfooter=1,engine='python',
header=None, usecols=(1,2,3),
names=['elapsed','tk1','tk0'])
N=data.shape[0]
#%% per frame error
dtick_sec = data['tk1'].diff()*tick_sec
print(dtick_sec.describe())
dt = data['elapsed'].diff()
print(dt.describe())
fg,axs = subplots(1,2)
ax = axs[0]
ax.set_title('PC time')
dterr = dt - dtExpected
dterr.hist(ax=ax,bins=100)
ax = axs[1]
ax.set_title('FPGA time')
dtickerr = dtick_sec - dtExpected
dtickerr.hist(ax=ax,bins=100)
fg.suptitle(f'Per-frame timing error, N={N} fps={fps}',size='xx-large')
for a in axs:
a.set_yscale('log')
a.set_xlabel('time error [sec.]')
#%% accumulated error (bias)
expectedElapsed = arange(N) * dtExpected
elapsedErrorPC = data['elapsed'] - expectedElapsed
elapsedErrorFPGA = data['tk1']*tick_sec - expectedElapsed
elapsedErrorInt = data['tk0']*tick_sec - expectedElapsed
"""
Hmm, looks like the PC and FPGA have different error slopes--as expected, given
the large timebase errors. Let's do a linear regression.
"""
FPGAslope,FPGAint = linregress(expectedElapsed,elapsedErrorFPGA)[:2]
PCslope, PCint = linregress(expectedElapsed,elapsedErrorPC)[:2]
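# The fitted slopes are (to first order) the fractional timebase errors of each
# clock relative to the assumed nominal rates; scaled by 1e6 they read as ppm.
# Illustrative only -- these values are not used further below.
ppm_pc = PCslope * 1e6
ppm_fpga = FPGAslope * 1e6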
#ax.scatter(elapsedErrorPC,elapsedErrorFPGA)
#intc,slop = linregress(data['elapsed'],data['tk0']*tick_sec)[:2]
ax = figure().gca()
ax.plot(expectedElapsed,elapsedErrorPC,label='PC')
ax.plot(expectedElapsed,expectedElapsed*PCslope + PCint,label='PCfit')
ax.plot(expectedElapsed,elapsedErrorFPGA,label='FPGA')
ax.plot(expectedElapsed,expectedElapsed*FPGAslope + FPGAint,label='FPGAfit')
ax.plot(expectedElapsed,elapsedErrorInt)
ax.legend(loc='best')
ax.set_title(f'Cumulative timing error, N={N} fps={fps}')
ax.set_xlabel('True elapsed time [sec.]')
ax.set_ylabel('Accumulated Error [sec.]')
ax.grid(True)
| gpl-3.0 |
researchstudio-sat/wonpreprocessing | python-processing/classification/multiclass_classifier.py | 1 | 4712 | __author__ = 'Federico'
# Multiclass Naive-Bayes classifier for categorization of WoN e-mail dataset
# It uses MultinomialNB classifier
import sys
from numpy import *
from tools.tensor_utils import read_input_tensor, SparseTensor
from sklearn import metrics
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from nltk.corpus import stopwords
def get_example_data():
# read the tensor from the folder passed by args
data_file_prefix = sys.argv[1]
header_file = data_file_prefix + '/headers.txt'
data_files = [data_file_prefix + "/connection.mtx",
data_file_prefix + "/needtype.mtx",
data_file_prefix + "/subject.mtx",
data_file_prefix + "/content.mtx",
data_file_prefix + "/category.mtx"]
slices = [SparseTensor.CONNECTION_SLICE, SparseTensor.NEED_TYPE_SLICE, SparseTensor.ATTR_SUBJECT_SLICE,
SparseTensor.ATTR_CONTENT_SLICE, SparseTensor.CATEGORY_SLICE]
tensor = read_input_tensor(header_file, data_files, slices, False)
data = []
target = []
# Store the chosen input into lists.
# The "if" statement is meant to include only samples with a single category (No multilabel)
for need_index in tensor.getNeedIndices():
content = ""
categories = tensor.getAttributesForNeed(need_index, SparseTensor.CATEGORY_SLICE)
numCategories = len(categories)
if numCategories >= 1:
category_index = tensor.getSliceMatrix(SparseTensor.CATEGORY_SLICE)[need_index,].nonzero()[1][0]
target.append(category_index)
for word in tensor.getAttributesForNeed(need_index, SparseTensor.ATTR_SUBJECT_SLICE):
content += word + " "
data.append(content)
# Include only few of all the categories (e.g. with samples > n)
newdata = []
newtarget = []
for i in range(len(target)):
if target.count(target[i]) > 50:
newtarget.append(target[i])
newdata.append(data[i])
data = newdata
target = newtarget
# Print out the input, just a check:
target_names = tensor.getHeaders()
print("test")
print data
print target_names
print target
return data, target, target_names
# Call for the input
my_data, my_target, my_targetname = get_example_data()
# A little information about dimensions and format of the input:
print type(my_data), type(my_target), # format of data and targets
print len(my_data) # number of samples
print len(my_target)
# Let's build the training and testing datasets:
SPLIT_PERC = 0.80 # 80% goes into training, 20% into test
split_size = int(len(my_data)*SPLIT_PERC)
X_train = my_data[:split_size]
X_test = my_data[split_size:]
y_train = my_target[:split_size]
y_test = my_target[split_size:]
# Training, prediction and evaluation of the classifier(s):
def train_and_evaluate(clf, X_train, X_test, y_train, y_test, y_name):
# Training
clf.fit(X_train, y_train)
# Prediction of testing sets
y_pred = clf.predict(X_test)
# Precision, recall and support (i.e. nr. of samples used for the testing)
print "Classification Report:"
print metrics.classification_report(y_test, y_pred)
# Confusion Matrix
print "Confusion Matrix:"
print metrics.confusion_matrix(y_test, y_pred)
# Visualization of Categories / Assigned / Data
print "Tested data => assigned category, data:"
for i in range(len(X_test)):
print str(i) + ") Real category: " + str(y_name[y_test[i]]) + ", Assigned category: " + \
str(y_name[y_pred[i]]) + ", Data: " + str(X_test[i])
# Assign names to the categories (defined by numbers)
print "\n Categories: \n"
categories = set()
for cat in y_pred:
categories.add(cat)
categories = sorted(categories)
for cat in categories:
print str(cat) + " " + y_name[cat]
# Introducing stop words
stopset = set(stopwords.words('english'))
# Two different classifiers: Count and Tfidf vectors
clf_count = Pipeline([
('vect', CountVectorizer(
stop_words=stopset,
token_pattern=ur"\b[a-z0-9_\-\.]+[a-z][a-z0-9_\-\.]+\b",
)),
('clf', MultinomialNB(alpha=1)),
])
clf_tfidf = Pipeline([
('vect', TfidfVectorizer(
stop_words=stopset,
token_pattern=ur"\b[a-z0-9_\-\.]+[a-z][a-z0-9_\-\.]+\b",
)),
('clf', MultinomialNB(alpha=1)),
])
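# Hypothetical convenience wrapper (not used below): map raw subject strings to
# category names with an already-fitted pipeline, mirroring how y_name[y_pred[i]]
# is used in train_and_evaluate above.
def predict_category(clf, texts, target_names):
    return [target_names[i] for i in clf.predict(texts)]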
# List of classifiers
clfs = [clf_count, clf_tfidf]
# Run the evaluation/classification
for clf in clfs:
train_and_evaluate(clf, X_train, X_test, y_train, y_test, my_targetname)
| apache-2.0 |
shahankhatch/scikit-learn | examples/svm/plot_svm_scale_c.py | 223 | 5375 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different number of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the number of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different number of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C1`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
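Put differently, a value of `C` selected by cross-validation on a subsample of
size `n` should, in the `l1` case, be rescaled roughly in proportion to the
number of samples used for the final fit (:math:`C \propto n`), while in the
`l2` case it can be left unchanged.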
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features / 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features / 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(n=n_samples, train_size=train_size,
n_iter=250, random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
jorik041/scikit-learn | sklearn/covariance/robust_covariance.py | 198 | 29735 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
det : float
Log of the determinant of the robust covariance estimate
(as computed by `fast_logdet`).
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
dist : array-like, shape (n_samples,)
Squared Mahalanobis distances of all observations from `location`,
computed with the robust `covariance` estimate.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
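Examples
--------
A small synthetic run (illustrative only; the exact estimates depend on the
random seed):
>>> import numpy as np
>>> from sklearn.covariance.robust_covariance import c_step
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(100, 3)
>>> location, covariance, det, support, dist = c_step(
...     X, n_support=60, random_state=rng)
>>> int(support.sum())
60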
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
        Number of best candidate results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
        (2 iterations are usually enough to get close to the final solution;
        in practice more than 20 are never needed).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
    best_supports : array-like, shape (select, n_samples)
        The `select` best supports found in the data set (`X`).
    best_ds : array-like, shape (select, n_samples)
        The squared Mahalanobis distances associated with the `select`
        best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
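    Examples
    --------
    Minimal illustrative sketch (an editorial addition, not part of the
    original docstring); the synthetic data below is an assumption:

    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(80, 2)
    >>> locs, covs, supports, ds = select_candidates(
    ...     X, n_support=45, n_trials=5, select=2, random_state=0)
    >>> locs.shape, covs.shape
    ((2, 2), (2, 2, 2))
    >>> supports.shape
    (2, 80)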
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
        `[n_samples + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
The FastMCD algorithm has been introduced by Rousseuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets of the
    data, then to pool them into larger subsets, and finally into the full
    data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
    support : array-like, type boolean, shape (n_samples,)
        A mask of the observations that have been used to compute
        the robust location and covariance estimates of the data set.
    dist : array-like, shape (n_samples,)
        Squared Mahalanobis distances of all observations from the robust
        location estimate, computed with the robust precision matrix.
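    Examples
    --------
    Minimal illustrative sketch (an editorial addition, not part of the
    original docstring); the synthetic data below is an assumption:

    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(100, 3)
    >>> location, covariance, support, dist = fast_mcd(X, random_state=0)
    >>> location.shape, covariance.shape
    ((3,), (3, 3))
    >>> int(support.sum()) == int(np.ceil(0.5 * (100 + 3 + 1)))
    True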
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
        except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal): reduce the number of retained candidates
            # before re-allocating.
            n_best_tot = 10
            n_best_sub = 2
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
        Useful when working with data whose mean is almost, but not
        exactly, zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
        [n_samples + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
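    Examples
    --------
    Minimal illustrative sketch (an editorial addition, not part of the
    upstream docstring); it assumes the class is available in the current
    namespace (e.g. when run as a module doctest) and uses synthetic data:

    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(100, 2)
    >>> mcd = MinCovDet(random_state=0).fit(X)
    >>> mcd.location_.shape, mcd.covariance_.shape
    ((2,), (2, 2))
    >>> bool(mcd.support_.any())
    True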
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
        y : not used, present for API consistency.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
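        Examples
        --------
        Illustrative sketch (an editorial addition); note that ``fit``
        already applies this correction internally, so calling it again
        here only illustrates the returned shape. The synthetic data is an
        assumption:

        >>> import numpy as np
        >>> rng = np.random.RandomState(0)
        >>> X = rng.randn(100, 2)
        >>> mcd = MinCovDet(random_state=0).fit(X)
        >>> mcd.correct_covariance(X).shape
        (2, 2)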
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
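        Examples
        --------
        Illustrative sketch (an editorial addition); ``fit`` already
        performs this re-weighting internally. The synthetic data is an
        assumption:

        >>> import numpy as np
        >>> rng = np.random.RandomState(0)
        >>> X = rng.randn(100, 2)
        >>> mcd = MinCovDet(random_state=0).fit(X)
        >>> loc, cov, support = mcd.reweight_covariance(X)
        >>> loc.shape, cov.shape, bool(support.sum() <= 100)
        ((2,), (2, 2), True)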
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
| bsd-3-clause |
dsm054/pandas | pandas/tests/indexes/multi/test_monotonic.py | 2 | 8539 | # -*- coding: utf-8 -*-
import numpy as np
import pytest
import pandas as pd
from pandas import Index, IntervalIndex, MultiIndex
def test_is_monotonic_increasing():
i = MultiIndex.from_product([np.arange(10),
np.arange(10)], names=['one', 'two'])
assert i.is_monotonic is True
assert i._is_strictly_monotonic_increasing is True
assert Index(i.values).is_monotonic is True
assert i._is_strictly_monotonic_increasing is True
i = MultiIndex.from_product([np.arange(10, 0, -1),
np.arange(10)], names=['one', 'two'])
assert i.is_monotonic is False
assert i._is_strictly_monotonic_increasing is False
assert Index(i.values).is_monotonic is False
assert Index(i.values)._is_strictly_monotonic_increasing is False
i = MultiIndex.from_product([np.arange(10),
np.arange(10, 0, -1)],
names=['one', 'two'])
assert i.is_monotonic is False
assert i._is_strictly_monotonic_increasing is False
assert Index(i.values).is_monotonic is False
assert Index(i.values)._is_strictly_monotonic_increasing is False
i = MultiIndex.from_product([[1.0, np.nan, 2.0], ['a', 'b', 'c']])
assert i.is_monotonic is False
assert i._is_strictly_monotonic_increasing is False
assert Index(i.values).is_monotonic is False
assert Index(i.values)._is_strictly_monotonic_increasing is False
# string ordering
i = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert i.is_monotonic is False
assert Index(i.values).is_monotonic is False
assert i._is_strictly_monotonic_increasing is False
assert Index(i.values)._is_strictly_monotonic_increasing is False
i = MultiIndex(levels=[['bar', 'baz', 'foo', 'qux'],
['mom', 'next', 'zenith']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert i.is_monotonic is True
assert Index(i.values).is_monotonic is True
assert i._is_strictly_monotonic_increasing is True
assert Index(i.values)._is_strictly_monotonic_increasing is True
# mixed levels, hits the TypeError
i = MultiIndex(
levels=[[1, 2, 3, 4], ['gb00b03mlx29', 'lu0197800237',
'nl0000289783',
'nl0000289965', 'nl0000301109']],
labels=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]],
names=['household_id', 'asset_id'])
assert i.is_monotonic is False
assert i._is_strictly_monotonic_increasing is False
# empty
i = MultiIndex.from_arrays([[], []])
assert i.is_monotonic is True
assert Index(i.values).is_monotonic is True
assert i._is_strictly_monotonic_increasing is True
assert Index(i.values)._is_strictly_monotonic_increasing is True
def test_is_monotonic_decreasing():
i = MultiIndex.from_product([np.arange(9, -1, -1),
np.arange(9, -1, -1)],
names=['one', 'two'])
assert i.is_monotonic_decreasing is True
assert i._is_strictly_monotonic_decreasing is True
assert Index(i.values).is_monotonic_decreasing is True
assert i._is_strictly_monotonic_decreasing is True
i = MultiIndex.from_product([np.arange(10),
np.arange(10, 0, -1)],
names=['one', 'two'])
assert i.is_monotonic_decreasing is False
assert i._is_strictly_monotonic_decreasing is False
assert Index(i.values).is_monotonic_decreasing is False
assert Index(i.values)._is_strictly_monotonic_decreasing is False
i = MultiIndex.from_product([np.arange(10, 0, -1),
np.arange(10)], names=['one', 'two'])
assert i.is_monotonic_decreasing is False
assert i._is_strictly_monotonic_decreasing is False
assert Index(i.values).is_monotonic_decreasing is False
assert Index(i.values)._is_strictly_monotonic_decreasing is False
i = MultiIndex.from_product([[2.0, np.nan, 1.0], ['c', 'b', 'a']])
assert i.is_monotonic_decreasing is False
assert i._is_strictly_monotonic_decreasing is False
assert Index(i.values).is_monotonic_decreasing is False
assert Index(i.values)._is_strictly_monotonic_decreasing is False
# string ordering
i = MultiIndex(levels=[['qux', 'foo', 'baz', 'bar'],
['three', 'two', 'one']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert i.is_monotonic_decreasing is False
assert Index(i.values).is_monotonic_decreasing is False
assert i._is_strictly_monotonic_decreasing is False
assert Index(i.values)._is_strictly_monotonic_decreasing is False
i = MultiIndex(levels=[['qux', 'foo', 'baz', 'bar'],
['zenith', 'next', 'mom']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert i.is_monotonic_decreasing is True
assert Index(i.values).is_monotonic_decreasing is True
assert i._is_strictly_monotonic_decreasing is True
assert Index(i.values)._is_strictly_monotonic_decreasing is True
# mixed levels, hits the TypeError
i = MultiIndex(
levels=[[4, 3, 2, 1], ['nl0000301109', 'nl0000289965',
'nl0000289783', 'lu0197800237',
'gb00b03mlx29']],
labels=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]],
names=['household_id', 'asset_id'])
assert i.is_monotonic_decreasing is False
assert i._is_strictly_monotonic_decreasing is False
# empty
i = MultiIndex.from_arrays([[], []])
assert i.is_monotonic_decreasing is True
assert Index(i.values).is_monotonic_decreasing is True
assert i._is_strictly_monotonic_decreasing is True
assert Index(i.values)._is_strictly_monotonic_decreasing is True
def test_is_strictly_monotonic_increasing():
idx = pd.MultiIndex(levels=[['bar', 'baz'], ['mom', 'next']],
labels=[[0, 0, 1, 1], [0, 0, 0, 1]])
assert idx.is_monotonic_increasing is True
assert idx._is_strictly_monotonic_increasing is False
def test_is_strictly_monotonic_decreasing():
idx = pd.MultiIndex(levels=[['baz', 'bar'], ['next', 'mom']],
labels=[[0, 0, 1, 1], [0, 0, 0, 1]])
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is False
def test_searchsorted_monotonic(indices):
# GH17271
# not implemented for tuple searches in MultiIndex
# or Intervals searches in IntervalIndex
if isinstance(indices, (MultiIndex, IntervalIndex)):
return
# nothing to test if the index is empty
if indices.empty:
return
value = indices[0]
# determine the expected results (handle dupes for 'right')
expected_left, expected_right = 0, (indices == value).argmin()
if expected_right == 0:
# all values are the same, expected_right should be length
expected_right = len(indices)
# test _searchsorted_monotonic in all cases
# test searchsorted only for increasing
if indices.is_monotonic_increasing:
ssm_left = indices._searchsorted_monotonic(value, side='left')
assert expected_left == ssm_left
ssm_right = indices._searchsorted_monotonic(value, side='right')
assert expected_right == ssm_right
ss_left = indices.searchsorted(value, side='left')
assert expected_left == ss_left
ss_right = indices.searchsorted(value, side='right')
assert expected_right == ss_right
elif indices.is_monotonic_decreasing:
ssm_left = indices._searchsorted_monotonic(value, side='left')
assert expected_left == ssm_left
ssm_right = indices._searchsorted_monotonic(value, side='right')
assert expected_right == ssm_right
else:
# non-monotonic should raise.
with pytest.raises(ValueError):
indices._searchsorted_monotonic(value, side='left')
| bsd-3-clause |