repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 classes)
---|---|---|---|---|---|
xavierwu/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 354 | 4124 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms is compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore the weights are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
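# Note (added for clarity): with n_samples=13000 and n_split=3000, X_train has
# shape (3000, 10) and X_test has shape (10000, 10).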
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_test_predict in zip(
bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
real_test_errors.append(
1. - accuracy_score(y_test, real_test_predict))
discrete_test_errors.append(
1. - accuracy_score(y_test, discrete_test_predict))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| bsd-3-clause |
rhyolight/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/offsetbox.py | 69 | 17728 | """
The OffsetBox is a simple container artist. The child artists are meant
to be drawn at positions relative to their parent. The [VH]Packer,
DrawingArea and TextArea are derived from the OffsetBox.
The [VH]Packer automatically adjusts the relative positions of its
children, which should be instances of the OffsetBox. This is used to
align similar artists together, e.g., in legend.
The DrawingArea can contain any Artist as a child. The
DrawingArea has a fixed width and height. The position of children
relative to the parent is fixed. The TextArea contains a single
Text instance. The width and height of the TextArea instance is the
width and height of its child text.
"""
import matplotlib.transforms as mtransforms
import matplotlib.artist as martist
import matplotlib.text as mtext
import numpy as np
from matplotlib.patches import bbox_artist as mbbox_artist
DEBUG=False
# for debugging use
def bbox_artist(*args, **kwargs):
if DEBUG:
mbbox_artist(*args, **kwargs)
# _get_packed_offsets() and _get_aligned_offsets() are coded assuming
# that we are packing boxes horizontally, but the same functions are
# used for vertical packing as well.
def _get_packed_offsets(wd_list, total, sep, mode="fixed"):
"""
Geiven a list of (width, xdescent) of each boxes, calculate the
total width and the x-offset positions of each items according to
*mode*. xdescent is analagous to the usual descent, but along the
x-direction. xdescent values are currently ignored.
*wd_list* : list of (width, xdescent) of boxes to be packed.
*sep* : spacing between boxes
*total* : Intended total length. None if not used.
*mode* : packing mode. 'fixed', 'expand', or 'equal'.
"""
w_list, d_list = zip(*wd_list)
# d_list is currently not used.
if mode == "fixed":
offsets_ = np.add.accumulate([0]+[w + sep for w in w_list])
offsets = offsets_[:-1]
if total is None:
total = offsets_[-1] - sep
return total, offsets
elif mode == "expand":
sep = (total - sum(w_list))/(len(w_list)-1.)
offsets_ = np.add.accumulate([0]+[w + sep for w in w_list])
offsets = offsets_[:-1]
return total, offsets
elif mode == "equal":
maxh = max(w_list)
if total is None:
total = (maxh+sep)*len(w_list)
else:
sep = float(total)/(len(w_list)) - maxh
offsets = np.array([(maxh+sep)*i for i in range(len(w_list))])
return total, offsets
else:
raise ValueError("Unknown mode : %s" % (mode,))
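# Illustrative example (made-up numbers, not part of the original module):
#
#     >>> _get_packed_offsets([(10, 0), (20, 0)], total=None, sep=5, mode="fixed")
#     (35, array([ 0, 15]))
#
# i.e. the packed row is 35 units long and the two boxes start at offsets 0 and 15.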
def _get_aligned_offsets(hd_list, height, align="baseline"):
"""
Geiven a list of (height, descent) of each boxes, align the boxes
with *align* and calculate the y-offsets of each boxes.
total width and the offset positions of each items according to
*mode*. xdescent is analagous to the usual descent, but along the
x-direction. xdescent values are currently ignored.
*hd_list* : list of (width, xdescent) of boxes to be aligned.
*sep* : spacing between boxes
*height* : Intended total length. None if not used.
*align* : align mode. 'baseline', 'top', 'bottom', or 'center'.
"""
if height is None:
height = max([h for h, d in hd_list])
if align == "baseline":
height_descent = max([h-d for h, d in hd_list])
descent = max([d for h, d in hd_list])
height = height_descent + descent
offsets = [0. for h, d in hd_list]
elif align in ["left","top"]:
descent=0.
offsets = [d for h, d in hd_list]
elif align in ["right","bottom"]:
descent=0.
offsets = [height-h+d for h, d in hd_list]
elif align == "center":
descent=0.
offsets = [(height-h)*.5+d for h, d in hd_list]
else:
raise ValueError("Unknown Align mode : %s" % (align,))
return height, descent, offsets
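# Illustrative example (made-up numbers, not part of the original module):
#
#     >>> _get_aligned_offsets([(10, 2), (20, 5)], height=None, align="baseline")
#     (20, 5, [0.0, 0.0])
#
# i.e. the aligned stack is 20 units high with a common descent of 5; both boxes
# get a zero offset because baseline alignment leaves their baselines in place.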
class OffsetBox(martist.Artist):
"""
The OffsetBox is a simple container artist. The child artists are meant
to be drawn at positions relative to the parent.
"""
def __init__(self, *args, **kwargs):
super(OffsetBox, self).__init__(*args, **kwargs)
self._children = []
self._offset = (0, 0)
def set_figure(self, fig):
"""
Set the figure
accepts a class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
for c in self.get_children():
c.set_figure(fig)
def set_offset(self, xy):
"""
Set the offset
accepts an (x, y) tuple or a callable object.
"""
self._offset = xy
def get_offset(self, width, height, xdescent, ydescent):
"""
Get the offset
accepts extent of the box
"""
if callable(self._offset):
return self._offset(width, height, xdescent, ydescent)
else:
return self._offset
def set_width(self, width):
"""
Set the width
accepts float
"""
self.width = width
def set_height(self, height):
"""
Set the height
accepts float
"""
self.height = height
def get_children(self):
"""
Return a list of artists it contains.
"""
return self._children
def get_extent_offsets(self, renderer):
raise NotImplementedError("get_extent_offsets must be overridden in derived classes")
def get_extent(self, renderer):
"""
Return width, height, xdescent, ydescent of the box
"""
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
return w, h, xd, yd
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(w, h, xd, yd)
return mtransforms.Bbox.from_bounds(px-xd, py-yd, w, h)
def draw(self, renderer):
"""
Update the location of children if necessary and draw them
to the given *renderer*.
"""
width, height, xdescent, ydescent, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(width, height, xdescent, ydescent)
for c, (ox, oy) in zip(self.get_children(), offsets):
c.set_offset((px+ox, py+oy))
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class PackerBase(OffsetBox):
def __init__(self, pad=None, sep=None, width=None, height=None,
align=None, mode=None,
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
"""
super(PackerBase, self).__init__()
self.height = height
self.width = width
self.sep = sep
self.pad = pad
self.mode = mode
self.align = align
self._children = children
class VPacker(PackerBase):
"""
The VPacker has its children packed vertically. It automatically
adjusts the relative positions of its children at draw time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
"""
super(VPacker, self).__init__(pad, sep, width, height,
align, mode,
children)
def get_extent_offsets(self, renderer):
"""
Update the offsets of the children and return the extent of the box.
"""
whd_list = [c.get_extent(renderer) for c in self.get_children()]
whd_list = [(w, h, xd, (h-yd)) for w, h, xd, yd in whd_list]
wd_list = [(w, xd) for w, h, xd, yd in whd_list]
width, xdescent, xoffsets = _get_aligned_offsets(wd_list,
self.width,
self.align)
pack_list = [(h, yd) for w,h,xd,yd in whd_list]
height, yoffsets_ = _get_packed_offsets(pack_list, self.height,
self.sep, self.mode)
yoffsets = yoffsets_ + [yd for w,h,xd,yd in whd_list]
ydescent = height - yoffsets[0]
yoffsets = height - yoffsets
#w, h, xd, h_yd = whd_list[-1]
yoffsets = yoffsets - ydescent
return width + 2*self.pad, height + 2*self.pad, \
xdescent+self.pad, ydescent+self.pad, \
zip(xoffsets, yoffsets)
class HPacker(PackerBase):
"""
The HPacker has its children packed horizontally. It automatically
adjusts the relative positions of its children at draw time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
"""
super(HPacker, self).__init__(pad, sep, width, height,
align, mode, children)
def get_extent_offsets(self, renderer):
"""
Update the offsets of the children and return the extent of the box.
"""
whd_list = [c.get_extent(renderer) for c in self.get_children()]
if self.height is None:
height_descent = max([h-yd for w,h,xd,yd in whd_list])
ydescent = max([yd for w,h,xd,yd in whd_list])
height = height_descent + ydescent
else:
height = self.height - 2*self.pad # height w/o pad
hd_list = [(h, yd) for w, h, xd, yd in whd_list]
height, ydescent, yoffsets = _get_aligned_offsets(hd_list,
self.height,
self.align)
pack_list = [(w, xd) for w,h,xd,yd in whd_list]
width, xoffsets_ = _get_packed_offsets(pack_list, self.width,
self.sep, self.mode)
xoffsets = xoffsets_ + [xd for w,h,xd,yd in whd_list]
xdescent=whd_list[0][2]
xoffsets = xoffsets - xdescent
return width + 2*self.pad, height + 2*self.pad, \
xdescent + self.pad, ydescent + self.pad, \
zip(xoffsets, yoffsets)
class DrawingArea(OffsetBox):
"""
The DrawingArea can contain any Artist as a child. The DrawingArea
has a fixed width and height. The position of children relative to
the parent is fixed.
"""
def __init__(self, width, height, xdescent=0.,
ydescent=0., clip=True):
"""
*width*, *height* : width and height of the container box.
*xdescent*, *ydescent* : descent of the box in x- and y-direction.
"""
super(DrawingArea, self).__init__()
self.width = width
self.height = height
self.xdescent = xdescent
self.ydescent = ydescent
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the children
"""
return self.offset_transform
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
Accepts a tuple of (x, y) coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() #w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox-xd, oy-yd, w, h)
def get_extent(self, renderer):
"""
Return width, height, xdescent, ydescent of the box
"""
return self.width, self.height, self.xdescent, self.ydescent
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the container box'
self._children.append(a)
a.set_transform(self.get_transform())
def draw(self, renderer):
"""
Draw the children
"""
for c in self._children:
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
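# Minimal usage sketch (the sizes and the Circle artist below are arbitrary
# illustrations, not part of the original module):
#
#     >>> from matplotlib.patches import Circle
#     >>> da = DrawingArea(30, 30, xdescent=0., ydescent=0.)
#     >>> da.add_artist(Circle((15, 15), 10))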
class TextArea(OffsetBox):
"""
The TextArea contains a single Text instance. The text is
placed at (0, 0) with baseline+left alignment. The width and height
of the TextArea instance are the width and height of its child
text.
"""
def __init__(self, s,
textprops=None,
multilinebaseline=None,
minimumdescent=True,
):
"""
*s* : a string to be displayed.
*textprops* : property dictionary for the text
*multilinebaseline* : If True, baseline for multiline text is
adjusted so that it is (approximately)
center-aligned with singleline text.
*minimumdescent* : If True, the box has a minimum descent of "p".
"""
if textprops is None:
textprops = {}
if "va" not in textprops:
textprops["va"]="baseline"
self._text = mtext.Text(0, 0, s, **textprops)
OffsetBox.__init__(self)
self._children = [self._text]
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
self._baseline_transform = mtransforms.Affine2D()
self._text.set_transform(self.offset_transform+self._baseline_transform)
self._multilinebaseline = multilinebaseline
self._minimumdescent = minimumdescent
def set_multilinebaseline(self, t):
"""
Set multilinebaseline.
If True, the baseline for multiline text is
adjusted so that it is (approximately) center-aligned with
single-line text.
"""
self._multilinebaseline = t
def get_multilinebaseline(self):
"""
get multilinebaseline .
"""
return self._multilinebaseline
def set_minimumdescent(self, t):
"""
Set minimumdescent.
If True, the extent of single-line text is adjusted so that
it has a minimum descent of "p".
"""
self._minimumdescent = t
def get_minimumdescent(self):
"""
get minimumdescent.
"""
return self._minimumdescent
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
Accepts a tuple of (x, y) coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() #w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox-xd, oy-yd, w, h)
def get_extent(self, renderer):
clean_line, ismath = self._text.is_math_text(self._text._text)
_, h_, d_ = renderer.get_text_width_height_descent(
"lp", self._text._fontproperties, ismath=False)
bbox, info = self._text._get_layout(renderer)
w, h = bbox.width, bbox.height
line = info[0][0] # first line
_, hh, dd = renderer.get_text_width_height_descent(
clean_line, self._text._fontproperties, ismath=ismath)
self._baseline_transform.clear()
if len(info) > 1 and self._multilinebaseline: # multi line
d = h-(hh-dd) # the baseline of the first line
d_new = 0.5 * h - 0.5 * (h_ - d_)
self._baseline_transform.translate(0, d - d_new)
d = d_new
else: # single line
h_d = max(h_ - d_, h-dd)
if self.get_minimumdescent():
## to have a minimum descent, #i.e., "l" and "p" have same
## descents.
d = max(dd, d_)
else:
d = dd
h = h_d + d
return w, h, 0., d
def draw(self, renderer):
"""
Draw the children
"""
self._text.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
| agpl-3.0 |
anomam/pvlib-python | pvlib/iotools/ecmwf_macc.py | 4 | 11452 | """
Read data from ECMWF MACC Reanalysis.
"""
import threading
import pandas as pd
try:
import netCDF4
except ImportError:
class netCDF4:
@staticmethod
def Dataset(*a, **kw):
raise ImportError(
'Reading ECMWF data requires netCDF4 to be installed.')
try:
from ecmwfapi import ECMWFDataServer
except ImportError:
def ECMWFDataServer(*a, **kw):
raise ImportError(
'To download data from ECMWF requires the API client.\nSee https:/'
'/confluence.ecmwf.int/display/WEBAPI/Access+ECMWF+Public+Datasets'
)
#: map of ECMWF MACC parameter keynames and codes used in API
PARAMS = {
"tcwv": "137.128",
"aod550": "207.210",
'aod469': '213.210',
'aod670': '214.210',
'aod865': '215.210',
"aod1240": "216.210",
}
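# For example, looking up the keynames ('aod550', 'tcwv') and joining the codes
# with '/' gives the API parameter string '207.210/137.128', which is what
# get_ecmwf_macc() below builds when lookup_params is True.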
def _ecmwf(server, startdate, stopdate, params, targetname):
# see http://apps.ecmwf.int/datasets/data/macc-reanalysis/levtype=sfc/
server.retrieve({
"class": "mc",
"dataset": "macc",
"date": "%s/to/%s" % (startdate, stopdate),
"expver": "rean",
"grid": "0.75/0.75",
"levtype": "sfc",
"param": params,
"step": "3/6/9/12/15/18/21/24",
"stream": "oper",
"format": "netcdf",
"time": "00:00:00",
"type": "fc",
"target": targetname,
})
def get_ecmwf_macc(filename, params, startdate, stopdate, lookup_params=True,
server=None, target=_ecmwf):
"""
Download data from ECMWF MACC Reanalysis API.
Parameters
----------
filename : str
full path of file where to save data, ``.nc`` appended if not given
params : str or sequence of str
keynames of parameter[s] to download
startdate : datetime.datetime or datetime.date
UTC date
stopdate : datetime.datetime or datetime.date
UTC date
lookup_params : bool, default True
optional flag, if ``False``, then codes are already formatted
server : ecmwfapi.api.ECMWFDataServer
optionally provide a server object, default is ``None``
target : callable
optional function that calls ``server.retrieve`` to pass to thread
Returns
-------
t : thread
a thread object, use it to check status by calling `t.is_alive()`
Notes
-----
To download data from ECMWF requires the API client and a registration
key. Please read the documentation in `Access ECMWF Public Datasets
<https://confluence.ecmwf.int/display/WEBAPI/Access+ECMWF+Public+Datasets>`_.
Follow the instructions in step 4 and save the ECMWF registration key
as `$HOME/.ecmwfapirc` or set `ECMWF_API_KEY` as the path to the key.
This function returns a daemon thread that runs in the background. Exiting
Python will kill this thread, however this thread will not block the main
thread or other threads. This thread will terminate when the file is
downloaded or if the thread raises an unhandled exception. You may submit
multiple requests simultaneously to break up large downloads. You can also
check the status and retrieve downloads online at
http://apps.ecmwf.int/webmars/joblist/. This is useful if you kill the
thread. Downloads expire after 24 hours.
.. warning:: Your request may be queued online for an hour or more before
it begins to download
Precipitable water :math:`P_{wat}` is equivalent to the total column of
water vapor (TCWV), but the units given by ECMWF MACC Reanalysis are kg/m^2
at STP (1-atm, 25-C). Divide by ten to convert to centimeters of
precipitable water:
.. math::
P_{wat} \\left( \\text{cm} \\right) \
= TCWV \\left( \\frac{\\text{kg}}{\\text{m}^2} \\right) \
\\frac{100 \\frac{\\text{cm}}{\\text{m}}} \
{1000 \\frac{\\text{kg}}{\\text{m}^3}}
The keynames available for the ``params`` argument are given by
:const:`pvlib.iotools.ecmwf_macc.PARAMS` which maps the keys to codes used
in the API. The following keynames are available:
======= =========================================
keyname description
======= =========================================
tcwv total column water vapor in kg/m^2 at STP
aod550 aerosol optical depth measured at 550-nm
aod469 aerosol optical depth measured at 469-nm
aod670 aerosol optical depth measured at 670-nm
aod865 aerosol optical depth measured at 865-nm
aod1240 aerosol optical depth measured at 1240-nm
======= =========================================
If ``lookup_params`` is ``False`` then ``params`` must contain the codes
preformatted according to the ECMWF MACC Reanalysis API. This is useful if
you want to retrieve codes that are not mapped in
:const:`pvlib.iotools.ecmwf_macc.PARAMS`.
Specify a custom ``target`` function to modify how the ECMWF API function
``server.retrieve`` is called. The ``target`` function must have the
following signature in which the parameter definitions are similar to
:func:`pvlib.iotools.get_ecmwf_macc`. ::
target(server, startdate, stopdate, params, filename) -> None
Examples
--------
Retrieve the AOD measured at 550-nm and the total column of water vapor for
November 1, 2012.
>>> from datetime import date
>>> from pvlib.iotools import get_ecmwf_macc
>>> filename = 'aod_tcwv_20121101.nc' # .nc extension added if missing
>>> params = ('aod550', 'tcwv')
>>> start = end = date(2012, 11, 1)
>>> t = get_ecmwf_macc(filename, params, start, end)
>>> t.is_alive()
True
"""
if not filename.endswith('nc'):
filename += '.nc'
if lookup_params:
try:
params = '/'.join(PARAMS.get(p) for p in params)
except TypeError:
params = PARAMS.get(params)
startdate = startdate.strftime('%Y-%m-%d')
stopdate = stopdate.strftime('%Y-%m-%d')
if not server:
server = ECMWFDataServer()
t = threading.Thread(target=target, daemon=True,
args=(server, startdate, stopdate, params, filename))
t.start()
return t
class ECMWF_MACC(object):
"""container for ECMWF MACC reanalysis data"""
TCWV = 'tcwv' # total column water vapor in kg/m^2 at (1-atm,25-degC)
def __init__(self, filename):
self.data = netCDF4.Dataset(filename)
# data variables and dimensions
variables = set(self.data.variables.keys())
dimensions = set(self.data.dimensions.keys())
self.keys = tuple(variables - dimensions)
# size of lat/lon dimensions
self.lat_size = self.data.dimensions['latitude'].size
self.lon_size = self.data.dimensions['longitude'].size
# spatial resolution in degrees
self.delta_lat = -180.0 / (self.lat_size - 1) # from north to south
self.delta_lon = 360.0 / self.lon_size # from west to east
# time resolution in hours
self.time_size = self.data.dimensions['time'].size
self.start_time = self.data['time'][0]
self.stop_time = self.data['time'][-1]
self.time_range = self.stop_time - self.start_time
self.delta_time = self.time_range / (self.time_size - 1)
def get_nearest_indices(self, latitude, longitude):
"""
Get nearest indices to (latitude, longitude).
Parameters
----------
latitude : float
Latitude in degrees
longitude : float
Longitude in degrees
Returns
-------
idx_lat : int
index of nearest latitude
idx_lon : int
index of nearest longitude
"""
# index of nearest latitude
idx_lat = int(round((latitude - 90.0) / self.delta_lat))
# avoid out of bounds latitudes
if idx_lat < 0:
idx_lat = 0 # if latitude == 90, north pole
elif idx_lat > self.lat_size - 1:
idx_lat = self.lat_size - 1 # if latitude == -90, south pole
# adjust longitude from -180/180 to 0/360
longitude = longitude % 360.0
# index of nearest longitude
idx_lon = int(round(longitude / self.delta_lon)) % self.lon_size
return idx_lat, idx_lon
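# Worked example (assuming the default 0.75-degree MACC grid, 241 x 480 cells):
# delta_lat = -0.75 and delta_lon = 0.75, so get_nearest_indices(38.2, -122.1)
# returns idx_lat = round((38.2 - 90)/-0.75) = 69 and
# idx_lon = round((-122.1 % 360)/0.75) % 480 = 317.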
def interp_data(self, latitude, longitude, utc_time, param):
"""
Interpolate ``param`` values to ``utc_time`` using indices nearest to
(``latitude, longitude``).
Parameters
----------
latitude : float
Latitude in degrees
longitude : float
Longitude in degrees
utc_time : datetime.datetime or datetime.date
Naive or UTC date or datetime to interpolate
param : str
Name of the parameter to interpolate from the data
Returns
-------
Interpolated ``param`` value at (``utc_time, latitude, longitude``)
Examples
--------
Use this to get a single value of a parameter in the data at a specific
time and set of (latitude, longitude) coordinates.
>>> from datetime import datetime
>>> from pvlib.iotools import ecmwf_macc
>>> data = ecmwf_macc.ECMWF_MACC('aod_tcwv_20121101.nc')
>>> dt = datetime(2012, 11, 1, 11, 33, 1)
>>> data.interp_data(38.2, -122.1, dt, 'aod550')
"""
nctime = self.data['time'] # time
ilat, ilon = self.get_nearest_indices(latitude, longitude)
# time index before
before = netCDF4.date2index(utc_time, nctime, select='before')
fbefore = self.data[param][before, ilat, ilon]
fafter = self.data[param][before + 1, ilat, ilon]
dt_num = netCDF4.date2num(utc_time, nctime.units)
time_ratio = (dt_num - nctime[before]) / self.delta_time
return fbefore + (fafter - fbefore) * time_ratio
def read_ecmwf_macc(filename, latitude, longitude, utc_time_range=None):
"""
Read data from ECMWF MACC reanalysis netCDF4 file.
Parameters
----------
filename : string
full path to netCDF4 data file.
latitude : float
latitude in degrees
longitude : float
longitude in degrees
utc_time_range : sequence of datetime.datetime
pair of start and stop naive or UTC date-times
Returns
-------
data : pandas.DataFrame
dataframe for specified range of UTC date-times
"""
ecmwf_macc = ECMWF_MACC(filename)
try:
ilat, ilon = ecmwf_macc.get_nearest_indices(latitude, longitude)
nctime = ecmwf_macc.data['time']
if utc_time_range:
start_idx = netCDF4.date2index(
utc_time_range[0], nctime, select='before')
stop_idx = netCDF4.date2index(
utc_time_range[-1], nctime, select='after')
time_slice = slice(start_idx, stop_idx + 1)
else:
time_slice = slice(0, ecmwf_macc.time_size)
times = netCDF4.num2date(nctime[time_slice], nctime.units)
df = {k: ecmwf_macc.data[k][time_slice, ilat, ilon]
for k in ecmwf_macc.keys}
if ECMWF_MACC.TCWV in df:
# convert total column water vapor in kg/m^2 at (1-atm, 25-degC) to
# precipitable water in cm
df['precipitable_water'] = df[ECMWF_MACC.TCWV] / 10.0
finally:
ecmwf_macc.data.close()
return pd.DataFrame(df, index=times.astype('datetime64[s]'))
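# Minimal usage sketch (hypothetical file name and coordinates; the file is
# assumed to contain the 'aod550' variable):
#
#     >>> from datetime import datetime
#     >>> times = (datetime(2012, 11, 1, 0), datetime(2012, 11, 1, 21))
#     >>> df = read_ecmwf_macc('aod_tcwv_20121101.nc', 38.2, -122.1, times)
#     >>> df['aod550'] # AOD at 550 nm at the grid point nearest to the site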
| bsd-3-clause |
kikocorreoso/mplutils | mplutils/tests/test_colors.py | 1 | 2445 | # -*- coding: utf-8 -*-
"""
Tests for classic module
"""
import unittest
import os
import numpy as np
import matplotlib.pyplot as plt
from mplutils import colors
class colors_mpl_cnames_test(unittest.TestCase):
def test_bad_string(self):
result = colors.colors_mpl_cnames('kiko')
self.assertEqual(repr(result),
'No colors found with kiko in color name')
def test_good_string(self):
result = colors.colors_mpl_cnames('yellow')
self.assertEqual(sorted(result.cnames),
['greenyellow',
'lightgoldenrodyellow',
'lightyellow',
'yellow',
'yellowgreen'])
result = colors.colors_mpl_cnames('maroon')
self.assertEqual(sorted(result.cnames),
['maroon'])
class colors_check_grayscale_test(unittest.TestCase):
def test_luminosity_grayscale(self):
fig, ax = plt.subplots()
ax.plot([1,2,3], 'b')
colors.colors_check_grayscale(fig,
transform = 'luminosity',
filename = 'temp.png')
img = plt.imread('temp.png')
img = img.reshape(img.shape[0] * img.shape[1], img.shape[2])
self.assertTrue(np.all(img[:,0]*3 == img[:,:3].sum(axis = 1)))
def test_luma_grayscale(self):
fig, ax = plt.subplots()
ax.plot([1,2,3], 'b')
colors.colors_check_grayscale(fig,
transform = 'luma',
filename = 'temp.png')
img = plt.imread('temp.png')
img = img.reshape(img.shape[0] * img.shape[1], img.shape[2])
self.assertTrue(np.all(img[:,0]*3 == img[:,:3].sum(axis = 1)))
def test_average_grayscale(self):
fig, ax = plt.subplots()
ax.plot([1,2,3], 'b')
colors.colors_check_grayscale(fig,
transform = 'average',
filename = 'temp.png')
img = plt.imread('temp.png')
img = img.reshape(img.shape[0] * img.shape[1], img.shape[2])
self.assertTrue(np.all(img[:,0]*3 == img[:,:3].sum(axis = 1)))
def tearDown(self):
os.remove('temp.png')
if __name__ == "__main__":
unittest.main(verbosity = 2) | mit |
fherwig/ppmpy | ppmpy/ppm.py | 1 | 179947 | #
# ppm.py - Tools for accessing and visualising PPMstar data.
# Depends on the nugridpy package developed by the
# NuGrid collaboration
# (c) 2010 - 2013 Daniel Alexander Bertolino Conti
# (c) 2011 - 2013 Falk Herwig
# (c) 2014 - 2015 Sam Jones, Falk Herwig, Robert Andrassy
#
"""
ppm.py
PPM is a Python module for reading Yprofile-01-xxxx.bobaaa files.
Simple session for working with ppm.py, here I assume user's working
directory contains some YProfile files.
If the user finds any bugs or errors, please email us.
Yprofile files Assumptions
==========================
- labeled as YProfile-01-xxxx.bobaaa and the xxxx is the NDump that is
located within each file.
- There can be a maximum of 9999 files in each directory. The first 10
lines of each file are the only place where header data is uniquely
located.
- Header data is separated by five spaces and line breaks.
- An attribute can't be in the form of a number, i.e. if the user is able
to 'float(attribute)' (in Python) without an error, the attribute will not
be returned.
- A row of column Names or Cycle Names is preceded and followed by a
blank line.
- A block of data values are preceded and followed by a blank line
except the last data block in the file, where it is not followed by a
blank line.
- In the YProfile file, if a line of attributes contains the attribute
Ndump, then that line of attributes and any other following lines of
attributes in the file are cycle attributes.
Header Attribute Assumptions
============================
- Header attributes are separated by lines and instances of four spaces
( )
- Header attributes come in one of the six types described below.
- The first type is the first attribute in the file. It is formatted in
such a way that the name of the attribute is separated from its
associated data with an equals sign ex.
Stellar Conv. Luminosity = 1.61400E-02 x 10^43 ergs,
- The second type is when an attribute contains the sub string 'grid;'.
It is formatted in such a way such that there are 3 numerical values
separated by 'x' which are then followed by the string ' grid;' ex.
384 x 384 x 384 grid;
- The third type is where the name of the attribute is separated from its
associated data with an equals sign. Also that data may be followed
by a unit of measurement. ex.
Thickness (Mm) of heating shell = 1.00000E+00
- The fourth type is when an attribute contains a colon. The string
before the colon is the title of the attribute. After the colon there
is a list of n sub-attributes, each with a sub-title separated from
its value by an equals sign. Also, each sub-attribute is separated
by a comma ex.
At base of the convection zone: R = 9.50000E+00, g = 4.95450E-01,
rho = 1.17400E+01, p = 1.69600E+01
- The fifth is when an attribute starts with 'and'. After the 'and', the
next word has to be the same as one word contained in the
previous attribute ex.
and of transition from convection to stability = 5.00000E-01 at
R = 3.00000E+01 Mm.
- The sixth is when there is a string or attribute title followed by two
spaces followed by one value followed by two spaces followed by an
'and' which is then followed by a second Value ex.
Gravity turns on between radii 6.00000E+00 and 7.00000E+00 Mm.
Examples
========
Here is an example runthrough.
>>> from ppm import *
>>> p=y_profile()
>>> head= p.hattri
>>> cols= p.dcols
>>> cyc=p.cattri
>>> print head
[['Stellar Conv. Luminosity', '1.61400E-02 x 10^43 ergs,'], ['384 x 384 x 384 grid;'], ... ]
>>> print cols
['j', 'Y', 'FVconv', 'UYconv', ... , 'Ek H+He']
>>> print cyc
['Ndump', 't', 'trescaled', 'Conv Ht', 'H+He Ht', ..., 'EkXZHHeMax']
>>> j2=p.getColData('j','Yprofile-01-0002',numType='file',resolution='a')
>>> print j2
[[384.0, 383.0, 382.0,..., 1.0],[192.0, 191.0, 190.0,...,1.0]]
>>> j2=p.get('j')
>>> print j2
[[384.0, 383.0, 382.0,..., 1.0],[192.0, 191.0, 190.0,...,1.0]]
>>> j55=p.getColData('j',55,numType='t',resolution='l')
The closest time is at Ndump = 2
>>> print j55
>>> y=p.getColData('Rho1 H+He',2, resolution='L')
>>> print y
[2.0420099999999999e-07, 5.4816300000000004e-07, ... , 0]
and
>>> p.plot('j','FVconv')
plots the data.
"""
from numpy import *
import numpy as np
from math import *
from nugridpy.data_plot import *
from nugridpy import utils
import matplotlib.pylab as pyl
import matplotlib.pyplot as pl
import os
import re
from nugridpy import astronomy as ast
import scipy.interpolate as interpolate
from scipy import optimize
import copy
# from rprofile import rprofile_reader
def prep_Yprofile_data(user="Paul", run="BW-Sakurai-1536-N13"):
'''
for given user and run create YProfile dir in run dir and link all
YProfile data from dump directories
this is for use on BlueWaters
'''
import subprocess
login_names = {'Paul': 'pwoodwar',\
'Stou': 'sandalsk',\
'Falk': 'fherwig' }
run_dir = '/scratch/sciteam/'+login_names[user]+'/'+run
data_dir = run_dir+'/YProfiles'
mkdir_command = 'mkdir '+data_dir
subprocess.call([mkdir_command],shell=True)
remove_broken_links = 'find -L '+data_dir+' -type l -delete'
subprocess.call([remove_broken_links],shell=True)
link_command = 'ln -fs '+run_dir+'/????/YProfile-01/* '+data_dir
subprocess.call([link_command],shell=True)
return data_dir
def set_nice_params():
fsize=14
params = {'axes.labelsize': fsize,
# 'font.family': 'serif',
'font.family': 'Times New Roman',
'figure.facecolor': 'white',
'font.size': fsize,
'legend.fontsize': fsize*0.75,
'xtick.labelsize': fsize*0.8,
'ytick.labelsize': fsize*0.8,
'ytick.minor.pad': 8,
'ytick.major.pad': 8,
'xtick.minor.pad': 8,
'xtick.major.pad': 8,
'text.usetex': False}
pl.rcParams.update(params)
def set_YProf_path(path,YProf_fname='YProfile-01-0000.bobaaa'):
'''Set path to location where YProfile directories can be found.
For example, set path to the swj/PPM/RUNS_DIR VOSpace directory
as a global variable, so that it need only be set once during
an interactive session; instances can then be loaded by
refering to the directory name that contains YProfile files.
ppm.ppm_path contains path
ppm.cases contains dirs in path that contain file with name
YProf_fname usually used to determine dirs with
YProfile files
'''
global ppm_path, cases
ppm_path = path
cases = []
for thing in os.listdir(ppm_path):
dir_thing = os.path.join(ppm_path,thing)
if os.path.isdir(dir_thing) and \
os.path.isfile(os.path.join(ppm_path,thing,YProf_fname)):
cases.append(thing)
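# Minimal usage sketch (hypothetical path):
#
# set_YProf_path('/data/ppm/RUNS_DIR/')
# print cases # directories under ppm_path that contain YProfile files
# YY = yprofile(cases[0]) # load one case by directory name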
def prof_compare(cases,ndump=None,yaxis_thing='FV H+He',ifig=None,num_type='ndump',
labels=None,logy=True):
"""
Compare profiles of quantities from multiple PPM Yprofile instances at a
given time of nump number.
Parameters
----------
cases : list
list containing the Yprofile instances that you want to compare
ndump : string or int, optional
The filename, Ndump or time, if None it defaults to the
last NDump. The default is None.
yaxis_thing : string, optional
What quantity to plot on the y-axis.
The default is 'FV H+He'
ifig : int, optional
Figure number. If None, chose automatically.
The default is None.
num_type : string, optional
Designates how this function acts and how it interprets
fname. If numType is 'file', this function will get the
desired attribute from that file. If numType is 'NDump'
function will look at the cycle with that nDump. If
numType is 'T' or 'time' function will find the _cycle
with the closest time stamp. The default is "ndump".
labels : list, optional
List of labels; one for each of the cases.
If None, labels are simply indices.
The default is None.
logy : boolean, optional
Should the y-axis have a logarithmic scale?
The default is True.
Examples
--------
import ppm
run1='/rpod3/fherwig/PPM/RUNS_DATA/VLTP_MESA_M0.542/C1'
run2='/rpod3/fherwig/PPM/RUNS_DATA/sakurai-num-exp-robustness-onset-GOSH/A1'
YY=ppm.yprofile(run1)
YY2=ppm.yprofile(run2)
ppm.prof_compare([YY,YY2],ndump=100,num_type='time',
labels=['VLTP_0.543','SAK'],yaxis_thing='Rho',
logy=False)
"""
fsize=14
params = {'axes.labelsize': fsize,
# 'font.family': 'serif',
'font.family': 'Times New Roman',
'figure.facecolor': 'white',
'font.size': fsize,
'legend.fontsize': fsize,
'xtick.labelsize': fsize*0.8,
'ytick.labelsize': fsize*0.8,
'text.usetex': False}
pl.rcParams.update(params)
jline_offset=6
if labels is None:
labels=[]*len(cases)
if ifig is None:
pl.figure()
else:
pl.figure(ifig)
i=0
for Y in cases:
j=i+jline_offset
if labels is None:
labels[i] = str(i)
Y.plot('Y',yaxis_thing,fname=ndump,numtype=num_type,legend=labels[i],\
logy=logy,shape=utils.linestyle(j)[0],markevery=utils.linestyle(j)[1])
i += 1
class yprofile(DataPlot):
"""
Data structure for holding data in the YProfile.bobaaa files.
Parameters
----------
sldir : string
which directory we are working in. The default is '.'.
"""
def __init__(self, sldir='.'):
"""
init method
Parameters
----------
sldir : string
which directory we are working in. The default is '.'.
"""
self.files = [] # List of files in this directory
self.cycles= [] # list of cycles in this directory
self.hattrs = [] # header attributes
self.dcols = [] # list of the column attributes
self.cattrs= [] # List of the attributes of the y profiles
self._cycle=[] # private var
self._top=[] # private var
self.sldir = sldir #Standard Directory
if not os.path.isdir(sldir): # then try if ppm_path has been set
try:
sldir = ppm_path+'/'+sldir
except:
print 'ppm_path not correctly set: '+sldir+' is not a directory.'
self.sldir = sldir
if not os.path.isdir(sldir): # If the path still does not exist
print 'error: Directory, '+sldir+ ' not found'
print 'Now returning None'
return None
else:
f=os.listdir(sldir) # reads the directory
for i in range(len(f)): # Removes any files that are not YProfile files
if re.search(r"^YProfile-01-[0-9]{4,4}.bobaaa$",f[i]):
self.files.append(f[i])
self.files.sort()
if len(self.files)==0: # If there are no YProfile files in the directory
print 'Error: no YProfile named files exist in Directory'
print 'Now returning None'
return None
slname=self.files[len(self.files)-1] #
self.slname = slname
print "Reading attributes from file ",slname
self.hattrs,self.dcols, self._cycle=self._readFile()
# split the header into header attributes and top attributes
self._splitHeader()
# return the header attributes as a dictionary
self.hattrs=self._formatHeader()
# returns the concatenation of cycle and top attributes
self.cattrs=self.getCattrs()
self.ndumpDict=self.ndumpDict(self.files)
self.radbase = float(self.hattrs['At base of the convection zone R'])
self.radtop = float(self.hattrs['Thickness (Mm) of transition from convection to stability '].split()[4])
print 'There are '+str(len(self.files))+' YProfile files in the ' +self.sldir+' directory.'
print 'Ndump values range from '+str(min(self.ndumpDict.keys()))+' to '+str(max(self.ndumpDict.keys()))
t=self.get('t',max(self.ndumpDict.keys()))
t1=self.get('t',min(self.ndumpDict.keys()))
print 'Time values range from '+ str(t1[-1])+' to '+str(t[-1])
self.cycles=self.ndumpDict.keys()
return None
def ndumpDict(self, fileList):
"""
Method that creates a dictionary of Filenames with the
associated key of the filename's Ndump.
Parameters
----------
fileList : list
A list of yprofile filenames.
Returns
-------
dictionary
the filenamem, ndump dictionary
"""
ndumpDict={}
for i in xrange(len(fileList)):
ndump=fileList[i].split("-")[-1]
ndump=ndump.split(".")[0]
ndumpDict[int(ndump)]=fileList[i]
return ndumpDict
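# For example, ['YProfile-01-0000.bobaaa', 'YProfile-01-0012.bobaaa'] maps to
# {0: 'YProfile-01-0000.bobaaa', 12: 'YProfile-01-0012.bobaaa'}.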
def getHattrs(self):
""" returns the list of header attributes"""
return sorted(self.hattrs) # sorted header attribute names
def getDCols(self):
""" returns the list of column attributes"""
return self.dcols
def getCattrs(self):
""" returns the list of cycle attributes"""
dupe=False
data=[]
for i in range(len(self._cycle)):
data.append(self._cycle[i])
for i in range(len(self._top)): # checking for duplicate attributes
# ie if an attribute is both a top attri and a cycle attri
for k in range(len(self._cycle)):
if self._top[i]==self._cycle[k]:
dupe=True
if not dupe:
data.append(self._top[i])
dupe=False
return data
def get(self, attri, fname=None, numtype='ndump', resolution='H', \
silent=False, **kwargs):
"""
Method that dynamically determines the type of attribute that is
passed into this method. Also it then returns that attribute's
associated data.
Parameters
----------
attri : string
The attribute we are looking for.
fname : string, optional
The filename, Ndump or time, if None it defaults to the
last NDump. The default is None.
numtype : string, optional
Designates how this function acts and how it interprets
fname. If numType is 'file', this function will get the
desired attribute from that file. If numType is 'NDump'
function will look at the cycle with that nDump. If
numType is 'T' or 'time' function will find the _cycle
with the closest time stamp. The default is "ndump".
Resolution : string, optional
Data you want from a file, for example if the file contains
two different sized columns of data for one attribute, the
argument 'a' will return them all, 'h' will return the
largest, 'l' will return the lowest. The default is 'H'.
"""
isCyc=False #If Attri is in the Cycle Attribute section
isCol=False #If Attri is in the Column Attribute section
isHead=False #If Attri is in the Header Attribute section
if fname==None:
fname=max(self.ndumpDict.keys())
if not silent:
print "Warning at yprofile.get(): fname is None, "\
"the last dump (%d) will be used." \
% max(self.ndumpDict.keys())
if attri in self.cattrs: # if it is a cycle attribute
isCyc = True
elif attri in self.dcols:# if it is a column attribute
isCol = True
elif attri in self.hattrs:# if it is a header attribute
isHead = True
# directing to proper get method
if isCyc:
return self.getCycleData(attri,fname, numtype, resolution=resolution, \
silent=silent)
if isCol:
return self.getColData(attri,fname, numtype, resolution=resolution, \
silent=silent)
if isHead:
return self.getHeaderData(attri, silent=silent)
else:
res = self.computeData(attri, fname, numtype, silent=silent, **kwargs)
if res is None:
if not silent:
print 'That Data name does not appear in this YProfile Directory'
print 'Returning none'
return res
def getHeaderData(self, attri, silent=False):
"""
Parameters
----------
attri : string
The name of the attribute.
Returns
-------
string or integer
Header data that is associated with the attri.
Notes
-----
To see all possible options in this instance type
instance.getHattrs().
"""
isHead = False
if attri in self.hattrs:
isHead = True
if not isHead:# Error checking
if not silent:
print 'The attribute '+attri+' does not appear in these YProfiles'
print 'Returning None'
return None
data=self.hattrs[attri] #Simple dictionary access
return data
def getCycleData(self, attri, FName=None, numType='ndump',
Single=False, resolution='H', silent=False):
"""
Parameters
----------
attri : string
What we are looking for.
FName : string, optional
The filename, Ndump or time, if None it defaults to the last
NDump. The default is None.
numType : string, optional
Designates how this function acts and how it interprets
FName. If numType is 'file', this function will get the
desired attribute from that file. If numType is 'NDump'
function will look at the cycle with that nDump. If
numType is 'T' or 'time' function will find the _cycle
with the closest time stamp. The default is "ndump".
Single : boolean, optional
Determining whether the user wants just the Attri contained
in the specified ndump or all the dumps below that ndump.
The default is False.
Resolution : string, optional
Data you want from a file, for example if the file contains
two different sized columns of data for one attribute, the
argument 'a' will return them all, 'h' will return the
largest, 'l' will return the lowest. The default is 'H'.
Returns
-------
list
A Datalist of values for the given attribute or a
single attribute in the file FName.
"""
if FName==None: #By default choose the last YProfile
FName=max(self.ndumpDict.keys())
if not silent:
print "Warning at yprofile.getCycleData(): FName is None, "\
"the last dump (%d) will be used." % \
max(self.ndumpDict.keys())
isCyc= False #If Attri is in the Cycle Attribute section
boo=True
filename=self.findFile(FName, numType, silent=silent)
data=0
if attri in self._cycle: #if attri is a cycle attribute rather than a top attribute
isCyc = True
if attri not in self._cycle and isCyc:# Error checking
if not silent:
print 'Sorry, that attribute does not appear in the file'
print 'Returning None'
return None
if not Single and isCyc:
data= self.getColData( attri,filename,'file',resolution, True)
return data
if Single and isCyc:
data= self.getColData( attri,filename,'file',resolution, True)
if data==None:
return None
index=len(data)-1
return data[index]
if not Single and not isCyc:
data=[]
i=0
while boo: #Here we basically open up each YProfile File and append the required top attribute
# to our data variable
data.append(self.readTop(attri,self.files[i],self.sldir))
if self.files[i]==filename: #if we have reached the final file that the user wants.
boo=False
i+=1
for j in range(len(data)): #One top attribute has a value of '*****'; we cannot float that, so we ignore it here
if '*' not in data[j]:
data[j]=float(data[j])
data=array(data)
if Single and not isCyc:
data=self.readTop(attri,filename,self.sldir)
data=float(data)
return data
def getColData(self, attri, FName, numType='ndump', resolution='H',
cycle=False, silent=False):
"""
Parameters
----------
attri : string
Attri is the attribute we are looking for.
FName : string
The name of the file, Ndump or time we are looking for.
numType : string, optional
Designates how this function acts and how it interprets
FName. If numType is 'file', this function will get the
desired attribute from that file. If numType is 'NDump'
function will look at the cycle with that nDump. If
numType is 'T' or 'time' function will find the _cycle
with the closest time stamp. The default is "ndump".
Resolution : string, optional
Data you want from a file, for example if the file contains
two different sized columns of data for one attribute, the
argument 'a' will return them all, 'H' will return the
largest, 'l' will return the lowest. The default is 'H'.
cycle : boolean, optional
Are we looking for a cycle or column attribute.
The default is False.
Returns
-------
list
A Datalist of values for the given attribute.
Notes
-----
To ADD: options for a middle length of data.
"""
def reduce_h(r):
'''
Function for reducing hi-res arrays to low-res arrays.
To be called for FV profiles at present because of
averaging issues.
Parameters
----------
r : array
array to be reduced
Output
------
r : array
reduced array
'''
return (r[0::2] + r[1::2])/2. # average adjacent pairs of cells
num=[] #temp list that holds the line numbers where the
# attribute is found in
dataList=[] # holds final data
attriLine='' # hold a line that the attribute is found in
attriList=[] # holds a list of attributes that the attribute is
# found in
numList=[] # holds a single column of data
boo=False #temp boolean
tmp=''
FName=self.findFile(FName, numType, silent=silent)
#print FName
stddir=self.sldir
resolution=resolution.capitalize()
if stddir.endswith('/'): # Making sure that the standard dir ends with a slash
# So we get something like /dir/file instead of /dirfile
FName = str(stddir)+str(FName)
else:
FName = str(stddir)+'/'+str(FName)
boo=True
try:
f=open(FName, 'r')
except IOError:
if not silent:
print "That File, "+FName+ ", does not exist."
print 'Returning None'
return None
List=f.readlines()
f.close()
for i in range(len(List)):#finds in what lines the attribute is
#located in. This is stored in num, a list of line numbers
tmpList=List[i].split(' ')
for k in range(len(tmpList)):
tmpList[k]=tmpList[k].strip()
for k in range(len(tmpList)):
if not cycle:
if attri == tmpList[k] and not('Ndump' in List[i]): #if we are looking for a column attribute
num.append(i)
else:
if attri == tmpList[k] and ('Ndump' in List[i]): #if we are looking for a cycle attribute
num.append(i)
if i==(len(List) -1) and len(num)==0: #error checking
if not silent:
print "Attribute DNE in file, Returning None"
return None
for j in range(len(num)): #for each line that attri appears in
attriList=[]
rowNum=num[j] #the line in the file that the attribute
#appears at
attriLine=List[rowNum]
tmpList=attriLine.split(' ') #tmplist will be a list of
#attributes
for i in range(len(tmpList)):#formating tmplist
tmpList[i]=tmpList[i].strip()
if tmpList[i]!='':
attriList.append(tmpList[i])
for i in range(len(attriList)):
# here we find at what element in the list attri
# appears at, this value bacomes colNum
if attri == attriList[i]:
break
colNum=i
rowNum+=2 #Since the line of Data is two lines after
#the line of attributes
#print List, rowNum
while rowNum<len(List) and List[rowNum]!= '\n': #and rowNum<len(List)-1:
# while we are looking at a line with data in it
# and not a blank line and not the last line in
# the file
tmpList=List[rowNum].split(None) #split the line
#into a list of data
#print tmpList, colNum
numList.append(tmpList[colNum])
#append it to the list of data
rowNum+=1
#Because an attribute's column of data may appear more
#than once in a file, we must make sure not to add it twice
if len(dataList)==0: #No data in dataList yet, no need to check
dataList.append(numList)
else:
for k in range(len(dataList)):
#if a list of data is already in the
#dataList with the same length as numList
#it means the data is already present, do not add
if len(numList)== len(dataList[k]):
boo=False
if boo:
dataList.append(numList)
boo = True
numList=[]
tmp=''
tmpList=[]
#here we format the data if the user wants higher or the lower resolution of data
if resolution.startswith('H'):
for i in range(len(dataList)):
if len(dataList[i])>len(tmp):
tmp=dataList[i]
dataList=array(tmp,dtype=float)
elif resolution.startswith('L'):
for i in range(len(dataList)):
if len(dataList[i])<len(tmp) or len(tmp)==0:
tmp=dataList[i]
dataList=array(tmp,dtype=float)
else:
for i in range(len(dataList)):
for k in range(len(dataList[i])):
dataList[i][k]=float(dataList[i][k])
dataList[i][k]=float(dataList[i][k])
dataList[i]=array(dataList[i])
try: # If dataList is a list of lists that has one element [[1,2,3]]
# reformat dataList as [1,2,3]
j=dataList[1][0]
except IndexError:
tmp = True
except TypeError:
tmp = False
if tmp:
dataList=dataList[0]
tmp = False
# print resolution, len(dataList), int(self.hattrs['gridX'])
# reduce FV arrays if resolution:
if resolution == 'L' and len(dataList)==int(self.hattrs['gridX']):
#print 'reducing array for low resolution request'
dataList = reduce_h(dataList)
return dataList
def computeData(self, attri, fname, numtype = 'ndump', silent=False,\
**kwargs):
def get_missing_args(required_args, **kwargs):
missing_args = []
for this_arg in required_args:
if not this_arg in kwargs:
missing_args.append(this_arg)
return missing_args
def cdiff(x):
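# Centered-difference helper: with this roll convention dx[i] = 0.5*(x[i-1] - x[i+1]);
# the first and last cells simply copy their neighbours' values.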
dx = 0.5*(np.roll(x, +1) - np.roll(x, -1))
dx[0] = dx[1]
dx[-1] = dx[-2]
return dx
# The unit of G in the code is 10^{-3} g cm^3 s^{-2}.
G_code = ast.grav_const/1e-3
nabla_ad = 0.4
if attri == 'T9':
required_args = ('airmu', 'cldmu')
missing_args = get_missing_args(required_args, **kwargs)
if len(missing_args) > 0:
if not silent:
print 'The following arguments are missing: ', \
missing_args
return None
airmu = kwargs['airmu']
cldmu = kwargs['cldmu']
if 'T9_corr_params' in kwargs:
T9_corr_params = kwargs['T9_corr_params']
else:
T9_corr_params = None
rho = self.get('Rho', fname, numtype = numtype, resolution = 'l', \
silent = silent)
p = self.get('P', fname, numtype = numtype, resolution = 'l', \
silent = silent)
fv = self.get('FV H+He', fname, numtype = numtype, resolution = 'l', \
silent = silent)
mu = fv*cldmu + (1. - fv)*airmu
# gas constant in code units
RR = 8.3144598
T9 = mu*p/(RR*rho)
if T9_corr_params is not None:
T9 = T9_corr_params[0]*T9**T9_corr_params[1]
return T9
elif attri == 'Hp':
r = self.get('Y', fname, numtype, resolution = 'l', silent = silent)
p = self.get('P', fname, numtype, resolution = 'l', silent = silent)
logp = np.log(p)
dr = cdiff(r)
dlogp = cdiff(logp)
Hp = -dr/dlogp
return Hp
elif attri == 'g':
r = self.get('Y', 0., 'time', resolution = 'l')
r_bot = float(self.get('At base of the convection zone R', 0))
rho = self.get('Rho', 0., 'time', resolution = 'l')
g_bot = float(self.get('At base of the convection zone g', 0))
# Centre r_bot on the nearest cell.
idx_bot = np.argmin(np.abs(r - r_bot))
# Mass below r_bot (using r[idx_bot] instead of r_bot makes
# g[idx_bot] == g_bot).
m_bot = g_bot*(r[idx_bot]**2)/G_code
dm = 4.*np.pi*(r**2)*cdiff(r)*rho
m = -np.cumsum(dm) # Get the mass profile by integration.
# Shift the mass profile to make sure that m[idx_bot] == m_bot.
# The mass profile at small radii won't make sense, because
# the core is artificial with no gravity.
m += m_bot - m[idx_bot]
g = G_code*m/(r**2) # Gravity profile (see the note above).
return g
elif attri == 'nabla':
T9 = self.get('T9', fname, numtype, resolution = 'l', silent = silent, \
**kwargs)
p = self.get('P', fname, numtype, resolution = 'l', silent = silent)
logT9 = np.log(T9)
logp = np.log(p)
dlogT9 = cdiff(logT9)
dlogp = cdiff(logp)
nabla = dlogT9/dlogp
return nabla
elif attri == 'nabla_rho':
rho = self.get('Rho', fname, numtype, resolution = 'l', silent = silent)
p = self.get('P', fname, numtype, resolution = 'l', silent = silent)
logrho = np.log(rho)
logp = np.log(p)
dlogrho = cdiff(logrho)
dlogp = cdiff(logp)
nabla_rho = dlogrho/dlogp
return nabla_rho
elif attri == 'N2':
g = self.get('g', 0, resolution = 'l')
Hp = self.get('Hp', fname, numtype, resolution = 'l', silent = silent)
nabla_rho = self.get('nabla_rho', fname, numtype, resolution = 'l', silent = silent)
# Ideal gas assumed.
N2 = (g/Hp)*(nabla_ad - 1. + nabla_rho)
return N2
elif attri == 'enuc_C12pg':
required_args = ('airmu', 'cldmu', 'fkair', 'fkcld', \
'AtomicNoair', 'AtomicNocld')
missing_args = get_missing_args(required_args, **kwargs)
if len(missing_args) > 0:
if not silent:
print 'The following arguments are missing: ', \
missing_args
return None
airmu = kwargs['airmu']
cldmu = kwargs['cldmu']
fkair = kwargs['fkair']
fkcld = kwargs['fkcld']
AtomicNoair = kwargs['AtomicNoair']
AtomicNocld = kwargs['AtomicNocld']
kwargs2 = {}
if 'Q' in kwargs:
kwargs2['Q'] = kwargs['Q']
if 'corr_fact' in kwargs:
kwargs2['corr_fact'] = kwargs['corr_fact']
if 'use_dt' in kwargs:
kwargs2['use_dt'] = kwargs['use_dt']
if 'T9_corr_params' in kwargs:
kwargs2['T9_corr_params'] = kwargs['T9_corr_params']
enuc_C12pg = self._get_enuc_C12pg(fname, airmu, cldmu, fkair, \
fkcld, AtomicNoair, AtomicNocld, \
numtype = numtype, silent = silent, \
**kwargs2)
return enuc_C12pg
elif attri == 'L_C12pg':
enuc_C12pg = self.get('enuc_C12pg', fname, numtype, resolution = 'l', \
silent = silent, **kwargs)
r = self.get('Y', fname, numtype, resolution = 'l', silent = silent)
dV = 4.*np.pi*r**2*cdiff(r)
L_C12pg = np.sum(enuc_C12pg*dV)
return L_C12pg
else:
return None
def _get_enuc_C12pg(self, fname, airmu, cldmu, fkair, fkcld, \
AtomicNoair, AtomicNocld, numtype = 'ndump', \
Q = 1.944, corr_fact = 1., use_dt = False, \
T9_corr_params = None, silent = False):
T9 = self.get('T9', fname = fname, numtype = numtype, \
resolution = 'l', airmu = airmu, cldmu = cldmu, \
T9_corr_params = T9_corr_params, silent = silent)
fv = self.get('FV H+He', fname = fname, numtype = numtype, \
resolution = 'l', silent = silent)
rho = self.get('Rho', fname = fname, numtype = numtype, \
resolution = 'l', silent = silent)
rhocld = self.get('Rho H+He', fname = fname, numtype = numtype, \
resolution = 'l', silent = silent)
rhoair = self.get('RHOconv', fname = fname, numtype = numtype, \
resolution = 'l', silent = silent)
dt = float(self.get('dt', fname = fname, numtype = numtype, \
silent = silent))
TP13 = T9**(1./3.)
TP23 = TP13*TP13
TP12 = np.sqrt(T9)
TP14 = np.sqrt(TP12)
TP32 = T9*TP12
TM13 = 1./TP13
TM23 = 1./TP23
TM32 = 1./TP32
T9inv = 1. / T9
thyng = 2.173913043478260869565 * T9
vc12pg = 20000000.*TM23 * np.exp(-13.692*TM13 - thyng*thyng)
vc12pg = vc12pg * (1. + T9*(9.89-T9*(59.8 - 266.*T9)))
thing2 = vc12pg + TM32*(1.0e5 * np.exp(-4.913*T9inv) + \
4.24e5 * np.exp(-21.62*T9inv))
thing2[np.where(T9 < .0059)] = 0.
thing2[np.where(T9 > 0.75)] = 200.
vc12pg = thing2 * rho * 1000.
v = 1./ rho
atomicnocldinv = 1./AtomicNocld
atomicnoairinv = 1./AtomicNoair
Y1 = rhocld * fv * v * atomicnocldinv
Y2 = rhoair * (1. - fv) * v * atomicnoairinv
smaller = .0000001
reallysmall = smaller * smaller
CN = 96.480733
if use_dt:
# We want the average rate during the current time step.
# If the burning is too fast all the stuff available burns
# in a fraction of the time step. We do not allow more to burn
# than what is available, so the average burn rate is
# lower than the instantaneous one.
thing3 = fkair * Y1 * Y2 * vc12pg * dt
thing3[np.where(Y1 < reallysmall)] = 0.
thing2 = np.min(np.array((thing3, Y1)), axis = 0)
#for i in range(len(Y1)):
# print '{:d} {:.1e} {:.1e} {:.1e}'.format(i, Y1[i], thing3[i], Y1[i]/thing3[i])
DY = fkcld * thing2
enuc = DY * rho * CN * Q / dt
else:
# We want the instantaneous burning rate. This does not
# depend on how much stuff is available.
thing3 = fkair * Y1 * Y2 * vc12pg
DY = fkcld * thing3
enuc = DY * rho * CN * Q
# This factor can account for the heating bug if present.
enuc *= corr_fact
return enuc
def findFile(self, FName, numType='FILE', silent=False):
"""
Function that finds the associated file for FName when FName
is a time or an NDump.
Parameters
----------
FName : string
The name of the file, Ndump or time we are looking for.
numType : string, optional
Designates how this function acts and how it interprets
FName. If numType is 'file', this function will get the
desired attribute from that file. If numType is 'NDump'
function will look at the cycle with that nDump. If
numType is 'T' or 'time' function will find the cycle with
the closest time stamp. The default is "FILE".
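Examples
--------
Sketch only; the dump number and time are illustrative:
YY=ppm.yprofile(path_to_data)
YY.findFile(0,numType='NDump') # file name of dump 0
YY.findFile(1500.,numType='time') # file of the dump closest in time to 1500 s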
"""
numType=numType.upper()
boo=False
indexH=0
indexL=0
if numType=='FILE':
#do nothing
return str(FName)
elif numType=='NDUMP':
try: # ensure FName can be a proper NDump, i.e. no letters
FName=int(FName)
except:
if not silent:
print 'Improper value for NDump, choosing 0 instead'
FName=0
if FName < 0:
if not silent:
print 'User cannot select a negative NDump'
print 'Reselecting NDump as 0'
FName=0
if FName not in self.ndumpDict.keys():
if not silent:
print 'NDump '+str(FName)+ ' Does not exist in this directory'
print 'Reselecting NDump as the largest in the Directory'
print 'Which is '+ str(max(self.ndumpDict.keys()))
FName=max(self.ndumpDict.keys())
boo=True
elif numType=='T' or numType=='TIME':
try: # ensure FName can be a proper time, i.e. no letters
FName=float(FName)
except:
if not silent:
print 'Improper value for time, choosing 0 instead'
FName=0
if FName < 0:
if not silent:
print 'A negative time does not exist, choosing a time = 0 instead'
FName=0
timeData=self.get('t',self.ndumpDict[max(self.ndumpDict.keys())],numtype='file')
keys=self.ndumpDict.keys()
keys.sort()
tmp=[]
for i in xrange(len(keys)):
tmp.append(timeData[keys[i]])
timeData=tmp
time= float(FName)
for i in range(len(timeData)): #for all the values of time in the list, find the Ndump that has the closest time to FName
if timeData[i]>time and i ==0:
indexH=i
indexL=i
break
if timeData[i]>time:
indexH=i
indexL=i-1
break
if i == len(timeData)-1:
indexH=i
indexL=i-1
high=float(timeData[indexH])
low= float(timeData[indexL])
high=high-time
low=time-low
if high >=low:
if not silent:
print 'The closest time is at Ndump = ' +str(keys[indexL])
FName=keys[indexL]
else:
if not silent:
print 'The closest time is at Ndump = ' +str(keys[indexH])
FName=keys[indexH]
boo=True
else:
if not silent:
print 'Please enter a valid numType identifier'
print 'Returning None'
return None
if boo: # here I assume all yprofile files start like 'YProfile-01-'
FName=self.ndumpDict[FName]
return FName
def _splitHeader(self):
"""
Private function that splits up the data in the header section of the YProfile
into header attributes and top attributes, where top attributes are just
cycle attributes located in the header section of the YProfile
"""
tmp=[]
tmp2=[]
slname=self.files[0]
# Find the header section from another YProfile
header, tm, tm1=self._readFile()
if len(header)!=len(self.hattrs): #error checking
print 'Header attribute error, directory has two YProfiles that have different header sections'
print 'Returning unchanged header'
return None
for i in range(len(header)):
if header[i]==self.hattrs[i]: # if the headers are both the same, it is a
tmp.append(header[i]) # header attribute
else: # else it changes cycle to cycle and is therefore
# a cycle attribute
tmp2.append(header[i])
for i in range(len(tmp2)): #Formats the top attributes
tmp2[i]=tmp2[i][0] #Ie splits the attributes from its associated data.
self.hattrs=tmp
self._top=tmp2
def _formatHeader(self):
"""
Private function that takes in a set of header attributes and
then Formats them into a dictionary.
Input -> A List of headers in the proper format
Assumptions
===========
The first element in the list is Stellar Conv. Luminosity header
The output in the dictionary looks like
{'Stellar Conv. Luminosity':The associated data}.
If an element contains the string 'grid;' it is the grid size
header and the first, second and third are the x, y and z grid
sizes respectively.
The output in the dictionary looks like
{'gridX':9,'gridY':9,'gridZ':9}.
If an element is size two the first item is the header name and
the second will be its associated value.
The output in the dictionary looks like {'First Item':value}
If an element contains a colon, The string preceding the colon
is one part of the header name. The string after the colon
will be a list of associations in the form of the name followed
by an equals sign followed by its value.
For example, a line like 'Title: a=2, b=3' is stored as
{'Title a': '2', 'Title b': '3'}.
"""
headers=self.hattrs
dic={}
i=0
print "Analyzing headers ..."
while i < len(headers):
if i ==0: # If it is the Stellar Luminosity attribute
tmp=headers[i][1]
"""
if '^' in tmp[2]:
j=tmp[2].split('^')
j=float(j[0])**float(j[1])
else:
j=tmp[2]
tmp=float(tmp[0])*float(j)
"""
dic[headers[i][0]]=tmp
i+=1
elif 'grid;' in headers[i][0]: # If it's the grid header attribute
tmp1=[]
tmp= headers[i][0].split()
for j in range(len(tmp)):
tmp[j]=tmp[j].strip('x')
tmp[j]=tmp[j].strip('')
for j in range(len(tmp)):
if tmp[j]!='':
tmp1.append(tmp[j])
tmp=tmp1
dic['gridX']=tmp[0]
dic['gridY']=tmp[1]
dic['gridZ']=tmp[2]
i+=1
elif len(headers[i])==2: # If it's a header attribute separated by a single = sign
tmp=headers[i][1]
dic[headers[i][0]]=tmp
i+=1
elif ':' in headers[i][0]: # If it's a header attribute like 'Title: a=2, b=3'
tmp=headers[i][0].split(':')
tmp2=tmp[1].split(',')
for j in range(len(tmp2)):
tmp3=tmp2[j].split('=')
for k in range(len(tmp3)):
tmp3[k]=tmp3[k].strip()
dic[tmp[0]+' '+tmp3[0]]=tmp3[1]
i+=1
elif headers[i][0].startswith('and '):
tmp=headers[i][0].split('=',1)
tmp2=tmp[0].lstrip('and')
tmp2=tmp2.lstrip()
prev=headers[i-1][0].split()
curr=tmp2.split()
curr=curr[0]
tmp3=''
for j in range(len(prev)):
if prev[j]== curr:
break;
tmp3+=prev[j]+' '
tmp2=tmp3+tmp2
dic[tmp2]=tmp[1].strip()
i+=1
else:
tmp=headers[i][0].split(' ')
for j in range(len(tmp)):
tmp[j]=tmp[j].strip()
dic[tmp[0]+' Low']=tmp[1]
dic[tmp[0]+' High']=tmp[3]
i+=1
return dic
# below are some plotting functions integrated from the PPMstar_svn
# server utils/YProfPy directory
def prof_time(self,fname,yaxis_thing='vY',num_type='ndump',logy=False,
radbase=None,radtop=None,ifig=101,ls_offset=0,label_case="",
**kwargs):
"""
Plot the time evolution of a profile from multiple
dumps of the same run (...on the same figure).
Velocities 'v', 'vY' and/or 'vXZ' may also be plotted.
Parameters
----------
fname : int or list
Cycle or list of cycles to plot. Could also be time
in minutes (see num_type).
yaxis_thing : string
What should be plotted on the y-axis?
In addition to the dcols quantities, you may also choose
'v', 'vY', and 'vXZ' -- the RMS 'total', radial and
tangential velocities.
The default is 'vY'.
logy : boolean
Should the velocity axis be logarithmic?
The default value is False
radbase, radtop : float
Radii of the base and top of the convective region,
respectively. If not None, dashed vertical lines will
be drawn to mark these boundaries.
The default value is None.
num_type : string, optional
Designates how this function acts and how it interprets
fname. If numType is 'file', this function will get the
desired attribute from that file. If numType is 'NDump'
function will look at the cycle with that nDump. If
numType is 'T' or 'time' function will find the cycle
with the closest time stamp. The default is "ndump".
ifig : integer
figure number
ls_offset : integer
linestyle offset for argument in utils.linestyle for plotting
more than one case
label_case : string
optional extra label for case
Examples
--------
import ppm
run='/rpod3/fherwig/PPM/RUNS_DATA/VLTP_MESA_M0.542/C1'
YY=ppm.yprofile(run)
YY.prof_time([0,5,10,15,20,25],logy=False,num_type='time',radbase=10.7681,radtop=23.4042)
"""
#fsize=14
#params = {'axes.labelsize': fsize,
# 'font.family': 'serif',
#'font.family': 'Times New Roman',
#'figure.facecolor': 'white',
#'text.fontsize': fsize,
#'legend.fontsize': fsize,
#'xtick.labelsize': fsize*0.8,
#'ytick.labelsize': fsize*0.8,
#'text.usetex': False}
#pl.rcParams.update(params)
if type(fname) is not list:
fname = [fname]
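# when times are given (num_type='time'), they are in minutes (see the
# docstring above); convert to seconds to match the 't' column used below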
if num_type == 'time':
fname = [f * 60 for f in fname]
pl.figure(ifig)
i=0
for dump in fname:
# FH: I am changing this. The double resolution data
# in YProfiles is broken and needs to be reduced.
# if yaxis_thing in ['j','Y','FVconv','UYconv','FV H+He',\
# 'UY H+He','Rho','Rho1','A']:
# Y=self.get('Y',fname=dump)
# else:
Y=self.get('Y',fname=dump,resolution='L')
if yaxis_thing == 'v':
Ek = self.get('Ek',fname=dump,numtype=num_type,resolution='l')
v = np.sqrt(2.*array(Ek,dtype=float))
y = v*1000
if logy:
ylab = '$\log <u>_\mathrm{rms}$ $([u]=\mathrm{km/s})$'
else:
ylab = '$<u>_\mathrm{rms}$ $([u]=\mathrm{km/s})$'
elif yaxis_thing == 'vY':
EkY = self.get('EkY',fname=dump,numtype=num_type,resolution='l')
vY = np.sqrt(array(EkY,dtype=float)) # no factor 2 for v_Y and v_XZ
y = vY*1000
if logy:
ylab = '$\log <u_r>_\mathrm{rms}$ $([u]=\mathrm{km/s})$'
else:
ylab = '$<u_r>_\mathrm{rms}$ $([u]=\mathrm{km/s})$'
elif yaxis_thing == 'vXZ':
EkXZ = self.get('EkXZ',fname=dump,numtype=num_type,resolution='l')
vXZ = np.sqrt(array(EkXZ,dtype=float)) # no factor 2 for v_Y and v_XZ
y = vXZ*1000
if logy:
ylab = '$\log <u_{\\theta,\phi}>_\mathrm{rms}$ $([u]=\mathrm{km/s})$'
else:
ylab = '$<u_{\\theta,\phi}>_\mathrm{rms}$ $([u]=\mathrm{km/s})$'
else:
y = self.get(yaxis_thing,fname=dump,numtype=num_type,resolution='L', **kwargs)
ylab = yaxis_thing
if logy: ylab = 'log '+ylab
if num_type == 'ndump':
lab = label_case+', '+str(dump)
leg_tit = num_type
elif num_type == 'time':
idx = np.abs(self.get('t')-dump).argmin()
time = self.get('t')[idx]
time_min = time/60.
lab=label_case+', '+str("%.3f" % time_min)
leg_tit = 'time / min'
if logy:
pl.plot(Y,np.log10(y),utils.linestyle(i+ls_offset)[0],
markevery=utils.linestyle(i+ls_offset)[1],label=lab)
else:
pl.plot(Y,y,utils.linestyle(i+ls_offset)[0],
markevery=utils.linestyle(i+ls_offset)[1],label=lab)
if radbase is not None and dump == fname[0]:
pl.axvline(radbase,linestyle='dashed',color='k')
if radtop is not None and dump == fname[0]:
pl.axvline(radtop,linestyle='dashed',color='k')
i+=1
pl.xlabel('Radius $[1000\mathrm{km}]$')
pl.ylabel(ylab)
pl.legend(loc='best',title=leg_tit).draw_frame(False)
def get_mass_fraction(fluid,fname,resolution):
'''
Get mass fraction profile of fluid 'fluid' at fname with resolution
'resolution'.
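The conversion applied below is X = rho_fluid*FV/rho, i.e. the
fluid's partial density divided by the total density.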
'''
y = self.get(fluid,fname=fname,resolution=resolution)
if fluid == 'FV H+He':
rhofluid = self.get('Rho H+He',fname=fname,resolution=resolution)
else:
rhofluid = self.get('RHOconv',fname=fname,resolution=resolution)
rho = self.get('Rho',fname=fname,resolution=resolution)
y = rhofluid * y / rho
return y
def vprofs(self,fname,log_logic=False,lims=None,save=False,
prefix='PPM',format='pdf',initial_conv_boundaries=True,
lw=1., label=True):
"""
Plot velocity profiles v_tot, v_Y and v_XZ for a given cycle number
or list of cycle numbers (fname).
If a list of cycle numbers is given, separate figures are made for
each cycle. If one wishes to compare velocity profiles for two or
more cycles, see function vprof_time.
Parameters
----------
fname : int or list
Cycle number or list of cycle numbers to plot
log_logic : boolean
Should the velocity axis be logarithmic?
The default value is False
lims : list
Limits for the plot, i.e. [xl,xu,yl,yu].
If None, the default values are used.
The default is None.
save : boolean
Do you want the figures to be saved for each cycle?
Figure names will be <prefix>-Vel-00000000001.<format>,
where <prefix> and <format> are input options that default
to 'PPM' and 'pdf'.
The default value is False.
prefix : string
see 'save' above
format : string
see 'save' above
initial_conv_boundaries : logical
plot vertical lines where the convective boundaries are
initially, i.e. at radbase and radtop from header
attributes in YProfiles
lw : float, optional
line width of the plot
label : boolean, optional
whether to label the lines (useful when comparing multiple models)
Examples
--------
import ppm
run='/rpod3/fherwig/PPM/RUNS_DATA/VLTP_MESA_M0.542/C1'
YY=ppm.yprofile(run)
YY.vprofs([90,100],log_logic=True)
"""
## fsize=14
## params = {'axes.labelsize': fsize,
## # 'font.family': 'serif',
## 'font.family': 'Times New Roman',
## 'figure.facecolor': 'white',
## 'text.fontsize': fsize,
## 'legend.fontsize': fsize,
## 'xtick.labelsize': fsize*0.8,
## 'ytick.labelsize': fsize*0.8,
## 'text.usetex': False}
## pl.rcParams.update(params)
if type(fname) is not list:
fname = [fname]
for dump in fname:
# if save or dump == fname[0]:
# pl.close(ifig),pl.figure(ifig)
# if not save and dump != fname[0]:
# pl.figure()
Y=self.get('Y',fname=dump,resolution='l')
Ek = self.get('Ek',fname=dump,resolution='l')
EkY = self.get('EkY',fname=dump,resolution='l')
EkXZ = self.get('EkXZ',fname=dump,resolution='l')
v = np.sqrt(2.*array(Ek,dtype=float))
vY = np.sqrt(array(EkY,dtype=float)) # no factor 2 for v_Y and v_XZ
vXZ = np.sqrt(array(EkXZ,dtype=float)) # no factor 2 for v_Y and v_XZ
line_labels = ['$v$','$v_\mathrm{r}$','$v_\perp$']
styles = ['-','--',':']
cb = utils.colourblind
if log_logic:
if label:
pl.plot(Y,np.log10(v*1000.),\
ls=styles[0],\
color=cb(0),\
label=line_labels[0],\
lw=lw)
pl.plot(Y,np.log10(vY*1000.),\
ls=styles[1],\
color=cb(8),\
label=line_labels[1],\
lw=lw)
pl.plot(Y,np.log10(vXZ*1000.),\
ls=styles[2],\
color=cb(2),\
label=line_labels[2],\
lw=lw)
ylab='log v$_\mathrm{rms}$ [km/s]'
else:
pl.plot(Y,np.log10(v*1000.),\
ls=styles[0],\
color=cb(0),\
lw=lw)
pl.plot(Y,np.log10(vY*1000.),\
ls=styles[1],\
color=cb(8),\
lw=lw)
pl.plot(Y,np.log10(vXZ*1000.),\
ls=styles[2],\
color=cb(2),\
lw=lw)
ylab='v$_\mathrm{rms}\,/\,\mathrm{km\,s}^{-1}$'
else:
if label:
pl.plot(Y,v*1000.,\
ls=styles[0],\
color=cb(0),\
label=line_labels[0],\
lw=lw)
pl.plot(Y,vY*1000.,\
ls=styles[1],\
color=cb(8),\
label=line_labels[1],\
lw=lw)
pl.plot(Y,vXZ*1000.,\
ls=styles[2],\
color=cb(2),\
label=line_labels[2],\
lw=lw)
else:
pl.plot(Y,v*1000.,\
ls=styles[0],\
color=cb(0),\
lw=lw)
pl.plot(Y,vY*1000.,\
ls=styles[1],\
color=cb(8),\
lw=lw)
pl.plot(Y,vXZ*1000.,\
ls=styles[2],\
color=cb(2),\
lw=lw)
ylab='v$_\mathrm{rms}\,/\,\mathrm{km\,s}^{-1}$'
if initial_conv_boundaries:
pl.axvline(self.radbase,linestyle='dashed',color='k')
pl.axvline(self.radtop,linestyle='dashed',color='k')
if lims is not None:
pl.axis(lims)
pl.xlabel('r / Mm')
pl.ylabel(ylab)
pl.title(prefix+', Dump '+str(dump))
if label:
pl.legend(loc=8).draw_frame(False)
number_str=str(dump).zfill(11)
if save:
pl.savefig(prefix+'-Vel-'+number_str+'.'+format,format=format)
def tEkmax(self,ifig=None,label=None,save=False,prefix='PPM',format='pdf',
logy=False,id=0):
"""
Plot maximum kinetic energy as a function of time.
Parameters
----------
ifig : int, optional
Figure number. If None, chosen automatically.
The default is None.
label : string, optional
Label for the model
The default is None.
save : boolean, optional
Do you want the figures to be saved for each cycle?
Figure names will be <prefix>-t-EkMax.<format>,
where <prefix> and <format> are input options that default
to 'PPM' and 'pdf'.
The default value is False.
prefix : string, optional
see 'save' above
format : string, optional
see 'save' above
logy : boolean, optional
Should the y-axis have a logarithmic scale?
The default is False
id : int, optional
An id for the model, which ensures that the lines are
plotted in different colours and styles.
The default is 0
Examples
--------
import ppm
run1='/rpod3/fherwig/PPM/RUNS_DATA/VLTP_MESA_M0.542/C1'
run2='/rpod3/fherwig/PPM/RUNS_DATA/sakurai-num-exp-robustness-onset-GOSH/A1/'
YY=ppm.yprofile(run1)
YY2=ppm.yprofile(run2)
YY.tEkmax(ifig=1,label='VLTP_0.543',id=0)
YY2.tEkmax(ifig=1,label='Sak_A1',id=1)
"""
fsize=14
params = {'axes.labelsize': fsize,
# 'font.family': 'serif',
'font.family': 'Times New Roman',
'figure.facecolor': 'white',
'text.fontsize': fsize,
'legend.fontsize': fsize,
'xtick.labelsize': fsize*0.8,
'ytick.labelsize': fsize*0.8,
'text.usetex': False}
pl.rcParams.update(params)
if ifig is None:
pl.figure()
else:
pl.figure(ifig)
t = self.get('t')
EkMax = self.get('EkMax') # Ek in 10^43 erg
if logy:
y = np.log10(EkMax*1.e43)
ylab = '$\log E_{\\rm k,max}/ {\\rm erg}$'
else:
y = EkMax * 1.e5 # EkMax in 10^38 erg
ylab = '$E_{\\rm k,max} / 10^{38}{\\rm \, erg}$'
if label is not None:
pl.plot(t/60,y,utils.linestyle(id)[0], markevery=utils.linestyle(id)[1],
label=label)
else:
pl.plot(t/60,y,utils.linestyle(id)[0], markevery=utils.linestyle(id)[1],
label=str(id))
pl.legend(loc='best').draw_frame(False)
pl.xlabel('t/min')
pl.ylabel(ylab)
if save:
pl.savefig(prefix+'-t-EkMax.'+format,format=format)
def tvmax(self,ifig=None,label=None,save=False,prefix='PPM',format='pdf',
logy=False,id=0):
"""
Plot maximum velocity as a function of time.
Parameters
----------
ifig : int, optional
Figure number. If None, chosen automatically.
The default is None.
label : string, optional
Label for the model
The default is None.
save : boolean
Do you want the figures to be saved for each cycle?
Figure names will be <prefix>-t-vMax.<format>,
where <prefix> and <format> are input options that default
to 'PPM' and 'pdf'.
The default value is False.
prefix : string
see 'save' above
format : string
see 'save' above
logy : boolean, optional
Should the y-axis have a logarithmic scale?
The default is False
id : int, optional
An id for the model, which ensures that the lines are
plotted in different colours and styles.
The default is 0
Examples
--------
import ppm
run1='/rpod3/fherwig/PPM/RUNS_DATA/VLTP_MESA_M0.542/C1'
run2='/rpod3/fherwig/PPM/RUNS_DATA/sakurai-num-exp-robustness-onset-GOSH/A1/'
YY=ppm.yprofile(run1)
YY2=ppm.yprofile(run2)
YY.tvmax(ifig=1,label='VLTP_0.543',id=0)
YY2.tvmax(ifig=1,label='Sak_A1',id=1)
"""
fsize=14
params = {'axes.labelsize': fsize,
# 'font.family': 'serif',
'font.family': 'Times New Roman',
'figure.facecolor': 'white',
'text.fontsize': fsize,
'legend.fontsize': fsize,
'xtick.labelsize': fsize*0.8,
'ytick.labelsize': fsize*0.8,
'text.usetex': False}
#pl.rcParams.update(params)
if ifig is None:
pl.figure()
else:
pl.figure(ifig)
t=self.get('t')
EkMax=self.get('EkMax')
vMax = 1000*np.sqrt(2.*EkMax) # velocity in km/s
if logy:
y = np.log10(vMax)
ylab = '$\log v_{\\rm max}/ {\\rm km\,s}^{-1}$'
else:
y = vMax
ylab = '$v_{\\rm max} / {\\rm km\,s}^{-1}$'
if label is not None:
pl.plot(t/60,y,utils.linestyle(id)[0], markevery=utils.linestyle(id)[1],
label=label)
else:
pl.plot(t/60,y,utils.linestyle(id)[0], markevery=utils.linestyle(id)[1],
label=str(id))
pl.legend(loc='best').draw_frame(False)
pl.xlabel('t/min')
pl.ylabel(ylab)
if save:
pl.savefig(prefix+'-t-vMax.'+format,format=format)
def Richardson_plot(self, fname1 = 0, fname2 = 2, R_low = None, R_top = None, \
do_plots = False, logRi_levels = [-1., -0.6, 0., 1., 2., 3.], \
ylim_max = 2.02, compressible_fluid = True, plot_type = 0, \
ifig = 101):
'''
Make a plot of radius vs tangential velocity in the vicinity of the
boundary and draw on lines of constant Richardson number. Compared to
the function that produced Fig. 9 of Woodward+ (2015) this one takes
into account the compressibility of the gas. Several bugs have been
removed, too.
Parameters
----------
fname1 : int
Which dump do you want to take the stratification from?
fname2 : int
Which dump do you want to take the velocities from?
R_low : float
The minimum radius in the plot. If invalid or None it will be set
to R_top - 1.
R_top : float
Radius of top of convection zone. If invalid or None it will be set
to the radius at which FV H+He = 0.9.
do_plots : logical
Do you want to do some intermittent plotting?
logRi_levels : list, optional
Values of Ri for which to draw contours.
ylim_max : float
Max of ylim (min is automatically determined) in the final plot.
compressible_fluid : logical
You can set it to False to use the Richardson criterion for
an incompressible fluid.
plot_type : int
plot_type = 0: Use a variable lower endpoint and a fixed upper endpoint of
the radial interval, in which Ri is calculated. Ri is plotted
for a range of assumed velocity differences with respect to
the upper endpoint.
plot_type = 1: Compute Ri locally assuming that the local velocities vanish
on a certain length scale, which is computed from the radial
profile of the RMS horizontal velocity.
ifig : int
Figure number for the Richardson plot (a new window must be opened).
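Examples
--------
Sketch only; cycle numbers are illustrative:
YY=ppm.yprofile(path_to_data)
YY.Richardson_plot(fname1=0,fname2=2,plot_type=1)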
'''
# the whole calculation is done in code units
# the unit of G in the code is 10^{-3} cm^3 g^{-1} s^{-2}
G_code = ast.grav_const/1e-3
def diff(x):
# compute 2nd order centred differences
dx = (np.roll(x, -1) - np.roll(x, 1))/2.
# 1st order differences to correct the boundaries
dx[0] = x[1] - x[0]
dx[-1] = x[-1] - x[-2]
return dx
if fname1 < 0 or fname1 > np.max(self.ndumpDict.keys()):
raise IOError("fname1 out of range.")
if fname2 == 0:
raise IOError("Velocities at fname2=0 will be 0; please "+\
"make another choice")
if fname2 < 0 or fname2 > np.max(self.ndumpDict.keys()):
raise IOError("fname2 out of range.")
if plot_type != 0 and plot_type != 1:
print "plot_type = %s is not implemented." % str(plot_type)
return
# get some header attributes
R_bot = float(self.hattrs['At base of the convection zone R'])
g_bot = float(self.hattrs['At base of the convection zone g'])
# get the stratification at fname = fname1
# radius is called 'Y' in the YProfiles
r = self.get('Y', fname = fname1, resolution = 'l')
fv_H_He = self.get('FV H+He', fname = fname1, resolution = 'l')
rho = self.get('Rho', fname = fname1, resolution = 'l')
p = self.get('P', fname = fname1, resolution = 'l')
# get the rms horizontal velocities at fname = fname2
ek_xz = self.get('EkXZ', fname = fname2, resolution = 'l')
rms_u_xz = np.sqrt(ek_xz) # no factor of 2 for ek_xz (known bug)
# pre-compute logarithms of some variables to speed up the code
logrho = np.log(rho)
logp = np.log(p)
min_r = np.min(r)
max_r = np.max(r)
dr = diff(r)
if R_top is not None:
if R_top < min_r:
print "R_top too low."
return
elif R_top > max_r:
print "R_top too high."
return
else:
# centre R_top on the nearest cell
idx_top = np.argmin(np.abs(r - R_top))
R_top = r[idx_top]
print "R_top centred on the nearest cell: R_top = %.3f." % R_top
else:
# put R_top where fv_H_He is the closest to 0.9
idx_top = np.argmin(np.abs(fv_H_He - 0.9))
R_top = r[idx_top]
print "R_top set to %.3f." % R_top
if R_low is not None:
if R_low < min_r:
print "R_low too low."
return
elif R_low >= R_top:
print "R_low too high."
return
else:
# centre R_low on the nearest cell
idx_low = np.argmin(np.abs(r - R_low))
R_low = r[idx_low]
print "R_low centred on the nearest cell: R_low = %.3f." % R_low
else:
# the default setting
R_low = R_top - 1.
if R_low < min_r:
R_low = min_r
# find the point nearest to r = R_low
idx_low = np.argmin(np.abs(r - R_low))
R_low = r[idx_low]
print "R_low centred on the cell nearest to R_top - 1: R_low = %.3f." % R_low
# centre R_bot on the nearest cell
idx_bot = np.argmin(np.abs(r - R_bot))
# mass below R_bot
# (using r[idx_bot] instead of R_bot makes g[idx_bot] == g_bot)
M_bot = g_bot*(r[idx_bot]**2)/G_code
dm = 4.*np.pi*(r**2)*dr*rho
m = np.cumsum(dm) # get the mass profile by integration
# shift the mass profile to make sure that m[idx_bot] == M_bot
# the mass profile at small radii won't make sense, because
# the core is artificial with no gravity
m += M_bot - m[idx_bot]
g = G_code*m/(r**2) # gravity profile (see the note above)
H_p = p/(rho*g) # pressure scale height (assuming hydrostatic equilibrium)
nabla_ad = 0.4 # adiabatic temperature gradient
nabla_rho_ad = 1. - nabla_ad # adiabatic density gradient
# compute the Richardson number for the shearing flow
# between r[idx_1] and r[idx_2] (it's faster when using
# indices instead of radii)
# arbitrary du or dudr can be supplied
def Richardson(idx_1, idx_2, du = None, dudr = None):
# average g and H_p between idx_1 and idx_2
g_avg = (g[idx_1] + g[idx_2])/2.
H_p_avg = (H_p[idx_1] + H_p[idx_2])/2.
# approximate density gradient between idx_1 and idx_2
dlogrho = logrho[idx_2] - logrho[idx_1]
dlogp = logp[idx_2] - logp[idx_1]
nabla_rho = dlogrho/dlogp
# buoyancy frequency squared
if compressible_fluid:
N2 = (g_avg/H_p_avg)*(nabla_rho - nabla_rho_ad)
else:
N2 = (g_avg/H_p_avg)*nabla_rho
# compute the velocity difference if we have no information
# about the velocity difference or gradient
if du is None and dudr is None:
du = rms_u_xz[idx_2] - rms_u_xz[idx_1]
# compute the velocity gradient if none was supplied
if dudr is None:
dr = r[idx_2] - r[idx_1]
dudr = du/dr
# velocity gradient squared
dudr2 = (dudr)**2
# Richardson number
Ri = N2/dudr2
return Ri
if plot_type == 0:
# grid of indices for a sequence of intervals, in which Ri will be computed
idx_grid = np.arange(idx_top + 1, idx_low + 1)
# the corresponding grid of radii
r_grid = np.array([r[i] for i in idx_grid])
# reference velocity
u_0 = rms_u_xz[idx_top]
# velocity limit for the plot
# the factor of 1e-3 converts the velocity from km/s to
# code units (10^8 cm s^{-1})
u_max = 1e-3*(10**ylim_max)
# construct grids of assumed velocities and velocity differences
# logarithmically spaced from just above u_0 up to u_max (step log10u_step in log10 u)
log10u_step = 0.02
u_grid = 10**np.arange(np.log10(rms_u_xz[idx_top]) + log10u_step, \
np.log10(u_max), log10u_step)
du_grid = u_grid - u_0
# perhaps the loops could be optimised, but it runs fast enough as is
Ri = np.zeros((du_grid.size, idx_grid.size))
for i in range(0, du_grid.size):
for j in range(0, idx_grid.size):
Ri[i, j] = Richardson(idx_grid[j], idx_top, du = du_grid[i])
pl.close(ifig)
pl.figure(ifig)
# pl.contour() fails if np.log10(Ri) is undefined everywhere
if any(Ri > 0):
cs = pl.contour(r_grid, np.log10(1e3*u_grid), np.log10(Ri), \
logRi_levels, linestyles = '-')
pl.clabel(cs)
# pl.contour() fails if np.log10(Ri) is undefined everywhere
if any(Ri < 0):
cs2 = pl.contour(r_grid, np.log10(1e3*u_grid), np.log10(-Ri), \
logRi_levels, linestyles = '--')
pl.clabel(cs2)
pl.plot(r, np.log10(1e3*rms_u_xz + 1e-100), \
marker = 'o', markevery = utils.linestyle(0)[1], \
label = ("velocity at dump %d" % fname2))
pl.xlabel(r'$r\ \mathrm{[Mm]}$', fontsize = 16)
pl.ylabel(r"$\log\ u_\mathrm{hor,rms}\ \mathrm{[km/s]}$", fontsize = 16)
pl.xlim(R_low, R_top)
pl.ylim(np.log10(1e3*rms_u_xz[idx_top]), np.log10(1e3*u_max))
pl.legend(loc = 3)
elif plot_type == 1:
# the steepest velocity gradient between R_low and R_top
dudr = diff(rms_u_xz)/dr
max_dudr = np.max(np.abs(dudr[idx_top:(idx_low + 1)]))
# characteristic length scale on which velocities decrease at the boundary
l_u = np.max(rms_u_xz[idx_top:(idx_low + 1)])/max_dudr
print "Velocities decrease on a characteristic length scale of %.2f Mm" % l_u
# grid of positions, at which Ri will be computed
idx_grid = np.arange(idx_top, idx_low + 1)
# the corresponding grid of radii
r_grid = np.array([r[i] for i in idx_grid])
# estimate the local velocity gradient assuming that the local velocities
# vanish on the length scale l_u
dudr_estimate = rms_u_xz[idx_top:(idx_low + 1)]/l_u
# compute the local Richardson numbers
Ri = np.zeros(idx_grid.size)
for i in range(0, idx_grid.size):
Ri[i] = Richardson(idx_grid[i] - 1, idx_grid[i] + 1, dudr = dudr_estimate[i])
# determine the lower limit for Ri in the plot
min_log10absRi = np.min(np.log10(np.abs(Ri)))
# always keep some margin
if min_log10absRi - np.floor(min_log10absRi) > 0.1:
pl_Ri_min = np.floor(min_log10absRi)
else:
pl_Ri_min = np.floor(min_log10absRi) - 1.
# determine the upper limit for Ri in the plot
max_log10absRi = np.max(np.log10(np.abs(Ri)))
# always keep some margin
if np.ceil(max_log10absRi) - max_log10absRi > 0.1:
pl_Ri_max = np.ceil(max_log10absRi)
else:
pl_Ri_max = np.ceil(max_log10absRi) + 1.
# FV values smaller than 10^{-8} are not interesting
min_log10fv_H_He = np.min(np.log10(fv_H_He[idx_top:(idx_low + 1)] + 1e-100))
if min_log10fv_H_He < -8.:
min_log10fv_H_He = -8.
# do we need to shift the FV curve in the plot?
fv_offset = 0
if pl_Ri_min > min_log10fv_H_He:
fv_offset = pl_Ri_max
pl.close(ifig)
fig = pl.figure(ifig)
ax = fig.add_subplot(111)
lns = [] # array of lines to be put into a joint legend
if any(Ri < 0):
# temporarily suppress numpy warnings
old_settings = np.seterr()
np.seterr(invalid = 'ignore')
lns += ax.plot(r_grid, np.log10(Ri), linestyle = '-', linewidth = 2, \
label = r'$Ri > 0$')
lns += ax.plot(r_grid, np.log10(-Ri), linestyle = '--', linewidth = 2, \
label = r'$Ri < 0$')
np.seterr(**old_settings)
else:
lns += ax.plot(r_grid, np.log10(Ri), linestyle = '-', linewidth = 2, \
label = r'$Ri$')
lns += ax.plot(r_grid, np.log10(fv_H_He[idx_top:(idx_low + 1)] + 1e-100) + \
fv_offset, linestyle = '-', label = r'$FV$')
ax.set_xlim(R_low, R_top)
ax.set_ylim(pl_Ri_min, pl_Ri_max)
ax.set_xlabel(r'$\mathrm{radius\ [Mm]}$', fontsize = 16)
if fv_offset == 0:
ax.set_ylabel(r'$\log Ri; \log FV$', fontsize = 16)
else:
ax.set_ylabel(r'$\log Ri;\ \log FV + %d$' % fv_offset, fontsize = 16)
ax2 = ax.twinx() # get a new set of axes
lns += ax2.plot(r_grid, 1e3*rms_u_xz[idx_top:(idx_low + 1)], linestyle = '-.', \
label = r'$u_\mathrm{hor,rms}$')
ax2.set_ylabel(r'$u_\mathrm{hor,rms}\ \mathrm{[km/s]}$', fontsize = 16)
lbls = [l.get_label() for l in lns]
ax2.legend(lns, lbls, loc = 2)
# show some plots to inspect more variables
if do_plots:
i1 = idx_bot
i2 = np.argmax(r)
if i1 > i2: tmp = i1; i1 = i2; i2 = tmp
pl.close(102); pl.figure(102)
pl.plot(r[i1:i2], 5.025e-07*m[i1:i2])
pl.xlabel("radius [Mm]")
pl.ylabel("mass [M_Sun]")
pl.title("enclosed mass")
pl.close(103); pl.figure(103)
pl.plot(r[i1:i2], 1e8*g[i1:i2])
pl.xlabel("radius [Mm]")
pl.ylabel("gravity [cm/s^2]")
pl.title("gravity")
pl.close(104); pl.figure(104)
pl.plot(r[i1:i2], np.log10(1e3*rho[i1:i2]))
pl.xlabel("radius [Mm]")
pl.ylabel("log10(rho [g/cm^3])")
pl.title("log10 density")
pl.close(105); pl.figure(105)
pl.plot(r[i1:i2], np.log10(1e19*p[i1:i2]))
pl.xlabel("radius [Mm]")
pl.ylabel("log10(p [g/(cm s^2)])")
pl.title("log10 pressure")
pl.close(106); pl.figure(106)
pl.plot(r[i1:i2], H_p[i1:i2])
pl.xlabel("radius [Mm]")
pl.ylabel("Hp [Mm]")
pl.title("pressure scale height")
pl.close(107); pl.figure(107)
nabla_rho = diff(logrho)/diff(logp)
pl.plot(r[i1:i2], nabla_rho[i1:i2])
pl.xlabel("radius [Mm]")
pl.ylabel("nabla_rho")
pl.title("density gradient")
pl.close(108); pl.figure(108)
nabla_rho = diff(logrho)/diff(logp)
N2 = (g/H_p)*(nabla_rho - nabla_rho_ad)
pl.plot(r[i1:i2], N2[i1:i2])
pl.xlabel("radius [Mm]")
pl.ylabel("N^2 [1/s^2]")
pl.title("buoyancy frequency")
pl.show() # show everything we have plotted
def Dov(self,r0,D0,f,fname=1):
'''
Calculate an exponentially decaying diffusion coefficient
given r0, D0 and f.
Dov is given by the formula D = D0*exp(-2*(r-r0)/(f*Hp))
Parameters
----------
r0 : float
radius (Mm) at which the decay will begin
D0 : float
diffusion coefficient at r0
f : float
what f-value (parameter, where f*Hp is the e-folding length
of the diffusion coefficient) should we decay with?
fname : int
which dump do you want to take r and P from?
Output
------
r : array
radial co-ordinates (Mm) for which we have a diffusion coefficient
that decays exponentially
D : array
Exponentially decaying diffusion coefficient (cm^2/s)
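Example
-------
Sketch only; r0, D0 and f values are illustrative:
YY=ppm.yprofile(path_to_data)
r,D = YY.Dov(r0=20.,D0=1.e16,f=0.02,fname=1)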
'''
# Hp = -1 / (dlnP/dr) = -P * dr/dP
r = self.get('Y',fname=fname,resolution='l')[::-1] * 1.e8 # cm, centre to surface
P = self.get('P',fname=fname)[::-1] * 1.e19 # barye, centre to surface
Hp = - P[1:] * np.diff(r) / np.diff(P)
Hp = np.insert(Hp,0,0)
idx = np.abs(r - r0*1.e8).argmin()
r0 = r[idx]
Hp0 = Hp[idx]
print r0, Hp0, idx
D = D0 * np.exp(-2. * (r[idx:] - r0) / f / Hp0)
return r[idx:] / 1.e8, D
def Dov2(self,r0,D0,f1=0.2,f2=0.05,fname=1):
'''
Calculate a 2-parameter exponentially decaying diffusion coefficient
given r0, D0, f1 and f2.
Dov is given by the formula:
D = 2*D0 / (1/exp(-2*(r-r0)/(f1*Hp)) +
1/exp(-2*(r-r0)/(f2*Hp)))
Parameters
----------
r0 : float
radius (Mm) at which the decay will begin
D0 : float
diffusion coefficient at r0
f1,f2 : float
parameters of the model
fname : int
which dump do you want to take r and P from?
Output
------
r : array
radial co-ordinates (Mm) for which we have a diffusion coefficient
that decays exponentially
D : array
Exponentially decaying diffusion coefficient (cm^2/s)
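Example
-------
Sketch only; parameter values are illustrative:
YY=ppm.yprofile(path_to_data)
r,D = YY.Dov2(r0=20.,D0=1.e16,f1=0.2,f2=0.05,fname=1)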
'''
# Hp = -1 / (dlnP/dr) = -P * dr/dP
r = self.get('Y',fname=fname,resolution='l')[::-1] * 1.e8 # cm, centre to surface
P = self.get('P',fname=fname)[::-1] * 1.e19 # barye, centre to surface
Hp = - P[1:] * np.diff(r) / np.diff(P)
Hp = np.insert(Hp,0,0)
idx = np.abs(r - r0*1.e8).argmin()
r0 = r[idx]
Hp0 = Hp[idx]
print r0, Hp0, idx
D = 2. * D0 * 1./(1./(np.exp(-2. * (r - r0) / f1 / Hp0)) +
1./(np.exp(-2. * (r - r0) / f2 / Hp0))
)
return r / 1.e8, D
def Dinv(self,fname1,fname2,fluid='FV H+He',numtype='ndump',newton=False,
niter=3,debug=False,grid=False,FVaverage=False,tauconv=None,
returnY=False,plot_Dlt0=True):
'''
Solve inverted diffusion equation to see what diffusion coefficient
profile would have been appropriate to mimic the mixing of species
seen in the Yprofile dumps.
In the present version, we only solve for D in the region where
'FV H+He' is not 0 or 1 in dump fname 2.
Parameters
----------
fname1,fname2 : int or float
cycles from which to take initial and final abundance profiles
for the diffusion step we want to mimic.
fluid : string
Which fluid do you want to track?
numtype : string, optional
Designates how this function acts and how it interprets
fname. If numType is 'file', this function will get the
desired attribute from that file. If numType is 'NDump'
function will look at the cycle with that nDump. If
numType is 'T' or 'time' function will find the cycle
with the closest time stamp.
The default is "ndump".
newton : boolean, optional
Whether or not to apply Newton-Raphson refinement of the
solution for D.
The default is False
niter : int, optional
If N-R refinement is to be done, niter is how many iterations
to compute.
The default is 3.
grid : boolean, optional
whether or not to show the axes grids.
The default is False.
FVaverage : boolean, optional
Whether or not to average the abundance profiles over a
convective turnover timescale. See also tauconv.
The default is False.
tauconv : float, optional
If averaging the abundance profiles over a convective turnover
timescale, give the convective turnover timescale (seconds).
The default value is None.
returnY : boolean, optional
If True, return abundance vectors as well as radius and diffusion
coefficient vectors
The default is False.
plot_Dlt0 : boolean, optional
whether or not to plot the diffusion coefficient when it is
negative
Output
------
x : array
radial co-ordinates (Mm) for which we have a diffusion coefficient
D : array
Diffusion coefficient (cm^2/s)
Example
-------
YY=ppm.yprofile(path_to_data)
YY.Dinv(1,640)
'''
xlong = self.get('Y',fname=fname1,resolution='l') # for plotting
if debug: print xlong
x = xlong
x = x * 1.e8
def mf(fluid,fname):
'''
Get mass fraction profile of fluid 'fluid' at fname.
'''
y = self.get(fluid,fname=fname,resolution='l')
if fluid == 'FV H+He':
rhofluid = self.get('Rho H+He',fname=fname,resolution='l')
else:
rhofluid = self.get('RHOconv',fname=fname,resolution='l')
rho = self.get('Rho',fname=fname,resolution='l')
y = rhofluid * y / rho
return y
if FVaverage is False:
y1 = mf(fluid,fname2)
y1long = y1 # for plotting
y0 = mf(fluid,fname1)
y0long = y0 # for plotting
else:
if tauconv is None:
raise IOError("Please define tauconv")
# Find the dumps across which one should average:
# first profile:
myt0 = self.get('t',fname1)[-1]
myt01 = myt0 - tauconv / 2.
myt02 = myt0 + tauconv / 2.
myidx01 = np.abs(self.get('t') - myt01).argmin()
myidx02 = np.abs(self.get('t') - myt02).argmin()
mycyc01 = self.cycles[myidx01]
mycyc02 = self.cycles[myidx02]
# second profile:
myt1 = self.get('t',fname2)[-1]
myt11 = myt1 - tauconv / 2.
myt12 = myt1 + tauconv / 2.
myidx11 = np.abs(self.get('t') - myt11).argmin()
myidx12 = np.abs(self.get('t') - myt12).argmin()
mycyc11 = self.cycles[myidx11]
mycyc12 = self.cycles[myidx12]
# do the average for the first profile:
ytmp = np.zeros(len(x))
count=0
for cyc in range(mycyc01,mycyc02):
ytmp += mf(fluid,cyc)
count+=1
y0 = ytmp / float(count)
# do the average for the second profile:
ytmp = np.zeros(len(x))
count=0
for cyc in range(mycyc11,mycyc12):
ytmp += mf(fluid,cyc)
count+=1
y1 = ytmp / float(count)
y0long = y0
y1long = y1
if fluid == 'FV H+He':
y1 = y1[::-1]
x = x[::-1]
y0 = y0[::-1]
if debug: print len(xlong), len(y0long)
idx0 = np.abs(np.array(self.cycles) - fname1).argmin()
idx1 = np.abs(np.array(self.cycles) - fname2).argmin()
t0 = self.get('t')[idx0]
t1 = self.get('t')[idx1]
deltat = t1 - t0
# now we want to exclude any zones where the abundances
# of neighboring cells are the same. This is hopefully
# rare inside the computational domain and limited to only
# a very small number of zones
indexarray = np.where(np.diff(y1) == 0)[0]
print 'removing zones:', indexarray
y1 = np.delete(y1,indexarray)
y0 = np.delete(y0,indexarray)
x = np.delete(x,indexarray)
# in the current formulation for the inner boundary condition,
# y1[0] != 0:
while y1[0] == 0.:
x = x[1:]
y0 = y0[1:]
y1 = y1[1:]
# Try moving left boundary one over to allow for "exact"
# boundary condition and saving the now ghost cell value
xl = x[100]
y0l = y0[100]
y1l = y1[100]
x = x[101:]
y0 = y0[101:]
y1 = y1[101:]
if debug : print y0, y1, deltat
print 'deltat = ', deltat, 's'
p = np.zeros(len(x))
q = np.zeros(len(x))
xdum = np.zeros(3) # our workhorse array for differencing
dt = float(deltat)
# calculate matrix elements for intermediate mesh points:
def matrixdiffus(x, y0, y1, dt):
m = len(x) - 1
for i in range(1,m):
xdum[0] = x[i-1]
xdum[1] = x[i]
xdum[2] = x[i+1]
xl = xdum[1] - xdum[0]
xr = xdum[2] - xdum[1]
xm = 0.5 * (xdum[2] - xdum[0])
alpha = dt / xm
p[i] = (y1[i] - y1[i-1]) * alpha / xl
q[i] = (y1[i] - y1[i+1]) * alpha / xr
# central (left) boundary:
xdum[1] = x[0]
xdum[2] = x[1]
xr = xdum[2] - xdum[1]
alpha = dt / (xr * xr)
# p[0] = y1[i] * alpha
# q[0] = (y1[i] - y1[i+1]) * alpha
# p[0] = y1[0] * alpha
# q[0] = (y1[0] - y1[1]) * alpha
# Trying new boundary conditions:
p[0] = (y1[0] - y1l) * alpha
q[0] = (y1[0] - y1[1]) * alpha
print 'p0, q0 = ', p[0],q[0]
# surface (right) boundary:
xdum[0] = x[m-1]
xdum[1] = x[m]
xl = xdum[1] - xdum[0]
alpha = dt / (xl * xl)
# p[m] = (y1[i] - y1[i-1]) * alpha
# q[m] = 0
p[m] = (y1[m] - y1[m-1]) * alpha
q[m] = 0.
print 'pm, qm = ', p[m],q[m]
G = np.zeros([len(x),len(x)])
# set up matrix:
for i in range(len(x)):
print 'p[i] = ', p[i]
G[i,i] = p[i]
if debug : print G[i,i]
if i != len(x)-1 :
G[i,i+1] = q[i]
if debug : print G[i,i+1]
A = np.array( [ G[i,:] for i in range(len(x)) ] )
print A[0]
print 'determinant = ', np.linalg.det(A)
return A
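# In matrix form the inverted diffusion step reads A*D = y0 - y1, with A
# bidiagonal: row i has p[i] (built from the inner-face gradient of y1)
# on the diagonal and q[i] (built from the outer-face gradient) on the
# superdiagonal. Solving for D gives the diffusion-coefficient profile
# that would carry y0 into y1 over the time step dt.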
# Direct solution (initial guess if moving on to do Newton-
# Raphson refinement):
A = matrixdiffus(x,y0,y1,dt)
B = y0 - y1
D = np.linalg.solve(A,B)
if newton:
x0 = D
xn = np.zeros(len(x0))
xn = x0
xnp1 = np.zeros(len(x0))
J = np.linalg.inv(A) # Jacobian matrix
# refinement loop:
for i in range(1,niter+1):
f = np.dot(A,xn) - B
xnp1 = xn - np.dot(J,f)
corr = np.abs(xnp1 - xn) / xn
cmax = max(corr)
cmin = min(corr)
print 'NEWTON: iter '+str(i)
print 'max. correction = ', cmax
print 'min. correction = ', cmin
xn = xnp1
D = xnp1
cb = utils.colourblind
lsty = utils.linestyle
pl.figure()
pl.plot(xlong,np.log10(y0long),\
marker='o',
color=cb(8),\
markevery=lsty(1)[1],\
label='$X_{'+str(fname1)+'}$')
pl.plot(xlong,np.log10(y1long),\
marker='o',\
color=cb(9),\
markevery=lsty(2)[1],\
label='$X_{'+str(fname2)+'}$')
# pl.ylabel('$\log\,X$ '+fluid.replace('FV',''))
pl.ylabel('$\log\,X$ ')
pl.xlabel('r / Mm')
pl.ylim(-8,0.1)
pl.legend(loc='center right').draw_frame(False)
if grid:
pl.grid()
pl.twinx()
pl.plot(x/1.e8,np.log10(D),'k-',\
label='$D$') #'$D > 0$')
if plot_Dlt0:
pl.plot(x/1.e8,np.log10(-D),'k--',\
label='$D < 0$')
pl.ylabel('$\log(D\,/\,{\\rm cm}^2\,{\\rm s}^{-1})$')
pl.legend(loc='upper right').draw_frame(False)
if returnY:
return x/1.e8, D, y0, y1
else:
return x/1.e8,D
def Dsolve(self,fname1,fname2,fluid='FV H+He',numtype='ndump',newton=False,niter=3,
debug=False,grid=False,FVaverage=False,tauconv=None,returnY=False):
'''
Solve inverse diffusion equation sequentially by iterating over the spatial
domain using a lower boundary condition (see MEB's thesis, page
223, Eq. B.15).
Parameters
----------
fname1,fname2 : int or float
cycles from which to take initial and final abundance profiles
for the diffusion step we want to mimic.
fluid : string
Which fluid do you want to track?
numtype : string, optional
Designates how this function acts and how it interprets
fname. If numType is 'file', this function will get the
desired attribute from that file. If numType is 'NDump'
function will look at the cycle with that nDump. If
numType is 'T' or 'time' function will find the cycle
with the closest time stamp.
The default is "ndump".
newton : boolean, optional
Whether or not to apply Newton-Raphson refinement of the
solution for D.
The default is False
niter : int, optional
If N-R refinement is to be done, niter is how many iterations
to compute.
The default is 3.
grid : boolean, optional
whether or not to show the axes grids.
The default is False.
FVaverage : boolean, optional
Whether or not to average the abundance profiles over a
convective turnover timescale. See also tauconv.
The default is False.
tauconv : float, optional
If averaging the abundance profiles over a convective turnover
timescale, give the convective turnover timescale (seconds).
The default value is None.
returnY : boolean, optional
If True, return abundance vectors as well as radius and diffusion
coefficient vectors
The default is False.
Output
------
x : array
radial co-ordinates (Mm) for which we have a diffusion coefficient
D : array
Diffusion coefficient (cm^2/s)
Example
-------
YY=ppm.yprofile(path_to_data)
YY.Dsolve(1,640)
'''
xlong = self.get('Y',fname=fname1,resolution='l') # for plotting
if debug: print xlong
x = xlong
x = x * 1.e8
def mf(fluid,fname):
'''
Get mass fraction profile of fluid 'fluid' at fname.
'''
y = self.get(fluid,fname=fname,resolution='l')
if fluid == 'FV H+He':
rhofluid = self.get('Rho H+He',fname=fname,resolution='l')
else:
rhofluid = self.get('RHOconv',fname=fname,resolution='l')
rho = self.get('Rho',fname=fname,resolution='l')
y = rhofluid * y / rho
return y
if FVaverage is False:
y1 = mf(fluid,fname2)
y1long = y1 # for plotting
y0 = mf(fluid,fname1)
y0long = y0 # for plotting
else:
if tauconv is None:
raise IOError("Please define tauconv")
# Find the dumps across which one should average:
# first profile:
myt0 = self.get('t',fname1)[-1]
myt01 = myt0 - tauconv / 2.
myt02 = myt0 + tauconv / 2.
myidx01 = np.abs(self.get('t') - myt01).argmin()
myidx02 = np.abs(self.get('t') - myt02).argmin()
mycyc01 = self.cycles[myidx01]
mycyc02 = self.cycles[myidx02]
# second profile:
myt1 = self.get('t',fname2)[-1]
myt11 = myt1 - tauconv / 2.
myt12 = myt1 + tauconv / 2.
myidx11 = np.abs(self.get('t') - myt11).argmin()
myidx12 = np.abs(self.get('t') - myt12).argmin()
mycyc11 = self.cycles[myidx11]
mycyc12 = self.cycles[myidx12]
# do the average for the first profile:
ytmp = np.zeros(len(x))
count=0
for cyc in range(mycyc01,mycyc02):
ytmp += mf(fluid,cyc)
count+=1
y0 = ytmp / float(count)
# do the average for the second profile:
ytmp = np.zeros(len(x))
count=0
for cyc in range(mycyc11,mycyc12):
ytmp += mf(fluid,cyc)
count+=1
y1 = ytmp / float(count)
y0long = y0
y1long = y1
if fluid == 'FV H+He':
y1 = y1[::-1]
x = x[::-1]
y0 = y0[::-1]
if debug: print len(xlong), len(y0long)
idx0 = np.abs(np.array(self.cycles) - fname1).argmin()
idx1 = np.abs(np.array(self.cycles) - fname2).argmin()
t0 = self.get('t')[idx0]
t1 = self.get('t')[idx1]
deltat = t1 - t0
# now we want to exclude any zones where the abundances
# of neighboring cells are the same. This is hopefully
# rare inside the computational domain and limited to only
# a very small number of zones
indexarray = np.where(np.diff(y1) == 0)[0]
print 'removing zones:', indexarray
y1 = np.delete(y1,indexarray)
y0 = np.delete(y0,indexarray)
x = np.delete(x,indexarray)
# in the current formulation for the inner boundary condition,
# y1[0] != 0:
while y1[0] == 0.:
x = x[1:]
y0 = y0[1:]
y1 = y1[1:]
# Try moving left boundary one over to allow for "exact"
# boundary condition and saving the now ghost cell value
xl = x[0]
y0l = y0[0]
y1l = y1[0]
x = x[1:]
y0 = y0[1:]
y1 = y1[1:]
if debug : print y0, y1, deltat
print 'deltat = ', deltat, 's'
p = np.zeros(len(x))
q = np.zeros(len(x))
xdum = np.zeros(3) # our workhorse array for differencing
dt = float(deltat)
# Calculate D starting from inner boundary:
D = np.zeros(len(x))
# inner boundary:
xr = x[1] - x[0]
xl = xr
xm = (xl + xr) / 2.
p = (y1[0] - y1l) / xl
s = xm * (y1[0] - y0[0]) / dt
u = xr / (y1[1] - y1[0])
Dghost = 0. # is this OK?
D[0] = u * (s + Dghost * p)
# now do the rest:
for i in range(1,len(x)-1):
xr = x[i+1] - x[i]
xl = x[i] - x[i-1]
xm = (xl + xr) / 2.
p = (y1[i] - y1[i-1]) / xl
s = xm * (y1[i] - y0[i]) / dt
u = xr / (y1[i+1] - y1[i])
D[i] = u * (s + D[i-1] * p)
# outer boundary:
m = len(x) - 1
xl = x[m] - x[m-1]
xr = xl
xm = (xl + xr) / 2.
p = (y1[m] - y1[m-1]) / xl
s = xm * (y1[m] - y0[m]) / dt
u = xr / (1. - y1[m]) # assuming here that y1[m+1] == 1; is this OK?
D[m] = u * (s + D[m-1] * p)
pl.figure()
# pl.plot(xlong,np.log10(y0long),utils.linestyle(1)[0],\
# markevery=utils.linestyle(1)[1],\
# label=fluid.replace('FV','')+' '+str(fname1))
# pl.plot(xlong,np.log10(y1long),utils.linestyle(2)[0],\
# markevery=utils.linestyle(2)[1],\
# label=fluid.replace('FV','')+' '+str(fname2))
pl.plot(xlong,np.log10(y0long),utils.linestyle(1)[0],\
markevery=utils.linestyle(1)[1],\
label='fluid above'+' '+str(fname1))
pl.plot(xlong,np.log10(y1long),utils.linestyle(2)[0],\
markevery=utils.linestyle(2)[1],\
label='fluid above'+' '+str(fname2))
pl.ylabel('$\log\,X$ '+fluid.replace('FV',''))
pl.xlabel('r / Mm')
pl.ylim(-8,0.1)
pl.legend(loc='lower left').draw_frame(False)
if grid:
pl.grid()
pl.twinx()
pl.plot(x/1.e8,np.log10(D),'k-',\
label='$D$') #'$D > 0$')
pl.plot(x/1.e8,np.log10(-D),'k--',\
label='$D < 0$')
pl.ylabel('$\log D\,/\,{\\rm cm}^2\,{\\rm s}^{-1}$')
pl.legend(loc='upper right').draw_frame(False)
if returnY:
return x/1.e8, D, y0, y1
else:
return x/1.e8,D
def Dsolvedown(self,fname1,fname2,fluid='FV H+He',numtype='ndump',
newton=False,niter=3,debug=False,grid=False,FVaverage=False,
tauconv=None,returnY=False,smooth=False,plot_Dlt0=True):
'''
Solve diffusion equation sequentially by iterating over the spatial
domain inwards from the upper boundary.
Parameters
----------
fname1,fname2 : int or float
cycles from which to take initial and final abundance profiles
for the diffusion step we want to mimic.
fluid : string
Which fluid do you want to track?
numtype : string, optional
Designates how this function acts and how it interprets
fname. If numType is 'file', this function will get the
desired attribute from that file. If numType is 'NDump'
function will look at the cycle with that nDump. If
numType is 'T' or 'time' function will find the cycle
with the closest time stamp.
The default is "ndump".
newton : boolean, optional
Whether or not to apply Newton-Raphson refinement of the
solution for D.
The default is False
niter : int, optional
If N-R refinement is to be done, niter is how many iterations
to compute.
The default is 3.
grid : boolean, optional
whether or not to show the axes grids.
The default is False.
FVaverage : boolean, optional
Whether or not to average the abundance profiles over a
convective turnover timescale. See also tauconv.
The default is False.
tauconv : float, optional
If averaging the abundance profiles over a convective turnover
timescale, give the convective turnover timescale (seconds).
The default value is None.
returnY : boolean, optional
If True, return abundance vectors as well as radius and diffusion
coefficient vectors
The default is False.
smooth : boolean, optional
Smooth the abundance profiles with a spline fit, enforcing their
monotonicity. Only works for FV H+He choice of fluid
plot_Dlt0 : boolean, optional
whether or not to plot D where it is <0
the default value is True
Output
------
x : array
radial co-ordinates (Mm) for which we have a diffusion coefficient
D : array
Diffusion coefficient (cm^2/s)
Example
-------
YY=ppm.yprofile(path_to_data)
YY.Dsolvedown(1,640)
'''
xlong = self.get('Y',fname=fname1,resolution='l') # for plotting
if debug: print xlong
x = xlong
def mf(fluid,fname):
'''
Get mass fraction profile of fluid 'fluid' at fname.
'''
y = self.get(fluid,fname=fname,resolution='l')
if fluid == 'FV H+He':
rhofluid = self.get('Rho H+He',fname=fname,resolution='l')
else:
rhofluid = self.get('RHOconv',fname=fname,resolution='l')
rho = self.get('Rho',fname=fname,resolution='l')
y = rhofluid * y / rho
return y
def make_monotonic(r,x):
'''
function for making x monotonic, as the solution to the diffusion
equation should be. Only works when considering FV H+He (not conv)
'''
from scipy.interpolate import UnivariateSpline as uvs
xorig = x
rorig = r
xnew0 = np.zeros(len(x))
# find top and bottom of convection zone
ib = np.where(x!=0)[0][-1]
it = np.where(x!=1)[0][0]
x = x[it:ib]
r = r[it:ib]
x = np.maximum(x,1.e-12)
x = np.log10(x)
# is there a local min?
dx = np.diff( x )
c = len( np.where( dx > 0 )[0] ) > 0
# find midpoint, left and right sides of linear reconstruction
if c:
# there is a local minimum, but also a local maximum
# find the min by going top inwards and seeing where
# difference changes sign
# continue on to find the max, with which we set the width of the linear
# reconstruction region
# for i in range(len(r)):
# if dx[i] > 0.:
# idxmin = i
# break
# ask user for now to identify the local min (noisy data)
pl.plot(r,x)
usr_rmin = raw_input("Please input local min in Mm: ")
usr_rmin = float(usr_rmin)
idxmin = np.abs( r - usr_rmin ).argmin()
for i in range(idxmin,len(r)):
if x[i] == max(x[idxmin:]):
idxmax = i
break
else:
# everything was OK-ish anyway
return xorig
# to take local max as left side of interval:
if False:
il = idxmax
# set left side of interval to radius where x drops below the local minimum:
if True:
for i in range(idxmin,len(r)):
if x[i] < x[idxmin]:
il = i
break
rmid = r[idxmin]
width = ( rmid - r[il] ) * 2.
rl = rmid - width / 2.
rr = rmid + width / 2.
il = np.abs( r - rl ).argmin()
ir = np.abs( r - rr ).argmin()
# just sort the two ends
rt = r[:ir]
rb = r[il:]
xt = np.array(sorted(x[:ir])[::-1])
xb = np.array(sorted(x[il:])[::-1])
# now we fit the reconstruction region
def expfunc(x, a, c, d):
return a*np.exp(c*x)+d
if True:
rm = r[ir:il]
xms = sorted(x[ir:il])[::-1]
xms = np.array(xms)
from scipy.optimize import curve_fit
# fit an exponential
#popt, pcov = curve_fit(expfunc, rm, xms, p0=(1, 1, 1))
# linear reconstruction
m = ( xms[-1] - xms[0] ) / ( rm[-1] - rm[0] )
c = xms[0] - m * rm[0]
# now extend it <bw> Mm beyond the l and r bounds of the reconstruction
# region so that we can do a blend
bw = 0.1
idxr = np.abs( r - ( rr + bw ) ).argmin()
idxl = np.abs( r - ( rl - bw ) ).argmin()
rm = r[idxr:idxl]
# exponential:
#xm = func(rm, *popt)
# linear:
xm = m * rm + c
#now combine back the results with a sinusoidal blend at each overlap of the
# reconstruction region with the top and bottom components
xnew = np.zeros(len(x))
# top
itr = np.abs(rt-(rr+bw)).argmin()
xnew[:idxr] = xt[:itr]
# bottom
ibl = np.abs(rb-(rl-bw)).argmin()
xnew[idxl:] = xb[ibl:]
# upper middle
imrbb = np.abs( rm - rr ).argmin()
xnew[idxr:ir] = xt[itr:] * np.sin( np.abs(rt[itr:] - r[ir]) / bw * np.pi / 2. ) ** 2 + \
xm[:imrbb] * np.cos( np.abs(rm[:imrbb] - r[ir]) / bw * np.pi / 2. ) ** 2
# lower middle
imlbt = np.abs( rm - rl ).argmin()
xnew[il:idxl] = xb[:ibl] * np.sin( np.abs(rb[:ibl] - r[il]) / bw * np.pi / 2. ) ** 2 + \
xm[imlbt:] * np.cos( np.abs(rm[imlbt:] - r[il]) / bw * np.pi / 2. ) ** 2
# middle
xnew[ir:il] = xm[imrbb:imlbt]
xnew0[it:ib] = xnew[:]
xnew0 = 10. ** xnew0
xnew0[:it] = 1.
xnew0[ib:] = 0.
return xnew0
if FVaverage is False:
y1 = mf(fluid,fname2)
y1long = y1 # for plotting
y0 = mf(fluid,fname1)
y0long = y0 # for plotting
else:
if tauconv is None:
raise IOError("Please define tauconv")
# Find the dumps across which one should average:
# first profile:
myt0 = self.get('t',fname1)[-1]
myt01 = myt0 - tauconv / 2.
myt02 = myt0 + tauconv / 2.
myidx01 = np.abs(self.get('t') - myt01).argmin()
myidx02 = np.abs(self.get('t') - myt02).argmin()
mycyc01 = self.cycles[myidx01]
mycyc02 = self.cycles[myidx02]
# second profile:
myt1 = self.get('t',fname2)[-1]
myt11 = myt1 - tauconv / 2.
myt12 = myt1 + tauconv / 2.
myidx11 = np.abs(self.get('t') - myt11).argmin()
myidx12 = np.abs(self.get('t') - myt12).argmin()
mycyc11 = self.cycles[myidx11]
mycyc12 = self.cycles[myidx12]
# do the average for the first profile:
ytmp = np.zeros(len(x))
count=0
for cyc in range(mycyc01,mycyc02):
ytmp += mf(fluid,cyc)
count+=1
y0 = ytmp / float(count)
# do the average for the second profile:
ytmp = np.zeros(len(x))
count=0
for cyc in range(mycyc11,mycyc12):
ytmp += mf(fluid,cyc)
count+=1
y1 = ytmp / float(count)
y0long = y0
y1long = y1
if debug: print len(xlong), len(y0long)
idx0 = np.abs(np.array(self.cycles) - fname1).argmin()
idx1 = np.abs(np.array(self.cycles) - fname2).argmin()
t0 = self.get('t')[idx0]
t1 = self.get('t')[idx1]
deltat = t1 - t0
if smooth:
y1 = make_monotonic(x,y1)
#y0 = make_monotonic(x,y0)
if fluid == 'FV H+He':
y1 = y1[::-1]
x = x[::-1]
y0 = y0[::-1]
# restrict the computational domain to only where the regions are mixed
idxu = np.where( y1 != 1. )[0][-1] + 1
idxl = np.where( y1 != 0. )[0][0] - 1
print idxl, idxu
print y1
y1 = y1[idxl:idxu]
y0 = y0[idxl:idxu]
x = x[idxl:idxu]
print x[0], x[-1]
# now we want to exclude any zones where the abundances
# of neighboring cells are the same. This is hopefully
# rare inside the computational domain and limited to only
# a very small number of zones
indexarray = np.where(np.diff(y1) == 0)[0]
print 'removing zones:', indexarray
y1 = np.delete(y1,indexarray)
y0 = np.delete(y0,indexarray)
x = np.delete(x,indexarray)
dt = float(deltat)
# Calculate D starting from outer boundary:
D = np.zeros(len(x))
m = len(x) - 1
# now do the solution:
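# march inward from the outer boundary: in each cell a backward-time
# finite-difference form of the diffusion equation, (y1 - y0)/dt =
# d/dx(D*dy1/dx), is solved for D[i] using the already known D[i+1];
# the outermost cell (i == m) carries no q term, i.e. zero flux through
# the outer face is assumed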
for i in range(m,1,-1):
xl = np.float64(x[i] - x[i-1])
r = np.float64(y0[i] - y1[i])
p = np.float64(dt * (y1[i] - y1[i-1]) / (xl * xl))
if i == m:
D[i] = np.float64(r / p)
else:
xr = np.float64(x[i+1] - x[i])
xm = np.float64(xl + xr) / 2.
q = np.float64(dt * (y1[i] - y1[i+1]) / (xr * xm))
D[i] = np.float64((r - q * D[i+1]) / p)
D = D * 1.e16 # Mm^2/s ==> cm^2/s
print D
x = x * 1e8 # Mm ==> cm
cb = utils.colourblind
lsty = utils.linestyle
pl.figure()
pl.plot(xlong,np.log10(y0long),\
marker='o',
color=cb(8),\
markevery=lsty(1)[1],\
mec = cb(8),
mew = 1.,
mfc = 'w',
label='$X_{'+str(fname1)+'}$')
pl.plot(xlong,np.log10(y1long),\
marker='o',\
color=cb(9),\
lw=0.5,
markevery=lsty(2)[1],\
label='$X_{'+str(fname2)+'}$')
pl.ylabel('$\log_{10}\,X$ ')
pl.xlabel('$\mathrm{r\,/\,Mm}$')
pl.ylim(-8,0.1)
pl.legend(loc='center right').draw_frame(False)
if grid:
pl.grid()
pl.twinx()
pl.plot(x/1.e8,np.log10(D),'k-',\
label='$D$') #'$D > 0$')
if plot_Dlt0:
pl.plot(x/1.e8,np.log10(-D),'k--',\
label='$D < 0$')
pl.ylabel('$\log_{10}(D\,/\,{\\rm cm}^2\,{\\rm s}^{-1})$')
pl.legend(loc='upper right').draw_frame(False)
if returnY:
return x/1.e8, D, y0, y1
else:
return x/1.e8,D
def Dsolvedownexp(self,fname1,fname2,fluid='FV H+He',numtype='ndump',newton=False,niter=3,
debug=False,grid=False,FVaverage=False,tauconv=None,returnY=False):
'''
Solve diffusion equation sequentially by iterating over the spatial
domain inwards from the upper boundary. This version of the method is
explicit.
Parameters
----------
fname1,fname2 : int or float
cycles from which to take initial and final abundance profiles
for the diffusion step we want to mimic.
fluid : string
Which fluid do you want to track?
numtype : string, optional
Designates how this function acts and how it interprets
fname. If numType is 'file', this function will get the
desired attribute from that file. If numType is 'NDump' the
function will look at the cycle with that nDump. If
numType is 'T' or 'time' the function will find the cycle
with the closest time stamp.
The default is "ndump".
newton : boolean, optional
Whether or not to apply Newton-Raphson refinement of the
solution for D.
The default is False
niter : int, optional
If N-R refinement is to be done, niter is how many iterations
to compute.
The default is 3.
grid : boolean, optional
whether or not to show the axes grids.
The default is False.
FVaverage : boolean, optional
Whether or not to average the abundance profiles over a
convective turnover timescale. See also tauconv.
The default is False.
tauconv : float, optional
If averaging the abundance profiles over a convective turnover
timescale, give the convective turnover timescale (seconds).
The default value is None.
returnY : boolean, optional
If True, return abundance vectors as well as radius and diffusion
coefficient vectors
The default is False.
Output
------
x : array
radial co-ordinates (Mm) for which we have a diffusion coefficient
D : array
Diffusion coefficient (cm^2/s)
Example
-------
YY=ppm.yprofile(path_to_data)
YY.Dsolvedownexp(1,640)
'''
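# Note: "explicit" refers to the time discretisation of the diffusion step
# being mimicked -- in the downward sweep below the flux gradients are
# evaluated with the initial profile y0, whereas Dsolvedown evaluates them
# with the final profile y1 (backward-Euler-like).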
xlong = self.get('Y',fname=fname1,resolution='l') # for plotting
if debug: print xlong
x = xlong
# x = x * 1.e8
def mf(fluid,fname):
'''
Get mass fraction profile of fluid 'fluid' at fname.
'''
y = self.get(fluid,fname=fname,resolution='l')
if fluid == 'FV H+He':
rhofluid = self.get('Rho H+He',fname=fname,resolution='l')
else:
rhofluid = self.get('RHOconv',fname=fname,resolution='l')
rho = self.get('Rho',fname=fname,resolution='l')
y = rhofluid * y / rho
return y
if FVaverage is False:
y1 = mf(fluid,fname2)
y1long = y1 # for plotting
y0 = mf(fluid,fname1)
y0long = y0 # for plotting
else:
if tauconv is None:
raise IOError("Please define tauconv")
# Find the dumps across which one should average:
# first profile:
myt0 = self.get('t',fname1)[-1]
myt01 = myt0 - tauconv / 2.
myt02 = myt0 + tauconv / 2.
myidx01 = np.abs(self.get('t') - myt01).argmin()
myidx02 = np.abs(self.get('t') - myt02).argmin()
mycyc01 = self.cycles[myidx01]
mycyc02 = self.cycles[myidx02]
# second profile:
myt1 = self.get('t',fname2)[-1]
myt11 = myt1 - tauconv / 2.
myt12 = myt1 + tauconv / 2.
myidx11 = np.abs(self.get('t') - myt11).argmin()
myidx12 = np.abs(self.get('t') - myt12).argmin()
mycyc11 = self.cycles[myidx11]
mycyc12 = self.cycles[myidx12]
# do the average for the first profile:
ytmp = np.zeros(len(x))
count=0
for cyc in range(mycyc01,mycyc02):
ytmp += mf(fluid,cyc)
count+=1
y0 = ytmp / float(count)
# do the average for the second profile:
ytmp = np.zeros(len(x))
count=0
for cyc in range(mycyc11,mycyc12):
ytmp += mf(fluid,cyc)
count+=1
y1 = ytmp / float(count)
y0long = y0
y1long = y1
if fluid == 'FV H+He':
y1 = y1[::-1]
x = x[::-1]
y0 = y0[::-1]
if debug: print len(xlong), len(y0long)
idx0 = np.abs(np.array(self.cycles) - fname1).argmin()
idx1 = np.abs(np.array(self.cycles) - fname2).argmin()
t0 = self.get('t')[idx0]
t1 = self.get('t')[idx1]
deltat = t1 - t0
# now we want to exclude any zones where the abundances
# of neighboring cells are the same. This is hopefully
# rare inside the computational domain and limited to only
# a very small number of zones
indexarray = np.where(np.diff(y0) == 0)[0]
print 'removing zones:', indexarray
y1 = np.delete(y1,indexarray)
y0 = np.delete(y0,indexarray)
x = np.delete(x,indexarray)
dt = float(deltat)
# Calculate D starting from outer boundary:
D = np.zeros(len(x))
m = len(x) - 1
# now do the solution:
for i in range(m,1,-1):
xl = np.float64(x[i] - x[i-1])
r = np.float64(y0[i] - y1[i])
p = np.float64(dt * (y0[i] - y0[i-1]) / (xl * xl))
if i == m:
D[i] = np.float64(r / p)
else:
xr = np.float64(x[i+1] - x[i])
xm = np.float64(xl + xr) / 2.
q = np.float64(dt * (y0[i] - y0[i+1]) / (xr * xm))
D[i] = np.float64((r - q * D[i+1]) / p)
D = D * 1.e16 # Mm^2/s ==> cm^2/s
x = x * 1e8 # Mm ==> cm
pl.figure()
pl.plot(xlong,np.log10(y0long),utils.linestyle(1)[0],\
markevery=utils.linestyle(1)[1],\
label='fluid above'+' '+str(fname1))
pl.plot(xlong,np.log10(y1long),utils.linestyle(2)[0],\
markevery=utils.linestyle(2)[1],\
label='fluid above'+' '+str(fname2))
pl.ylabel('$\log\,X$ '+fluid.replace('FV',''))
pl.xlabel('r / Mm')
pl.ylim(-8,0.1)
pl.legend(loc='lower left').draw_frame(False)
if grid:
pl.grid()
pl.twinx()
pl.plot(x/1.e8,np.log10(D),'k-',\
label='$D$') #'$D > 0$')
pl.plot(x/1.e8,np.log10(-D),'k--',\
label='$D < 0$')
pl.ylabel('$\log D\,/\,{\\rm cm}^2\,{\\rm s}^{-1}$')
pl.legend(loc='upper right').draw_frame(False)
if returnY:
return x/1.e8, D, y0, y1
else:
return x/1.e8,D
def D_OPA(self,fname1,fname2,fluid='FV H+He',numtype='ndump',N=4,niter=5,
debug=False,grid=False,FVaverage=False,tauconv=None,returnY=False):
'''
Use the optimal perturbation algorithm as described in
Li & Gao (2010), International Conference on Computer
Application and System Modeling (ICCASM 2010)
Parameters
----------
fname1,fname2 : int or float
cycles from which to take initial and final abundance profiles
for the diffusion step we want to mimic.
fluid : string
Which fluid do you want to track?
numtype : string, optional
Designates how this function acts and how it interprets
fname. If numType is 'file', this function will get the
desired attribute from that file. If numType is 'NDump' the
function will look at the cycle with that nDump. If
numType is 'T' or 'time' the function will find the cycle
with the closest time stamp.
The default is "ndump".
N : integer
dimension of vector space (how many terms in the basis
function summation)
niter : integer
number of iterations of the optimal perturbation algorithm
grid : boolean, optional
whether or not to show the axes grids.
The default is False.
FVaverage : boolean, optional
Whether or not to average the abundance profiles over a
convective turnover timescale. See also tauconv.
The default is False.
tauconv : float, optional
If averaging the abundance profiles over a convective turnover
timescale, give the convective turnover timescale (seconds).
The default value is None.
returnY : boolean, optional
If True, return abundance vectors as well as radius and diffusion
coefficient vectors
The default is False.
Output
------
x : array
radial co-ordinates (Mm) for which we have a diffusion coefficient
D : array
Diffusion coefficient (cm^2/s)
Example
-------
YY=ppm.yprofile(path_to_data)
YY.D_OPA(1,640)
'''
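# Sketch of the optimal-perturbation iteration implemented below (notation
# roughly follows Li & Gao 2010): given a current guess D, solve the forward
# diffusion problem for zeta, perturb each of the N basis-function
# coefficients by tau[j] to build the sensitivity matrix
#   G[i,j] = (dus[i,j] - zeta[i]) / tau[j],
# then take the Tikhonov-regularised least-squares step
#   da = (alpha*I + G^T G)^{-1} G^T (eta - zeta)
# and update D <- D + sum_j da[j]*phi_j(x). Here eta is the target abundance
# profile and alpha is the regularisation parameter computed from the assumed
# noise level eps.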
from diffusion import mixdiff08
xlong = self.get('Y',fname=fname1,resolution='l') # for plotting
if debug: print xlong
x = xlong
def mf(fluid,fname):
'''
Get mass fraction profile of fluid 'fluid' at fname.
'''
y = self.get(fluid,fname=fname,resolution='l')
if fluid == 'FV H+He':
rhofluid = self.get('Rho H+He',fname=fname,resolution='l')
else:
rhofluid = self.get('RHOconv',fname=fname,resolution='l')
rho = self.get('Rho',fname=fname,resolution='l')
y = rhofluid * y / rho
return y
if FVaverage is False:
y1 = mf(fluid,fname2)
y1long = y1 # for plotting
y0 = mf(fluid,fname1)
y0long = y0 # for plotting
else:
if tauconv is None:
raise IOError("Please define tauconv")
# Find the dumps across which one should average:
# first profile:
myt0 = self.get('t',fname1)[-1]
myt01 = myt0 - tauconv / 2.
myt02 = myt0 + tauconv / 2.
myidx01 = np.abs(self.get('t') - myt01).argmin()
myidx02 = np.abs(self.get('t') - myt02).argmin()
mycyc01 = self.cycles[myidx01]
mycyc02 = self.cycles[myidx02]
# second profile:
myt1 = self.get('t',fname2)[-1]
myt11 = myt1 - tauconv / 2.
myt12 = myt1 + tauconv / 2.
myidx11 = np.abs(self.get('t') - myt11).argmin()
myidx12 = np.abs(self.get('t') - myt12).argmin()
mycyc11 = self.cycles[myidx11]
mycyc12 = self.cycles[myidx12]
# do the average for the first profile:
ytmp = np.zeros(len(x))
count=0
for cyc in range(mycyc01,mycyc02):
ytmp += mf(fluid,cyc)
count+=1
y0 = ytmp / float(count)
# do the average for the second profile:
ytmp = np.zeros(len(x))
count=0
for cyc in range(mycyc11,mycyc12):
ytmp += mf(fluid,cyc)
count+=1
y1 = ytmp / float(count)
y0long = y0
y1long = y1
if fluid == 'FV H+He':
y1 = y1[::-1]
x = x[::-1]
y0 = y0[::-1]
if debug: print len(xlong), len(y0long)
idx0 = np.abs(np.array(self.cycles) - fname1).argmin()
idx1 = np.abs(np.array(self.cycles) - fname2).argmin()
t0 = self.get('t')[idx0]
t1 = self.get('t')[idx1]
deltat = t1 - t0
# # now we want to exclude any zones where the abundances
# # of neighboring cells are the same. This is hopefully
# # rare inside the computational domain and limited to only
# # a very small number of zones
# indexarray = np.where(np.diff(y0) == 0)[0]
# print 'removing zones:', indexarray
# y1 = np.delete(y1,indexarray)
# y0 = np.delete(y0,indexarray)
# x = np.delete(x,indexarray)
dt = float(deltat)
# fix numbers by hand for testing:
x = np.arange(0.,2.*np.pi,0.005)
y0 = np.cos(x) + 2.
deltat = 1.
dt = 1.
y1 = mixdiff08(x,deltat=deltat,y0=y0,D=np.cos(x/2.)+1.,nst=1)
M = len(y1) # spatial dimension
G = np.zeros([M,N]) # G matrix
I = np.identity(N) # N-dimension identity matrix
D = np.zeros(M) # diffusion coefficient profile
Ds = np.zeros([M,N]) # perturbed D profiles
eta = y1 # actual solution for abundances
zeta = np.zeros(M) # solution to diffusion equation for current D
da = np.array([0.1]*N) # initial perturbation vector
dus = np.zeros([M,N]) # solutions with each N-dim coefficient perturbed
tau = np.array([1.e-1,1.e-2,1.e-2,1.e-2,1.e-2,1.e-2,1.e-2]) # numerical differential steps vector
# basis functions
def phi0(blah):
return 1.
def phi1(blah):
# return blah
return np.sin(blah)
def phi2(blah):
# return blah**2
# return np.exp(blah)
return np.cos(blah)
def phi3(blah):
return blah**3
# return np.exp(2.*blah)
return np.sin(2.*blah)
def phi4(blah):
return blah**4
# return np.exp(3.*blah)
return np.cos(2.*blah)
def phi5(blah):
# return blah**5
# return np.exp(4.*blah)
return np.sin(3.*blah)
def phi6(blah):
# return blah**5
# return np.exp(4.*blah)
return np.cos(3.*blah)
basis = [phi0,phi1,phi2,phi3,phi4,phi5,phi6]
def build_D(coeffs):
'''
make the D profile given the coefficients of the
basis function
'''
localD = np.zeros(M)
for i in range(M):
for j in range(N):
localD[i] += coeffs[j] * basis[j](x[i])
return localD
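# In other words, D(x_i) = sum_{j<N} coeffs[j]*phi_j(x_i); only the first N of
# the seven basis functions defined above enter the expansion (N = 4 by
# default).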
# make initial guess for D as constant from initial da vector,
# assuming the initial guess is given by a = da
#D = build_D(da)
D[:] = 1. # initial guess
D[1] = -0.5
pl.figure(1000000)
pl.plot(x,y0,utils.linestyle(1)[0],\
markevery=utils.linestyle(1)[1],\
label='fluid above'+' '+str(fname1))
pl.plot(x,y1,utils.linestyle(2)[0],\
markevery=utils.linestyle(2)[1],\
label='fluid above'+' '+str(fname2))
pl.ylabel('$\log\,X$ '+fluid.replace('FV',''))
pl.xlabel('r / Mm')
pl.twinx()
pl.plot(x,np.cos(x/2.)+1.,'k-',label='$D>0$')
# pl.ylim(-8,0.1)
# begin iterative loop:
for iter in range(niter):
# solve diffusion equation for D:
zeta = mixdiff08(x,deltat=deltat,y0=y0,D=D,nst=1)
pl.figure(iter)
pl.suptitle('iteration '+str(iter))
pl.plot(x,y0,utils.linestyle(1)[0],\
markevery=utils.linestyle(1)[1],\
label='fluid above'+' '+str(fname1))
pl.plot(x,zeta,utils.linestyle(2)[0],\
markevery=utils.linestyle(2)[1],\
label='fluid above'+' '+str(fname2))
pl.ylabel('$\log\,X$ '+fluid.replace('FV',''))
pl.xlabel('r / Mm')
# pl.ylim(-8,0.1)
pl.legend(loc='lower left').draw_frame(False)
pl.twinx()
pl.plot(x,D,'k-',label='$D>0$')
# pl.plot(x,np.log10(-D),'k:',label='$D<0$')
pl.ylabel('$\log D\,/\,{\\rm cm}^2\,{\\rm s}^{-1}$')
pl.legend(loc='upper right').draw_frame(False)
# perturb the D profile and solve the diffusion equation N more times
# (once for each perturbed basis function coefficient):
for i in range(N):
Ds[:,i] = np.array([D[i] + tau[i]*basis[i](x[l]) for l in range(M)])
dus[:,i] = mixdiff08(x,deltat=deltat,y0=y0,D=Ds[:,i],nst=1)
# Now we can form the G matrix:
for i in range(M):
for j in range(N):
G[i,j] = (dus[i,j] - zeta[i]) / tau[j]
print 'max and min values in matrix G:'
print np.max(G), np.min(G)
# calculate regularization parameter:
eps = 0.3 # noise level of data; I think 0.1 == 10%
alpha = 4. ** (-2./3.) * eps ** (2./3.) * np.min(G) ** 2
length = np.linalg.norm(eta - zeta)
alpha = alpha / (length ** (2./3.))
GT = np.transpose(G)
GTG = np.dot(GT,G)
toinv = alpha * I + GTG
linv = np.linalg.inv(toinv)
dif = eta - zeta
right = np.dot(GT,dif)
da = np.dot(linv,right) # optimal perturbation vector
print da
# update the D profile with the suggested perturbations:
dD = build_D(da)
for i in range(M):
D[i] = D[i] + dD[i]
if returnY:
return x/1.e8, D, y0, y1
else:
return x/1.e8,D
def entrainment_rate_MA(self,transient,tend=-1.,rad_upper_int=None,
sparse=1,plots=1.):
'''
Calculate the entrainment rate.
Parameters
----------
transient : float
time (s) of the initial transient period that you do not
want to include in the calculation of the entrainment rate.
tend : float, optional
cutoff time for considering the entrainment rate, so that
the result is calculated for simulation times between
the value of transient and tend. If -1 then do until the
end of the available dataset
rad_upper_int: float
upper boundary for integration of entrained material, if not
specified the upper boundary will be located where the FV H+He
gradient is steepest (needs to be debugged)
plots: float
If plots=1, the function prints the entrainment rate and produces
a plot. If plots is not equal to 1, the function returns the
entrainment rate and does not make a plot.
Examples
--------
import ppm
run1='/Users/swj/Mnt/CADC/swj/PPM/RUNS_DATA/O-shell-M25/D1'
YY=ppm.yprofile(run1)
ppm.set_nice_params()
YY.entrainment_rate_MA(300)
Notes
-----
Optionally the upper boundary can be determined as in
Meakin & Arnett (2007), d(Eq. 28)/dt; see parameter rad_upper_int.
'''
def steepest(fname):
'''
Returns the index of the zone with the steepest H+He
fractional volume gradient. This is used as the
definition of the 'surface' of the convection zone
in Meakin & Arnett (2007).
'''
r = self.get('Y',fname=fname,resolution='l')
x = self.get('FV H+He',fname=fname,resolution='l')
dxdr = np.diff(x) / np.diff(r)
idx = dxdr.argmax()
return idx
cycs = self.cycles
time = self.get('t')
istart = np.abs(time-transient).argmin()
if tend == -1.:
istop = -1
else:
istop = np.abs(time-tend).argmin()
cycs = cycs[istart:istop:sparse]
time = time[istart:istop:sparse]
def Mi(c,idxb=None):
if idxb is None:
idxb = steepest(c)
r = self.get('Y',fname=c,resolution='l')[idxb:-1][::-1]*1.e8 # cgs
r2 = r*r
rho = self.get('Rho',fname=c,resolution='l')[idxb:-1][::-1]*1.e3 # cgs
dr = np.average(np.diff(r))
rho1 = self.get('Rho H+He',fname=c,resolution='l')[idxb:-1][::-1]*1.e3
FV1 = self.get('FV H+He',fname=c,resolution='l')[idxb:-1][::-1]
X = rho1 * FV1 / rho
dm = 4.*np.pi*r2*rho*dr
dm = dm / ast.msun_g
result = sum(X*dm)
return result
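# Mi is the entrained-mass diagnostic in the spirit of Meakin & Arnett (2007)
# referred to in the docstring above:
#   M_i = sum_k X_k * 4*pi*r_k**2 * rho_k * dr / Msun,
# with X = rho_HHe*FV_HHe/rho the H+He mass fraction, summed over the zones on
# one side of the convective boundary (located by steepest() unless idxb is
# given explicitly).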
idxb = None
if rad_upper_int is not None:
r = self.get('Y',fname=cycs[0],resolution='l')
idxb = abs(r-rad_upper_int).argmin()
Mir = np.array([Mi(cyc,idxb) for cyc in cycs])
fit=np.polyfit(time,Mir,1) # linear
timelong = time
timelong = np.append(timelong,timelong[-1]*1.1)
timelong = np.insert(timelong,0,timelong[0]*.9)
yfit = fit[0]*timelong + fit[1]
# printing and plotting:
if plots == 1.:
print 'entrainment rate = '+str(fit[0])+' Msun / s'
ylow = yfit[0]
yfit = (yfit - ylow) / 1.e-5
Mir = (Mir - ylow) / 1.e-5
pl.figure()
try:
from utils import colourblind as cb
pl.plot(time/60.,Mir,marker='o',color=cb(10),markevery=20)
pl.plot(timelong/60.,yfit,label='linear',color=cb(4))
except:
pl.plot(time/60.,Mir,marker='o',color='r',markevery=20)
pl.plot(timelong/60.,yfit,label='linear',color='k')
pl.xlabel('t / min')
pl.ylabel('$(M_i - $'+str(round(ylow,5))+') / $10^{-5}\,M_\odot$')
pl.legend(loc='best')
return fit[0]
def entrainment_rate(self, cycles, r_min, r_max, var='vxz', criterion='min_grad', \
offset=0., integrate_both_fluids=False,
integrate_upwards=False, show_output=True, ifig0=1, \
show_fits=True, mdot_curve_label=None, file_name=None,
return_time_series=False):
def regrid(x, y, x_int):
int_func = interpolate.CubicSpline(x[::-1], y[::-1])
return int_func(x_int)
def diff(x):
dx = 0.5*(np.roll(x, -1) - np.roll(x, +1))
dx[0] = dx[1]
dx[-1] = dx[-2]
return dx
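# diff() is a centred difference with copied end points, e.g. (illustration
# only): diff(np.array([0., 1., 4., 9.])) -> array([2., 2., 4., 4.]).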
r = self.get('Y', fname = cycles[0], resolution='l')
idx_min = np.argmin(np.abs(r - r_max))
idx_max = np.argmin(np.abs(r - r_min))
r_min = r[idx_max]
r_max = r[idx_min]
r = r[idx_min:(idx_max + 1)]
r_int = np.linspace(r_min, r_max, num = 20.*(idx_max - idx_min + 1))
dr_int = diff(r_int)
time = np.zeros(len(cycles))
r_b = np.zeros(len(cycles))
r_top = np.zeros(len(cycles))
for i in range(len(cycles)):
time[i] = self.get('t', fname = cycles[i], resolution='l')[-1]
if var == 'vxz':
q = self.get('EkXZ', fname = cycles[i], resolution='l')[idx_min:(idx_max + 1)]**0.5
else:
q = self.get(var, fname = cycles[i], resolution='l')[idx_min:(idx_max + 1)]
q_int = regrid(r, q, r_int)
grad = diff(q_int)/dr_int
if criterion == 'min_grad':
idx_b = np.argmin(grad)
elif criterion == 'max_grad':
idx_b = np.argmax(grad)
else:
idx_b = np.argmax(np.abs(grad))
r_b[i] = r_int[idx_b]
r_top[i] = r_b[i]
# Optionally offset the integration limit by a multiple of q's
# scale height.
if np.abs(grad[idx_b]) > 0.:
H_b = q_int[idx_b]/np.abs(grad[idx_b])
r_top[i] += offset*H_b
timelong = time
delta = 0.05*(np.max(time) - np.min(time))
timelong = np.insert(timelong,0, timelong[0] - delta)
timelong = np.append(timelong, timelong[-1] + delta)
# fc = fit coefficients
r_b_fc = np.polyfit(time, r_b, 1)
r_b_fit = r_b_fc[0]*timelong + r_b_fc[1]
r_top_fc = np.polyfit(time, r_top, 1)
r_top_fit = r_top_fc[0]*timelong + r_top_fc[1]
m_ir = np.zeros(len(cycles))
r = self.get('Y', fname = cycles[0], resolution='l')
r_int = np.linspace(np.min(r), np.max(r), num = 20.*len(r))
dr_int = diff(r_int)
for i in range(len(cycles)):
if integrate_both_fluids:
rho = self.get('Rho', fname = cycles[i], resolution='l')
else:
rho_HHe = self.get('Rho H+He', fname = cycles[i], resolution='l')
FV_HHe = self.get('FV H+He', fname = cycles[i], resolution='l')
rho = rho_HHe*FV_HHe
rho_int = regrid(r, rho, r_int)
idx_top = np.argmin(np.abs(r_int - r_top[i]))
dm = 4.*np.pi*r_int**2*dr_int*rho_int
if integrate_upwards:
m_ir[i] = np.sum(dm[(idx_top + 1):-1])
else:
m_ir[i] = np.sum(dm[0:(idx_top + 1)])
# fc = fit coefficients
m_ir *= 1e27/ast.msun_g
m_ir_fc = np.polyfit(time, m_ir, 1)
m_ir_fit = m_ir_fc[0]*timelong + m_ir_fc[1]
if integrate_upwards:
mdot = -m_ir_fc[0]
else:
mdot = m_ir_fc[0]
if show_output:
cb = utils.colourblind
pl.close(ifig0); fig1 = pl.figure(ifig0)
pl.plot(time/60., r_top, color = cb(5), ls = '-', label = r'r$_\mathrm{top}$')
pl.plot(time/60., r_b, color = cb(8), ls = '--', label = r'r$_\mathrm{b}$')
if show_fits:
pl.plot(timelong/60., r_top_fit, color = cb(4), ls = '-', lw = 0.5)
pl.plot(timelong/60., r_b_fit, color = cb(4), ls = '-', lw = 0.5)
pl.xlabel('t / min')
pl.ylabel('r / Mm')
xfmt = ScalarFormatter(useMathText = True)
pl.gca().xaxis.set_major_formatter(xfmt)
pl.legend(loc = 0)
fig1.tight_layout()
print 'r_b is the radius of the convective boundary.'
print 'r_b_fc = ', r_b_fc
print 'dr_b/dt = {:.2e} km/s\n'.format(1e3*r_b_fc[0])
print 'r_top is the upper limit for mass integration.'
print 'dr_top/dt = {:.2e} km/s'.format(1e3*r_top_fc[0])
max_val = np.max(m_ir)
if show_fits:
max_val = np.max((max_val, np.max(m_ir_fit)))
max_val *= 1.1 # allow for some margin at the top
oom = int(np.floor(np.log10(max_val)))
pl.close(ifig0 + 1); fig2 = pl.figure(ifig0 + 1)
lbl = 'measured time series'
if mdot_curve_label is not None:
lbl = mdot_curve_label
pl.plot(time/60., m_ir/10**oom, color = cb(5), label = lbl)
mdot_str = '{:e}'.format(mdot)
parts = mdot_str.split('e')
mantissa = float(parts[0])
exponent = int(parts[1])
if show_fits:
if integrate_upwards:
lbl = r'$\dot{{\mathrm{{M}}}}_\mathrm{{a}} = {:.2f} \times 10^{{{:d}}}$ M$_\odot$ s$^{{-1}}$'.\
format(-mantissa, exponent)
else:
lbl = r'$\dot{{\mathrm{{M}}}}_\mathrm{{e}} = {:.2f} \times 10^{{{:d}}}$ M$_\odot$ s$^{{-1}}$'.\
format(mantissa, exponent)
pl.plot(timelong/60., m_ir_fit/10**oom, color = cb(4), ls = '-', lw = 0.5, label = lbl)
pl.xlabel('t / min')
if integrate_upwards:
sub = 'a'
else:
sub = 'e'
ylbl = r'M$_{:s}$ / 10$^{{{:d}}}$ M$_\odot$'.format(sub, oom)
if oom == 0.:
ylbl = r'M$_{:s}$ / M$_\odot$'.format(sub)
pl.ylabel(ylbl)
yfmt = FormatStrFormatter('%.1f')
fig2.gca().yaxis.set_major_formatter(yfmt)
fig2.tight_layout()
if integrate_upwards:
loc = 1
else:
loc = 2
pl.legend(loc = loc)
if file_name is not None:
fig2.savefig(file_name)
print 'Resolution: {:d}^3'.format(2*len(r))
print 'm_ir_fc = ', m_ir_fc
print 'Entrainment rate: {:.3e} M_Sun/s'.format(mdot)
if return_time_series:
return m_ir
else:
return mdot
def vaverage(self,vi='v',transient=0.,sparse=1):
'''
plots and returns the average velocity profile for a given
orientation (total, radial or tangential) over a range of dumps
and excluding an initial user-specified transient in seconds.
'''
cycs = self.cycles
time = self.get('t')
istart = np.abs(time-transient).argmin()
cycs = cycs[istart::sparse]
Y = self.get('Y',fname=1,resolution='l')
if vi == 'v':
Ei ='Ek'
ylab='$\log~v_\mathrm{tot}$'
if vi == 'vY':
Ei = 'EkY'
ylab='$\log~v_\mathrm{Y}$'
if vi == 'vXZ':
Ei = 'EkXZ'
ylab='$\log~v_\mathrm{XZ}$'
vav = np.zeros(len(Y))
for cyc in cycs:
Ek = self.get(Ei,fname=cyc,resolution='l')
if vi == 'v':
v = np.sqrt(2.*array(Ek,dtype=float))
else:
v = np.sqrt(array(Ek,dtype=float))
vav += v
vav = vav * 1.e8 / len(cycs) # average in cm / s
pl.figure()
pl.plot(Y,np.log10(vav),'r-')
pl.ylabel(ylab)
pl.xlabel('r / Mm')
return vav
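# Minimal usage sketch (the run directory is a placeholder, not a real path):
# YY = yprofile('/path/to/YProfile/run')
# v_Y = YY.vaverage(vi='vY', transient=300., sparse=5)
# returns (and plots) the average vY velocity profile in cm/s over every 5th
# dump after the first 300 s of the simulation.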
# below are some utilities that the user typically never calls directly
def readTop(self,atri,filename,stddir='./'):
"""
Private routine that finds and returns the value associated with
attribute atri in the header section of the file.
Input:
atri, the attribute we are looking for.
filename, the file in which we are looking.
stddir, the directory in which we are looking; defaults to the
working directory.
"""
if stddir.endswith('/'):
filename = str(stddir)+str(filename)
else:
filename = str(stddir)+'/'+str(filename)
f=open(filename,'r')
headerLines=[]
header=[]
headerAttri=[]
for i in range(0,10): # Read out the header section of the file.
line = f.readline()
line=line.strip()
if line != '':
headerLines.append(line)
f.close()
for i in range(len(headerLines)): # for each line of header data, split on occurrences of ' '
header.extend(headerLines[i].split(' '))
header[i]=header[i].strip()
for i in range(len(header)):
tmp=header[i].split('=')# for each line split up on occurrences of =
if len(tmp)!=2: # If there are not two parts, add the unsplit line to headerAttri
tmp=[]
tmp.append(header[i].strip())
headerAttri.append(tmp)
elif len(tmp)==2: # If there are two parts, add the list of the two parts to headerAttri
tmp[0]=tmp[0].strip()
tmp[1]=tmp[1].strip()
headerAttri.append([tmp[0],tmp[1]])
for i in range(len(headerAttri)):
if atri in headerAttri[i]: # if the header attribute equals atri, return its associated value
value=headerAttri[i][1]
value =value.partition(' ')
value=value[0]
return value
def _readFile(self):
"""
private routine that is not directly called by the user.
filename is the name of the file we are reading
stdDir is the location of filename, defaults to the
working directory
Returns a list of the header attributes with their values
and a List of the column values that are located in this
particular file and a list of directory attributes.
Assumptions:
An attribute can't be in the form of a number: if
the user can float(attribute) without an error, the
attribute will not be returned.
Lines of attributes are followed and preceded by
blank lines.
"""
filename = os.path.join(self.sldir,self.slname)
f=open(filename,'r')
line=''
headerLines=[] # List of lines in the header section of the YProfile
header=[] # Single line of header data
tmp=[]
tmp2=[]
headerAttri=[] # Final list of header data to be returned
colAttri=[] # Final list of column attributes to be returned
cycAttri=[] # Final list of cycle attributes to be returned
for i in range(0,10): # read the first 10 lines of the YProfile
# Add the line to headerLines if the line is not empty
line = f.readline()
line=line.strip()
if line != '':
headerLines.append(line)
for i in range(len(headerLines)): # For each line split on occurrences of ' '
# And then clean up any extra whitespace.
header.extend(headerLines[i].split(' '))
header[i]=header[i].strip()
for i in range(len(header)):# for each line split up on occurrences of =
tmp=header[i].split('=')
if len(tmp)!=2: # If there are not two parts, add the unsplit line to headerAttri
tmp=[]
tmp.append(header[i].strip())
headerAttri.append(tmp)
elif len(tmp)==2: # If there are two parts, add the list of the two parts to headerAttri
tmp[0]=tmp[0].strip()
tmp[1]=tmp[1].strip()
headerAttri.append([tmp[0],tmp[1]])
lines= f.readlines()
boo = True
ndump=False
attri=[]
for i in range(len(lines)-2): #for the length of the file
if lines[i] =='\n'and lines[i+2]=='\n': # If there is a blank line,
#that is followed by some line and by another blank line
# it means the second line is a line of attributes
line = lines[i+1] # line of attributes
line=line.split(' ') # split it up on occurrences of ' '
for j in range(len(line)): #Clean up any excess whitespace
if line[j]!='': #And add it to a list of attributes
attri.append(line[j].strip())
for j in range(len(attri)):
"""
if attri[j]=='Ndump':
i = len(lines)
break
"""
for k in range(len(colAttri)): # If it is not already in the list of attributes
# add it
if colAttri[k]==attri[j]:
boo = False
break
if boo :
colAttri.append(attri[j])
boo=True
tmp=[]
for i in range(len(colAttri)):#gets rid of blank lines in the list
if colAttri[i]!='':
tmp.append(colAttri[i])
colAttri=tmp
tmp=[]
for i in range(len(colAttri)):#gets rid of numbers in the list
try:
float(colAttri[i])
except ValueError:
tmp.append(colAttri[i])
colAttri=tmp
tmp=[]
# NOTE at this point in the program colAttri is a unified list of Column attributes and Cycle Attributes
for i in range(len(colAttri)): #Here we split up our list into Column attributes and Cycle Attributes
if colAttri[i]=='Ndump':
# If we get to Ndump in our list of attributes, then any following attributes are cycle attributes
ndump=True
if not ndump:
tmp.append(colAttri[i])
else:
cycAttri.append(colAttri[i])
colAttri=tmp
f.close()
return headerAttri,colAttri, cycAttri
def spacetime_diagram(self, var_name, nt, fig, tlim=None, rlim=None, vlim=None, logscale=True, \
cmap='viridis', aspect=1./3., zero_intervals=None, patience0 = 5, patience = 30, \
**kwargs):
if var_name == 'Ek':
cbar_lbl = r'e$_\mathrm{k}$ / erg g$^{-1}$'
unit = 1e43/1e27
elif var_name == 'enuc_C12pg':
cbar_lbl = r'$\epsilon_\mathrm{C12pg}$ / erg cm$^{-3}$ s$^{-1}$'
unit = 1e43/1e24
else:
cbar_lbl = var_name
unit = 1.
r = self.get('Y', fname = 0, resolution = 'l')
if rlim is None:
rlim = [r[-1], r[0]]
ridx0 = np.argmin(np.abs(r - rlim[1]))
ridx1 = np.argmin(np.abs(r - rlim[0]))
nr = ridx1-ridx0+1
ridx = np.linspace(ridx0, ridx1, num=nr, dtype=np.int32)
r = r[ridx]
if tlim is None:
tlim = [0., 0.]
tlim[0] = self.get('t', silent = True)[0]
tlim[1] = self.get('t', silent = True)[-1]
t = np.linspace(tlim[0], tlim[1], nt)
zero = np.zeros(nt, dtype = bool)
if zero_intervals is not None:
for i in range(len(zero_intervals)/2):
idx = where((t >= zero_intervals[2*i]) & \
(t <= zero_intervals[2*i + 1]))
zero[idx] = True
var = np.zeros((nr, nt))
t00 = time.time()
t0 = t00
n = 0
k = 0
n_nonzero = nt - np.count_nonzero(zero)
for i in range(nt):
if zero[i]:
continue
var[:, i] = unit*self.get(var_name, fname = t[i], numtype = 'time', \
resolution = 'l', silent = True, **kwargs)[ridx]
n += 1
t_now = time.time()
if (t_now - t0 >= patience) or \
((t_now - t00 < patience) and (t_now - t00 >= patience0) and (k == 0)):
time_per_dump = (t_now - t00)/float(n)
time_remaining = (n_nonzero - n - 1.)*time_per_dump
print 'Processing will be done in {:.0f} s.'.format(time_remaining)
t0 = t_now
k += 1
if vlim is None:
if logscale:
vlim = [np.min(var[where(var > 0)]), \
np.max(var[where(var > 0)])]
else:
vlim = [np.min(var), np.max(var)]
print 'vlim = [{:.3e}, {:.3e}]'.format(vlim[0], vlim[1])
var[where(var < vlim[0])] = vlim[0]
var[where(var > vlim[1])] = vlim[1]
ax1 = fig.add_subplot(111)
extent = (t[0]/60., t[-1]/60., r[-1], r[0])
aspect *= (extent[1] - extent[0])/(extent[3] - extent[2])
if logscale:
norm = colors.LogNorm(vmin=vlim[0], vmax=vlim[1], clip=True)
else:
norm = colors.Normalize(vmin=vlim[0], vmax=vlim[1], clip=True)
ax1i = ax1.imshow(var, aspect = aspect, cmap = cmap, extent = extent, \
norm=norm, interpolation = 'spline16')
ax1.get_yaxis().set_tick_params(which='both', direction='out')
ax1.get_xaxis().set_tick_params(which='both', direction='out')
ax1.set_xlabel('t / min')
ax1.set_ylabel('r / Mm')
cbar = fig.colorbar(ax1i, orientation='vertical')
cbar.set_label(cbar_lbl)
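# Minimal usage sketch (the data path is a placeholder, not a real run):
# yp = yprofile('/path/to/YProfile/run')
# fig = pl.figure(figsize=(10, 4))
# yp.spacetime_diagram('Ek', 200, fig, logscale=True)
# draws the specific kinetic energy on a logarithmic colour scale as a
# function of radius and time, sampled at 200 time points.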
class rprofile(DataPlot):
"""
Data structure for holding data in the RProfile*.bobaaa files.
Parameters
----------
sldir : string
which directory we are working in. The default is '.'.
"""
def __init__(self, sldir='.'):
"""
init method
Parameters
----------
sldir : string
which directory we are working in. The default is '.'.
"""
self.files = [] # List of files in this directory
self.cycles = [] # list of cycles in this directory
self.hattrs = [] # header attributes
self.dcols = [] # list of the column attributes
self.cattrs= [] # List of the attributes of the y profiles
self._cycle=[] # private var
self._top=[] # private var
self.sldir = sldir #Standard Directory
if not os.path.exists(sldir): # then try the VOSpace mount
try:
sldir = ppm_path+'/'+sldir
except:
print 'VOSpace not mounted and '+sldir+' does not exist locally'
if not os.path.exists(sldir): # If the path still does not exist
print 'error: Directory, '+sldir+ ' not found'
print 'Now returning None'
return None
else:
f=os.listdir(sldir) # reads the directory
for i in range(len(f)): # Removes any files that are not RProfile files
if 'RProfile' in f[i] and '.bobaaa' in f[i] and 'ps' not in f[i] :
self.files.append(f[i])
self.files.sort()
if len(self.files)==0: # If there are no RProfile files in this directory
print 'Error: no RProfile named files exist in Directory'
print 'Now returning None'
return None
slname=self.files[0]
print "Reading attributes from file ",slname,sldir
self.hattrs, self.dcols, self._cycle = self._readFile(slname,sldir)
return
# self._splitHeader() #Splits the header into header attributes and top attributes
# self.hattrs=self._formatHeader() # returns the header attributes as a dictionary
# self.cattrs=self.getCattrs() # returns the concatenation of Cycle and Top Attributes
# self.ndumpDict=self.ndumpDict(self.files)
# print 'There are '+str(len(self.files)) + ' RProfile files in the ' +self.sldir+' directory.'
# print 'Ndump values range from ' + str(min(self.ndumpDict.keys()))+' to '+str(max(self.ndumpDict.keys()))
# t=self.get('t',max(self.ndumpDict.keys()))
# t1=self.get('t',min(self.ndumpDict.keys()))
# print 'Time values range from '+ str(t1[-1])+' to '+str(t[-1])
# self.cycles=self.ndumpDict.keys()
def _readFile(self,filename,stddir='./'):
"""
private routine that is not directly called by the user.
filename is the name of the file we are reading
stdDir is the location of filename, defaults to the
working directory
Returns a list of the header attributes with their values
and a List of the column values that are located in this
particular file
"""
if stddir.endswith('/'):
filename = str(stddir)+str(filename)
else:
filename = str(stddir)+'/'+str(filename)
self.rp = rprofile_reader(filename)
header_keys = self.rp.header_attrs.keys()
data_columns = self.rp.names
dump_keys = ['dump']
return header_keys, data_columns, dump_keys
# def get(self, attri, fname=None, numtype='ndump', resolution='H'):
def get(self, attri, dataset=None):
"""
Method that dynamically determines the type of attribute that is
passed into this method. Also it then returns that attribute's
associated data.
Parameters
----------
attri : string
The attribute we are looking for.
dataset : string
The dataset from which to read the attribute (currently unused).
"""
return self.rp.get(attri)
# isCyc=False #If Attri is in the Cycle Atribute section
# isCol=False #If Attri is in the Column Atribute section
# isHead=False #If Attri is in the Header Atribute section
# if fname==None:
# fname=max(self.ndumpDict.keys())
# if attri in self.cattrs: # if it is a cycle attribute
# isCyc = True
# elif attri in self.dcols:# if it is a column attribute
# isCol = True
# elif attri in self.hattrs:# if it is a header attribute
# isHead = True
# # directing to proper get method
# if isCyc:
# return self.getCycleData(attri,fname, numtype, resolution=resolution)
# if isCol:
# return self.getColData(attri,fname, numtype, resolution=resolution)
# if isHead:
# return self.getHeaderData(attri)
# else:
# print 'That Data name does not appear in this YProfile Directory'
# print 'Returning none'
# return None
##########################################################
# mapping of visualisation variables from Robert Andrassy:
##########################################################
@np.vectorize
def map_signed(x, p0, p1, s0, s1):
'''
This function emulates the mapping of signed variables in PPMstar.
x: input value in code units; can be a single number or a vector
p0, p1: mapping parameters used in the first scaling step
s0, s1: mapping parameters used in the last scaling step (just before
the conversion to an integer)
'''
thyng = (x - p0)*p1
thang = thyng * thyng + 1.
thang = np.sqrt(thang)
thyng = thyng + thang
thyng = thyng * thyng
thang = thyng + 1.
thyng = thyng / thang
y = s1 * thyng + s0
return y
@np.vectorize
def inv_map_signed(y, p0, p1, s0, s1):
'''
This function inverts map_signed().
'''
if y <= s0:
x = -np.inf
elif y >= s0 + s1:
x = np.inf
else:
def func(x):
return y - map_signed(x, p0, p1, s0, s1)
x = optimize.newton(func, 0.)
return x
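# Illustrative round trip (the mapping parameters are made-up values, not
# taken from any particular run):
# y = map_signed(0.01, p0=0., p1=100., s0=5., s1=245.)   # code units -> colour index in (s0, s0+s1)
# x = inv_map_signed(y, p0=0., p1=100., s0=5., s1=245.)  # numerically recovers ~0.01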
@np.vectorize
def map_posdef(x, p0, p1, s0, s1):
'''
This function emulates the mapping of positive definite variables in PPMstar.
x: input value in code units; can be a single number or a vector
p0, p1: mapping parameters used in the first scaling step
s0, s1: mapping parameters used in the last scaling step (just before
the conversion to an integer)
'''
thyng = (x - p0)*p1
thang = thyng * thyng + 1.
thang = np.sqrt(thang)
thyng = thyng + thang
thyng = thyng * thyng
thang = thyng + 1.
thyng = (thyng - 1.) / thang
y = s1 * thyng + s0
return y
@np.vectorize
def inv_map_posdef(y, p0, p1, s0, s1):
'''
This function inverts map_posdef().
'''
if y <= s0:
x = -np.inf
elif y >= s0 + s1:
x = np.inf
else:
def func(x):
return y - map_posdef(x, p0, p1, s0, s1)
x = optimize.newton(func, 0.)
return x
def make_colourmap(colours, alphas=None):
'''
make a matplotlib colormap given a list of index [0-255], RGB tuple values
(normalised 0-1) and (optionally) a list of alpha index [0-255] and alpha values, i.e.:
colours = [[0, (0., 0., 0.)], [1, (1., 1., 1.)]]
alphas = [[0, 0.], [1, 1.]]
'''
indices_normed = np.array([float(c[0])/255. for c in colours])
# enforce [0-1]
if indices_normed[-1] != 1.:
print 'first/last colour indices:', indices_normed[-1]
print 'correcting to 1.'
indices_normed[-1] = 1.
rgb = colours
cdict = {'red': [], 'green': [], 'blue': []}
for i in range(len(colours)):
myrgb = rgb[i][1]
cdict['red'].append([indices_normed[i], myrgb[0], myrgb[0]])
cdict['green'].append([indices_normed[i], myrgb[1], myrgb[1]])
cdict['blue'].append([indices_normed[i], myrgb[2], myrgb[2]])
if alphas!=None:
cdict['alpha'] = []
indices_normed = np.array([float(a[0]) / 255. for a in alphas])
alpha = alphas
for i in range(len(alphas)):
myalpha = alpha[i][1]
cdict['alpha'].append([indices_normed[i], myalpha, myalpha])
cmap = matplotlib.colors.LinearSegmentedColormap('ppm', cdict, N=1536)
return cmap
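# Minimal usage sketch (colour/alpha values are illustrative): a grey-scale
# map that fades from transparent black to opaque white, following the
# index/RGB format described in the docstring above.
# colours = [[0, (0., 0., 0.)], [255, (1., 1., 1.)]]
# alphas = [[0, 0.], [255, 1.]]
# cmap = make_colourmap(colours, alphas=alphas)
# pl.imshow(np.random.rand(16, 16), cmap=cmap)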
def colourmap_from_str(str, segment=None):
points = []
for line in str.splitlines():
parts = line.split()
if (len(parts) == 5) and (parts[0] == 'Cnot:'):
points.append(parts[1:])
points = np.array(points, dtype=np.float)
points = points[points[:,0].argsort()]
if segment is not None:
# Index of the first point with value > segment[0].
idx0 = np.argmax(points[:, 0] > segment[0])
if idx0 > 0:
t = (float(segment[0]) - points[idx0 - 1, 0])/ \
(points[idx0, 0] - points[idx0 - 1, 0])
new_point = (1. - t)*points[idx0 - 1, :] + t*points[idx0, :]
points = np.vstack([new_point, points[idx0:, :]])
# Index of the first point with value > segment[1].
idx1 = np.argmax(points[:, 0] > segment[1])
if idx1 > 0:
t = (float(segment[1]) - points[idx1 - 1, 0])/ \
(points[idx1, 0] - points[idx1 - 1, 0])
if t > 0.:
new_point = (1. - t)*points[idx1 - 1, :] + t*points[idx1, :]
points = np.vstack([points[0:idx1, :], new_point])
else:
points = points[0:idx1, :]
p0 = points[0, 0]
p1 = points[-1, 0]
for i in range(points.shape[0]):
points[i, 0] = (points[i, 0] - p0)/(p1 - p0)
r = np.zeros((points.shape[0], 3))
r[:, 0] = points[:, 0]
r[:, 1] = points[:, 1]
r[:, 2] = points[:, 1]
g = np.zeros((points.shape[0], 3))
g[:, 0] = points[:, 0]
g[:, 1] = points[:, 2]
g[:, 2] = points[:, 2]
b = np.zeros((points.shape[0], 3))
b[:, 0] = points[:, 0]
b[:, 1] = points[:, 3]
b[:, 2] = points[:, 3]
cmap_points = {'red': r, 'green': g, 'blue': b}
cmap = matplotlib.colors.LinearSegmentedColormap('my_cmap', cmap_points)
return cmap
class LUT():
def __init__(self, lutfile, p0, p1, s0, s1, posdef=False):
'''given a LUT file from the PPMstar visualisation software and the
colour compression variables s0, s1, p0 and p1 that were used to
compress the values of the variable being visualised (e.g., radial
velocity), this object contains the information needed to draw
colourbars with matplotlib for a PPMstar volume rendering.
The values of s0, s1, p0 and p1 should be stored in the file
compression_variables.txt in the setup directory of the respective
project.
Examples:
---------
import ppm
lut = ppm.LUT('./LUTS/BW-1536-UR-3.lut', s0=5., s1=245.499,
p0=0., p1=1.747543E-02/8.790856E-03, posdef=False)
cbar = lut.make_colourbar([-1,-0.5,-0.25,-0.1,0,0.1,0.25,0.5,1])
cbar.set_label('$v_\mathrm{r}\,/\,1000\,km\,s^{-1}$',size=6)
draw()
'''
self.colours = []
self.alphas = []
self.cmap = None
self.s0 = s0
self.s1 = s1
self.p0 = p0
self.p1 = p1
# is the variable positive definite (otherwise signed):
self.posdef = posdef
with open(lutfile,'r') as f:
nlines = len(f.readlines())
f.seek(0)
for i in range(nlines):
spl = f.readline().split()
index = int(spl[1])
if spl[0][0] == 'C':
rgb_tuple = (float(spl[2]),
float(spl[3]),
float(spl[4]))
self.colours.append([index, rgb_tuple])
elif spl[0][0] == 'A':
self.alphas.append([index, float(spl[2])])
else:
raise IOError("unrecognised LUT file format")
f.close()
self.ncolours = len(self.colours)
self.nalphas = len(self.alphas)
def make_colourbar(self, ticks=[], horizontal=True, background=(0,0,0),
scale_factor=1.):
'''make a colourbar for a PPMstar volume rendering given the mapping
between the [0-255] colour indices and the values to which they
correspond. returns a matplotlib.pyplot.colorbar instance, for ease of
editing the colorbar
Parameters:
-----------
ticks: numpy array
at which values of the variable you would like to have ticks on the
colourbar
scale_factor: float
dividing your ticks by this number should give the real values in
code units. so, if I give ticks in km/s, I should give
scale_factor=1.e3
Examples:
---------
import ppm
lut = ppm.LUT('./LUTS/BW-1536-UR-3.lut', s0=5., s1=245.499, p0=0., p1=1.747543E-02/8.790856E-03, posdef=False)
cbar=lut.make_colourbar(np.linspace(-100,100,5),background=(0.4117647058823529,0.4117647058823529,0.4235294117647059),scale_factor=1.e3)
cbar.set_label('$v_\mathrm{r}\,/\,km\,s^{-1}$',size=6)
draw()
'''
if background != (1,1,1):
fgcolor = (1, 1, 1)
pl.rcParams['text.color'] = fgcolor
pl.rcParams['xtick.color'] = fgcolor
pl.rcParams['ytick.color'] = fgcolor
pl.rcParams['axes.edgecolor'] = fgcolor
pl.rcParams['axes.labelcolor'] = fgcolor
pl.rcParams['figure.edgecolor'] = background
pl.rcParams['figure.facecolor'] = background
pl.rcParams['savefig.edgecolor'] = background
pl.rcParams['savefig.facecolor'] = background
pl.rcParams['axes.facecolor'] = background
colours = copy.deepcopy(self.colours)
ticks = np.array(ticks)/scale_factor
# determine which codec to use
self.map_values = map_posdef if self.posdef else map_signed
self.inv_map_values = inv_map_posdef if self.posdef else inv_map_signed
# make sure we have min and max for both colour index and values, and
# also the locations that the ticks are going to be placed
if len(ticks) == 0:
minidx, maxidx = self.s0+1, self.s1-1
minval,maxval = self.inv_map_values([minidx,maxidx],self.p0,self.p1,self.s0,self.s1)
ticks = np.linspace(minval, maxval, 8)
else:
# ticks given
minval = ticks[0]; maxval = ticks[-1]
minidx = self.map_values(minval,self.p0,self.p1,self.s0,self.s1)
maxidx = self.map_values(maxval,self.p0,self.p1,self.s0,self.s1)
colour_index_ticks = [self.map_values(vt,self.p0,self.p1,self.s0,self.s1) for vt in ticks]
colour_index_ticks = np.array(colour_index_ticks)
if any(np.isinf(colour_index_ticks)):
print 'ticks out of range'
return
print 'min/max ticks being set to:', minval, maxval
print 'corresponding to colour indices:', minidx, maxidx
print 'ticks being placed at:', ticks
print 'with colour indices:', colour_index_ticks
# OK, so now we have to make the colour map on the fly (because we are
# having to sample the original LUT for the subsection we are
# interested in).
# This means normalising the appropriate interval to the interval
# [0-255] and constructing new left and right edges, passing the colours (and maybe alphas)
# to the make_colourmap function.
# left:
i0 = np.where(np.array([c[0] for c in colours]) <= minidx)[0][-1]
i1 = i0 + 1
ileft = i1
idx0 = colours[i0][0]
idx1 = colours[i1][0]
r0, r1 = colours[i0][1][0], colours[i1][1][0]
g0, g1 = colours[i0][1][1], colours[i1][1][1]
b0, b1 = colours[i0][1][2], colours[i1][1][2]
rl = r0 + (r1 - r0)/(idx1 - idx0) * (minidx - idx0)
gl = g0 + (g1 - g0)/(idx1 - idx0) * (minidx - idx0)
bl = b0 + (b1 - b0)/(idx1 - idx0) * (minidx - idx0)
# right:
i0 = np.where(np.array([c[0] for c in colours]) <= maxidx)[0][-1]
i1 = i0 + 1
iright = i1
idx0 = colours[i0][0]
idx1 = colours[i1][0]
r0, r1 = colours[i0][1][0], colours[i1][1][0]
g0, g1 = colours[i0][1][1], colours[i1][1][1]
b0, b1 = colours[i0][1][2], colours[i1][1][2]
rr = r0 + (r1 - r0)/(idx1 - idx0) * (maxidx - idx0)
gr = g0 + (g1 - g0)/(idx1 - idx0) * (maxidx - idx0)
br = b0 + (b1 - b0)/(idx1 - idx0) * (maxidx - idx0)
print ileft, iright, minidx, maxidx
to_splice = copy.deepcopy(colours)[ileft:iright]
newcolours = [[minidx, (rl, gl, bl)]] + to_splice + [[maxidx, (rr, gr, br)]]
# now normalise the indices to [0-255]
indices = np.array([c[0] for c in newcolours])
newindices = 255.*(indices - np.min(indices)) / indices.ptp()
# renormalise index tick locations as well
colour_index_ticks = 255.*(colour_index_ticks - np.min(colour_index_ticks)) / colour_index_ticks.ptp()
print 'new colour indices:', newindices
print 'ticks now at:', colour_index_ticks
for i in range(len(newcolours)):
newcolours[i][0] = newindices[i]
self.cmap = make_colourmap(newcolours)
x = np.linspace(0, 256, 257)
y = x.copy()
xx, yy = np.meshgrid(x, y)
mat = xx.copy()
pl.figure()
pcol = pl.pcolor(x, y, mat.T, cmap=self.cmap)
pl.gca().xaxis.set_visible(False)
pl.ylabel('colour index')
if horizontal:
cbar = pl.colorbar(orientation='horizontal', ticks = colour_index_ticks)
cbar.ax.set_xticklabels(ticks*scale_factor)
else:
cbar = pl.colorbar(ticks = colour_index_ticks)
cbar.ax.set_yticklabels(ticks*scale_factor)
cbar.solids.set_edgecolor('face')
cbar.ax.tick_params(axis=u'both', which=u'both',length=0,labelsize=6)
pl.draw()
return cbar
def cmap_from_str(str, segment=None):
points = []
for line in str.splitlines():
parts = line.split()
if (len(parts) == 5) and (parts[0] == 'Cnot:'):
points.append(parts[1:])
points = np.array(points, dtype=np.float)
points = points[points[:,0].argsort()]
if segment is not None:
# Index of the first point with value > segment[0].
idx0 = np.argmax(points[:, 0] > segment[0])
if idx0 > 0:
t = (float(segment[0]) - points[idx0 - 1, 0])/ \
(points[idx0, 0] - points[idx0 - 1, 0])
new_point = (1. - t)*points[idx0 - 1, :] + t*points[idx0, :]
points = np.vstack([new_point, points[idx0:, :]])
# Index of the first point with value > segment[1].
idx1 = np.argmax(points[:, 0] > segment[1])
if idx1 > 0:
t = (float(segment[1]) - points[idx1 - 1, 0])/ \
(points[idx1, 0] - points[idx1 - 1, 0])
if t > 0.:
new_point = (1. - t)*points[idx1 - 1, :] + t*points[idx1, :]
points = np.vstack([points[0:idx1, :], new_point])
else:
points = points[0:idx1, :]
p0 = points[0, 0]
p1 = points[-1, 0]
for i in range(points.shape[0]):
points[i, 0] = (points[i, 0] - p0)/(p1 - p0)
r = np.zeros((points.shape[0], 3))
r[:, 0] = points[:, 0]
r[:, 1] = points[:, 1]
r[:, 2] = points[:, 1]
g = np.zeros((points.shape[0], 3))
g[:, 0] = points[:, 0]
g[:, 1] = points[:, 2]
g[:, 2] = points[:, 2]
b = np.zeros((points.shape[0], 3))
b[:, 0] = points[:, 0]
b[:, 1] = points[:, 3]
b[:, 2] = points[:, 3]
cmap_points = {'red': r, 'green': g, 'blue': b}
cmap = LinearSegmentedColormap('my_cmap', cmap_points)
return cmap
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.20/_downloads/46bc4cee057a363d1806112746fb0369/plot_receptive_field_mtrf.py | 1 | 11522 | """
.. _ex-receptive-field-mtrf:
=========================================
Receptive Field Estimation and Prediction
=========================================
This example reproduces figures from Lalor et al's mTRF toolbox in
matlab [1]_. We will show how the :class:`mne.decoding.ReceptiveField` class
can perform a similar function along with scikit-learn. We will first fit a
linear encoding model using the continuously-varying speech envelope to predict
activity of a 128 channel EEG system. Then, we will take the reverse approach
and try to predict the speech envelope from the EEG (known in the literature
as a decoding model, or simply stimulus reconstruction).
References
----------
.. [1] Crosse, M. J., Di Liberto, G. M., Bednar, A. & Lalor, E. C. (2016).
The Multivariate Temporal Response Function (mTRF) Toolbox:
A MATLAB Toolbox for Relating Neural Signals to Continuous Stimuli.
Frontiers in Human Neuroscience 10, 604. doi:10.3389/fnhum.2016.00604
.. [2] Haufe, S., Meinecke, F., Goergen, K., Daehne, S., Haynes, J.-D.,
Blankertz, B., & Biessmann, F. (2014). On the interpretation of weight
vectors of linear models in multivariate neuroimaging. NeuroImage, 87,
96-110. doi:10.1016/j.neuroimage.2013.10.067
.. _figure 1: https://www.frontiersin.org/articles/10.3389/fnhum.2016.00604/full#F1
.. _figure 2: https://www.frontiersin.org/articles/10.3389/fnhum.2016.00604/full#F2
.. _figure 5: https://www.frontiersin.org/articles/10.3389/fnhum.2016.00604/full#F5
""" # noqa: E501
# Authors: Chris Holdgraf <[email protected]>
# Eric Larson <[email protected]>
# Nicolas Barascud <[email protected]>
#
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = 3
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from os.path import join
import mne
from mne.decoding import ReceptiveField
from sklearn.model_selection import KFold
from sklearn.preprocessing import scale
###############################################################################
# Load the data from the publication
# ----------------------------------
#
# First we will load the data collected in [1]_. In this experiment subjects
# listened to natural speech. Raw EEG and the speech stimulus are provided.
# We will load these below, downsampling the data in order to speed up
# computation since we know that our features are primarily low-frequency in
# nature. Then we'll visualize both the EEG and speech envelope.
path = mne.datasets.mtrf.data_path()
decim = 2
data = loadmat(join(path, 'speech_data.mat'))
raw = data['EEG'].T
speech = data['envelope'].T
sfreq = float(data['Fs'])
sfreq /= decim
speech = mne.filter.resample(speech, down=decim, npad='auto')
raw = mne.filter.resample(raw, down=decim, npad='auto')
# Read in channel positions and create our MNE objects from the raw data
montage = mne.channels.make_standard_montage('biosemi128')
info = mne.create_info(montage.ch_names, sfreq, 'eeg').set_montage(montage)
raw = mne.io.RawArray(raw, info)
n_channels = len(raw.ch_names)
# Plot a sample of brain and stimulus activity
fig, ax = plt.subplots()
lns = ax.plot(scale(raw[:, :800][0].T), color='k', alpha=.1)
ln1 = ax.plot(scale(speech[0, :800]), color='r', lw=2)
ax.legend([lns[0], ln1[0]], ['EEG', 'Speech Envelope'], frameon=False)
ax.set(title="Sample activity", xlabel="Time (s)")
mne.viz.tight_layout()
###############################################################################
# Create and fit a receptive field model
# --------------------------------------
#
# We will construct an encoding model to find the linear relationship between
# a time-delayed version of the speech envelope and the EEG signal. This allows
# us to make predictions about the response to new stimuli.
# Define the delays that we will use in the receptive field
tmin, tmax = -.2, .4
# Initialize the model
rf = ReceptiveField(tmin, tmax, sfreq, feature_names=['envelope'],
estimator=1., scoring='corrcoef')
# We'll have (tmax - tmin) * sfreq delays
# and an extra 2 delays since we are inclusive on the beginning / end index
n_delays = int((tmax - tmin) * sfreq) + 2
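# For example, if the downsampled rate works out to sfreq = 64 Hz, this gives
# int((0.4 - (-0.2)) * 64) + 2 = 38 + 2 = 40 delays (the 64 Hz figure is only
# for illustration).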
n_splits = 3
cv = KFold(n_splits)
# Prepare model data (make time the first dimension)
speech = speech.T
Y, _ = raw[:] # Outputs for the model
Y = Y.T
# Iterate through splits, fit the model, and predict/test on held-out data
coefs = np.zeros((n_splits, n_channels, n_delays))
scores = np.zeros((n_splits, n_channels))
for ii, (train, test) in enumerate(cv.split(speech)):
print('split %s / %s' % (ii + 1, n_splits))
rf.fit(speech[train], Y[train])
scores[ii] = rf.score(speech[test], Y[test])
# coef_ is shape (n_outputs, n_features, n_delays). we only have 1 feature
coefs[ii] = rf.coef_[:, 0, :]
times = rf.delays_ / float(rf.sfreq)
# Average scores and coefficients across CV splits
mean_coefs = coefs.mean(axis=0)
mean_scores = scores.mean(axis=0)
# Plot mean prediction scores across all channels
fig, ax = plt.subplots()
ix_chs = np.arange(n_channels)
ax.plot(ix_chs, mean_scores)
ax.axhline(0, ls='--', color='r')
ax.set(title="Mean prediction score", xlabel="Channel", ylabel="Score ($r$)")
mne.viz.tight_layout()
###############################################################################
# Investigate model coefficients
# ==============================
# Finally, we will look at how the linear coefficients (sometimes
# referred to as beta values) are distributed across time delays as well as
# across the scalp. We will recreate `figure 1`_ and `figure 2`_ from [1]_.
# Print mean coefficients across all time delays / channels (see Fig 1 in [1])
time_plot = 0.180 # For highlighting a specific time.
fig, ax = plt.subplots(figsize=(4, 8))
max_coef = mean_coefs.max()
ax.pcolormesh(times, ix_chs, mean_coefs, cmap='RdBu_r',
vmin=-max_coef, vmax=max_coef, shading='gouraud')
ax.axvline(time_plot, ls='--', color='k', lw=2)
ax.set(xlabel='Delay (s)', ylabel='Channel', title="Mean Model\nCoefficients",
xlim=times[[0, -1]], ylim=[len(ix_chs) - 1, 0],
xticks=np.arange(tmin, tmax + .2, .2))
plt.setp(ax.get_xticklabels(), rotation=45)
mne.viz.tight_layout()
# Make a topographic map of coefficients for a given delay (see Fig 2C in [1])
ix_plot = np.argmin(np.abs(time_plot - times))
fig, ax = plt.subplots()
mne.viz.plot_topomap(mean_coefs[:, ix_plot], pos=info, axes=ax, show=False,
vmin=-max_coef, vmax=max_coef)
ax.set(title="Topomap of model coefficients\nfor delay %s" % time_plot)
mne.viz.tight_layout()
###############################################################################
# Create and fit a stimulus reconstruction model
# ----------------------------------------------
#
# We will now demonstrate another use case for the
# :class:`mne.decoding.ReceptiveField` class as we try to predict the stimulus
# activity from the EEG data. This is known in the literature as a decoding, or
# stimulus reconstruction model [1]_. A decoding model aims to find the
# relationship between the speech signal and a time-delayed version of the EEG.
# This can be useful as we exploit all of the available neural data in a
# multivariate context, compared to the encoding case which treats each M/EEG
# channel as an independent feature. Therefore, decoding models might provide a
# better quality of fit (at the expense of not controlling for stimulus
# covariance), especially for low SNR stimuli such as speech.
# We use the same lags as in [1]. Negative lags now index the relationship
# between the neural response and the speech envelope earlier in time, whereas
# positive lags would index how a unit change in the amplitude of the EEG would
# affect later stimulus activity (obviously this should have an amplitude of
# zero).
tmin, tmax = -.2, 0.
# Initialize the model. Here the features are the EEG data. We also specify
# ``patterns=True`` to compute inverse-transformed coefficients during model
# fitting (cf. next section). We'll use a ridge regression estimator with an
# alpha value similar to [1].
sr = ReceptiveField(tmin, tmax, sfreq, feature_names=raw.ch_names,
estimator=1e4, scoring='corrcoef', patterns=True)
# We'll have (tmax - tmin) * sfreq delays
# and an extra 2 delays since we are inclusive on the beginning / end index
n_delays = int((tmax - tmin) * sfreq) + 2
n_splits = 3
cv = KFold(n_splits)
# Iterate through splits, fit the model, and predict/test on held-out data
coefs = np.zeros((n_splits, n_channels, n_delays))
patterns = coefs.copy()
scores = np.zeros((n_splits,))
for ii, (train, test) in enumerate(cv.split(speech)):
print('split %s / %s' % (ii + 1, n_splits))
sr.fit(Y[train], speech[train])
scores[ii] = sr.score(Y[test], speech[test])[0]
# coef_ is shape (n_outputs, n_features, n_delays). We have 128 features
coefs[ii] = sr.coef_[0, :, :]
patterns[ii] = sr.patterns_[0, :, :]
times = sr.delays_ / float(sr.sfreq)
# Average scores and coefficients across CV splits
mean_coefs = coefs.mean(axis=0)
mean_patterns = patterns.mean(axis=0)
mean_scores = scores.mean(axis=0)
max_coef = np.abs(mean_coefs).max()
max_patterns = np.abs(mean_patterns).max()
###############################################################################
# Visualize stimulus reconstruction
# =================================
#
# To get a sense of our model performance, we can plot the actual and predicted
# stimulus envelopes side by side.
y_pred = sr.predict(Y[test])
time = np.linspace(0, 2., 5 * int(sfreq))
fig, ax = plt.subplots(figsize=(8, 4))
ax.plot(time, speech[test][sr.valid_samples_][:int(5 * sfreq)],
color='grey', lw=2, ls='--')
ax.plot(time, y_pred[sr.valid_samples_][:int(5 * sfreq)], color='r', lw=2)
ax.legend([lns[0], ln1[0]], ['Envelope', 'Reconstruction'], frameon=False)
ax.set(title="Stimulus reconstruction")
ax.set_xlabel('Time (s)')
mne.viz.tight_layout()
###############################################################################
# Investigate model coefficients
# ==============================
#
# Finally, we will look at how the decoding model coefficients are distributed
# across the scalp. We will attempt to recreate `figure 5`_ from [1]_. The
# decoding model weights reflect the channels that contribute most toward
# reconstructing the stimulus signal, but are not directly interpretable in a
# neurophysiological sense. Here we also look at the coefficients obtained
# via an inversion procedure [2]_, which have a more straightforward
# interpretation as their value (and sign) directly relates to the stimulus
# signal's strength (and effect direction).
time_plot = (-.140, -.125) # To average between two timepoints.
ix_plot = np.arange(np.argmin(np.abs(time_plot[0] - times)),
np.argmin(np.abs(time_plot[1] - times)))
fig, ax = plt.subplots(1, 2)
mne.viz.plot_topomap(np.mean(mean_coefs[:, ix_plot], axis=1),
pos=info, axes=ax[0], show=False,
vmin=-max_coef, vmax=max_coef)
ax[0].set(title="Model coefficients\nbetween delays %s and %s"
% (time_plot[0], time_plot[1]))
mne.viz.plot_topomap(np.mean(mean_patterns[:, ix_plot], axis=1),
pos=info, axes=ax[1],
show=False, vmin=-max_patterns, vmax=max_patterns)
ax[1].set(title="Inverse-transformed coefficients\nbetween delays %s and %s"
% (time_plot[0], time_plot[1]))
mne.viz.tight_layout()
plt.show()
| bsd-3-clause |
GarmanGroup/RABDAM | rabdam/Subroutines/BDamage.py | 1 | 4920 |
# RABDAM
# Copyright (C) 2020 Garman Group, University of Oxford
# This file is part of RABDAM.
# RABDAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
# RABDAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General
# Public License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import numpy as np
def get_xyz_from_objects(bdamAtomList):
"""
Returns numpy arrays of the x, y and z coordinates of the atoms to be
included in the BDamage calculation.
"""
au_atom_coords = np.zeros([len(bdamAtomList), 3])
for i, atom in enumerate(bdamAtomList):
au_atom_coords[i, :] = np.array([atom.xyzCoords[0][0],
atom.xyzCoords[1][0],
atom.xyzCoords[2][0]])
return au_atom_coords
def calc_packing_density(xyz_au_atom, xyz_surr_atom, pack_dens_thresh):
"""
Calculates the packing density of each atom in the subset of atoms to
be considered for BDamage analysis.
"""
num_au_atoms = xyz_au_atom.shape[0]
packing_density_array = np.zeros([num_au_atoms, 1])
for i in range(num_au_atoms):
distances = np.sqrt(np.square(xyz_surr_atom[:, :] - xyz_au_atom[i, :]).sum(axis=1))
packing_density_array[i][0] = np.sum(distances < pack_dens_thresh) - 1 # Subtract
# 1 to correct for the atom itself being counted.
return packing_density_array
def write_pckg_dens_to_atoms(bdamAtomList, packing_density_array):
"""
Writes packing density values to their corresponding atom objects.
"""
for i, atom in enumerate(bdamAtomList):
atom.pd = packing_density_array[i][0]
def calcBDam(bdamAtomList, window):
"""
All atoms to be considered for BDamage analysis are ordered via their
    packing density values; the BDamage value of each atom is then
    calculated as the ratio of its B-factor to the average B-factor of
    similarly packed atoms (identified via a sliding window).
"""
import math
import random
import pandas as pd
# Initialises lists of the atom properties required to calculate BDamage.
ATMNUM = [None]*len(bdamAtomList)
BFAC = [None]*len(bdamAtomList)
PD = [None]*len(bdamAtomList)
# Lists are filled with property values associated with each of the atoms
# considered for BDamage analysis, then concatenated into the columns of a
# DataFrame.
for index, atm in enumerate(bdamAtomList):
ATMNUM[index] = atm.atomNum
BFAC[index] = atm.bFactor
PD[index] = atm.pd
df = pd.DataFrame({'ATMNUM': ATMNUM,
'BFAC': BFAC,
'PD': PD})
"""
index = [i for i in range(df.shape[0])]
random.shuffle(index)
df = df.iloc[index].reset_index(drop=True)
"""
# DataFrame rows are sorted by packing density (and next by atom number
# in cases of equal packing density). Average B-factor values are then
# calculated via a rolling mean approach with a window size as specified in
# the input file (default = 2%). In the cases of those atoms which lie too
# close to either edge of the packing density distribution to lie at the
# centre of a full-sized window, the average B-factor value of each of
# these atoms is taken from the closest complete window.
#df = df.sort_values(by=['PD'], ascending=[True])
df = df.sort_values(by=['PD', 'ATMNUM'], ascending=[True, True])
df = df.reset_index(drop=True)
ser = df['BFAC']
ser = ser.rename('AVRG_BF')
ser = ser.rolling(window=window, center=True).mean()
ser = ser.fillna(0)
index_list = range(0, len(bdamAtomList))
index = pd.Series(index_list)
index = index.rename('INDEX')
df = pd.concat([df, ser, index], axis=1)
df.loc[(df.AVRG_BF == 0) & (df.INDEX <= (math.floor(window/2)-1)),
'AVRG_BF'] += df.BFAC.values[0:window].mean(axis=0)
df.loc[(df.AVRG_BF == 0) & (df.INDEX >= (len(bdamAtomList) - math.floor(window/2))),
'AVRG_BF'] += df.BFAC.values[(len(bdamAtomList)-window):len(bdamAtomList)].mean(axis=0)
# The BDamage value of each atom in the DataFrame is calculated as the
# ratio of its B factor value to its associated average B factor value.
atmnum_list = df.ATMNUM.tolist()
for atm in bdamAtomList:
index = atmnum_list.index(atm.atomNum)
atm.avrg_bf = df.AVRG_BF[index]
atm.bd = atm.bFactor / atm.avrg_bf
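if __name__ == "__main__":
    # Minimal, hedged illustration of the sliding-window idea used in
    # calcBDam above (not part of the RABDAM pipeline; the numbers below are
    # invented purely for the demo). Atoms are sorted by packing density and
    # each B-factor is divided by the rolling mean of its neighbours.
    import pandas as pd
    toy = pd.DataFrame({'PD': [10, 12, 15, 20, 22, 30],
                        'BFAC': [20.0, 22.0, 25.0, 40.0, 24.0, 26.0]})
    toy = toy.sort_values(by=['PD']).reset_index(drop=True)
    toy['AVRG_BF'] = toy['BFAC'].rolling(window=4, center=True).mean()
    # Edge atoms take the closest complete window, as calcBDam does above.
    toy['AVRG_BF'] = toy['AVRG_BF'].bfill().ffill()
    toy['BDAM'] = toy['BFAC'] / toy['AVRG_BF']
    print(toy)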
| lgpl-3.0 |
antoinecarme/pyaf | tests/temporal_hierarchy/test_temporal_demo_hourly_H_6H_12H_D.py | 1 | 1539 | # %matplotlib inline
import pyaf
import numpy as np
import pandas as pd
DATA_FREQ = 'H'
PERIODS = ["H" , "6H" , "12H", "D"]
H = 365
N = H * 10
lDateColumn = "Date"
lSignalVar = "Signal";
START_TIME = "2001-01-25"
# generate an hourly signal (N samples starting 2001-01-25) in a pandas dataframe
np.random.seed(seed=1960)
df_train = pd.DataFrame({lDateColumn : pd.date_range(start=START_TIME, periods=N, freq=DATA_FREQ),
lSignalVar : (np.arange(N)//40 + np.arange(N) % 21 + np.random.randn(N))})
# print(df_train.head(N))
lHierarchy = {};
lHierarchy['Levels'] = None;
lHierarchy['Data'] = None;
lHierarchy['Groups']= {};
lHierarchy['Periods']= PERIODS
lHierarchy['Type'] = "Temporal";
# create a model to plot the hierarchy.
import pyaf.HierarchicalForecastEngine as hautof
lEngine = hautof.cHierarchicalForecastEngine()
lSignalHierarchy = lEngine.plot_Hierarchy(df_train , lDateColumn, lSignalVar, H,
lHierarchy, None);
# print(lSignalHierarchy.__dict__)
# create a hierarchical model and train it
import pyaf.HierarchicalForecastEngine as hautof
lEngine = hautof.cHierarchicalForecastEngine()
# lEngine.mOptions.mNbCores = 1
lEngine.mOptions.mHierarchicalCombinationMethod = ["BU" , 'TD' , 'MO' , 'OC'];
lSignalHierarchy = lEngine.train(df_train , lDateColumn, lSignalVar, H, lHierarchy, None);
lEngine.getModelInfo();
dfapp_in = df_train.copy();
dfapp_in.info()
dfapp_in.tail()
dfapp_out = lEngine.forecast(dfapp_in, H);
dfapp_out.info()
print(dfapp_out.tail())
| bsd-3-clause |
alphaBenj/zipline | zipline/data/history_loader.py | 5 | 22513 | # Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import (
ABCMeta,
abstractmethod,
abstractproperty,
)
from numpy import concatenate
from lru import LRU
from pandas import isnull
from pandas.tslib import normalize_date
from toolz import sliding_window
from six import with_metaclass
from zipline.assets import Equity, Future
from zipline.assets.continuous_futures import ContinuousFuture
from zipline.lib._int64window import AdjustedArrayWindow as Int64Window
from zipline.lib._float64window import AdjustedArrayWindow as Float64Window
from zipline.lib.adjustment import Float64Multiply, Float64Add
from zipline.utils.cache import ExpiringCache
from zipline.utils.math_utils import number_of_decimal_places
from zipline.utils.memoize import lazyval
from zipline.utils.numpy_utils import float64_dtype
from zipline.utils.pandas_utils import find_in_sorted_index
# Default number of decimal places used for rounding asset prices.
DEFAULT_ASSET_PRICE_DECIMALS = 3
class HistoryCompatibleUSEquityAdjustmentReader(object):
def __init__(self, adjustment_reader):
self._adjustments_reader = adjustment_reader
def load_adjustments(self, columns, dts, assets):
"""
Returns
-------
adjustments : list[dict[int -> Adjustment]]
A list, where each element corresponds to the `columns`, of
mappings from index to adjustment objects to apply at that index.
"""
out = [None] * len(columns)
for i, column in enumerate(columns):
adjs = {}
for asset in assets:
adjs.update(self._get_adjustments_in_range(
asset, dts, column))
out[i] = adjs
return out
def _get_adjustments_in_range(self, asset, dts, field):
"""
Get the Float64Multiply objects to pass to an AdjustedArrayWindow.
        For the use of AdjustedArrayWindow in the loader, which looks back
        from the current simulation time over a window of data, the dictionary
        is structured with:
- the key into the dictionary for adjustments is the location of the
day from which the window is being viewed.
- the start of all multiply objects is always 0 (in each window all
adjustments are overlapping)
- the end of the multiply object is the location before the calendar
location of the adjustment action, making all days before the event
adjusted.
Parameters
----------
asset : Asset
The assets for which to get adjustments.
dts : iterable of datetime64-like
The dts for which adjustment data is needed.
field : str
OHLCV field for which to get the adjustments.
Returns
-------
out : dict[loc -> Float64Multiply]
The adjustments as a dict of loc -> Float64Multiply
"""
sid = int(asset)
start = normalize_date(dts[0])
end = normalize_date(dts[-1])
adjs = {}
if field != 'volume':
mergers = self._adjustments_reader.get_adjustments_for_sid(
'mergers', sid)
for m in mergers:
dt = m[0]
if start < dt <= end:
end_loc = dts.searchsorted(dt)
adj_loc = end_loc
mult = Float64Multiply(0,
end_loc - 1,
0,
0,
m[1])
try:
adjs[adj_loc].append(mult)
except KeyError:
adjs[adj_loc] = [mult]
divs = self._adjustments_reader.get_adjustments_for_sid(
'dividends', sid)
for d in divs:
dt = d[0]
if start < dt <= end:
end_loc = dts.searchsorted(dt)
adj_loc = end_loc
mult = Float64Multiply(0,
end_loc - 1,
0,
0,
d[1])
try:
adjs[adj_loc].append(mult)
except KeyError:
adjs[adj_loc] = [mult]
splits = self._adjustments_reader.get_adjustments_for_sid(
'splits', sid)
for s in splits:
dt = s[0]
if start < dt <= end:
if field == 'volume':
ratio = 1.0 / s[1]
else:
ratio = s[1]
end_loc = dts.searchsorted(dt)
adj_loc = end_loc
mult = Float64Multiply(0,
end_loc - 1,
0,
0,
ratio)
try:
adjs[adj_loc].append(mult)
except KeyError:
adjs[adj_loc] = [mult]
return adjs
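# Illustrative sketch (not executed): for a single 2:1 split dated at dts[3],
# the mapping built by _get_adjustments_in_range above would look roughly like
#     {3: [Float64Multiply(first_row=0, last_row=2,
#                          first_col=0, last_col=0, value=0.5)]}
# i.e. when the window is viewed from calendar location 3 onward, every bar
# before the split date has its price multiplied by 0.5 (volume by 2.0).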
class ContinuousFutureAdjustmentReader(object):
"""
Calculates adjustments for continuous futures, based on the
    close and open of the contracts on either side of each roll.
"""
def __init__(self,
trading_calendar,
asset_finder,
bar_reader,
roll_finders,
frequency):
self._trading_calendar = trading_calendar
self._asset_finder = asset_finder
self._bar_reader = bar_reader
self._roll_finders = roll_finders
self._frequency = frequency
def load_adjustments(self, columns, dts, assets):
"""
Returns
-------
adjustments : list[dict[int -> Adjustment]]
A list, where each element corresponds to the `columns`, of
mappings from index to adjustment objects to apply at that index.
"""
out = [None] * len(columns)
for i, column in enumerate(columns):
adjs = {}
for asset in assets:
adjs.update(self._get_adjustments_in_range(
asset, dts, column))
out[i] = adjs
return out
def _make_adjustment(self,
adjustment_type,
front_close,
back_close,
end_loc):
adj_base = back_close - front_close
if adjustment_type == 'mul':
adj_value = 1.0 + adj_base / front_close
adj_class = Float64Multiply
elif adjustment_type == 'add':
adj_value = adj_base
adj_class = Float64Add
return adj_class(0,
end_loc,
0,
0,
adj_value)
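    # Worked example of the arithmetic above (values invented for
    # illustration): with front_close=10.0 and back_close=11.0, a 'mul'
    # continuous future gets a Float64Multiply of value 1.0 + 1.0/10.0 = 1.1,
    # scaling pre-roll history onto the back contract's price level, while an
    # 'add' adjustment would instead shift pre-roll history by +1.0.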
def _get_adjustments_in_range(self, cf, dts, field):
if field == 'volume' or field == 'sid':
return {}
if cf.adjustment is None:
return {}
rf = self._roll_finders[cf.roll_style]
partitions = []
rolls = rf.get_rolls(cf.root_symbol, dts[0], dts[-1],
cf.offset)
tc = self._trading_calendar
adjs = {}
for front, back in sliding_window(2, rolls):
front_sid, roll_dt = front
back_sid = back[0]
dt = tc.previous_session_label(roll_dt)
if self._frequency == 'minute':
dt = tc.open_and_close_for_session(dt)[1]
roll_dt = tc.open_and_close_for_session(roll_dt)[0]
partitions.append((front_sid,
back_sid,
dt,
roll_dt))
for partition in partitions:
front_sid, back_sid, dt, roll_dt = partition
last_front_dt = self._bar_reader.get_last_traded_dt(
self._asset_finder.retrieve_asset(front_sid), dt)
last_back_dt = self._bar_reader.get_last_traded_dt(
self._asset_finder.retrieve_asset(back_sid), dt)
if isnull(last_front_dt) or isnull(last_back_dt):
continue
front_close = self._bar_reader.get_value(
front_sid, last_front_dt, 'close')
back_close = self._bar_reader.get_value(
back_sid, last_back_dt, 'close')
adj_loc = dts.searchsorted(roll_dt)
end_loc = adj_loc - 1
adj = self._make_adjustment(cf.adjustment,
front_close,
back_close,
end_loc)
try:
adjs[adj_loc].append(adj)
except KeyError:
adjs[adj_loc] = [adj]
return adjs
class SlidingWindow(object):
"""
Wrapper around an AdjustedArrayWindow which supports monotonically
increasing (by datetime) requests for a sized window of data.
Parameters
----------
window : AdjustedArrayWindow
Window of pricing data with prefetched values beyond the current
simulation dt.
cal_start : int
Index in the overall calendar at which the window starts.
"""
def __init__(self, window, size, cal_start, offset):
self.window = window
self.cal_start = cal_start
self.current = next(window)
self.offset = offset
self.most_recent_ix = self.cal_start + size
def get(self, end_ix):
"""
Returns
-------
out : A np.ndarray of the equity pricing up to end_ix after adjustments
and rounding have been applied.
"""
if self.most_recent_ix == end_ix:
return self.current
target = end_ix - self.cal_start - self.offset + 1
self.current = self.window.seek(target)
self.most_recent_ix = end_ix
return self.current
class HistoryLoader(with_metaclass(ABCMeta)):
"""
Loader for sliding history windows, with support for adjustments.
Parameters
----------
trading_calendar: TradingCalendar
Contains the grouping logic needed to assign minutes to periods.
reader : DailyBarReader, MinuteBarReader
Reader for pricing bars.
adjustment_reader : SQLiteAdjustmentReader
Reader for adjustment data.
"""
FIELDS = ('open', 'high', 'low', 'close', 'volume', 'sid')
def __init__(self, trading_calendar, reader, equity_adjustment_reader,
asset_finder,
roll_finders=None,
sid_cache_size=1000,
prefetch_length=0):
self.trading_calendar = trading_calendar
self._asset_finder = asset_finder
self._reader = reader
self._adjustment_readers = {}
if equity_adjustment_reader is not None:
self._adjustment_readers[Equity] = \
HistoryCompatibleUSEquityAdjustmentReader(
equity_adjustment_reader)
if roll_finders:
self._adjustment_readers[ContinuousFuture] =\
ContinuousFutureAdjustmentReader(trading_calendar,
asset_finder,
reader,
roll_finders,
self._frequency)
self._window_blocks = {
field: ExpiringCache(LRU(sid_cache_size))
for field in self.FIELDS
}
self._prefetch_length = prefetch_length
@abstractproperty
def _frequency(self):
pass
@abstractproperty
def _calendar(self):
pass
@abstractmethod
def _array(self, start, end, assets, field):
pass
def _decimal_places_for_asset(self, asset, reference_date):
if isinstance(asset, Future) and asset.tick_size:
return number_of_decimal_places(asset.tick_size)
elif isinstance(asset, ContinuousFuture):
# Tick size should be the same for all contracts of a continuous
# future, so arbitrarily get the contract with next upcoming auto
# close date.
oc = self._asset_finder.get_ordered_contracts(asset.root_symbol)
contract_sid = oc.contract_before_auto_close(reference_date.value)
if contract_sid is not None:
contract = self._asset_finder.retrieve_asset(contract_sid)
if contract.tick_size:
return number_of_decimal_places(contract.tick_size)
return DEFAULT_ASSET_PRICE_DECIMALS
def _ensure_sliding_windows(self, assets, dts, field,
is_perspective_after):
"""
Ensure that there is a Float64Multiply window for each asset that can
provide data for the given parameters.
If the corresponding window for the (assets, len(dts), field) does not
exist, then create a new one.
If a corresponding window does exist for (assets, len(dts), field), but
can not provide data for the current dts range, then create a new
one and replace the expired window.
Parameters
----------
assets : iterable of Assets
The assets in the window
dts : iterable of datetime64-like
The datetimes for which to fetch data.
Makes an assumption that all dts are present and contiguous,
in the calendar.
field : str
The OHLCV field for which to retrieve data.
is_perspective_after : bool
see: `PricingHistoryLoader.history`
Returns
-------
out : list of Float64Window with sufficient data so that each asset's
window can provide `get` for the index corresponding with the last
value in `dts`
"""
end = dts[-1]
size = len(dts)
asset_windows = {}
needed_assets = []
cal = self._calendar
assets = self._asset_finder.retrieve_all(assets)
end_ix = find_in_sorted_index(cal, end)
for asset in assets:
try:
window = self._window_blocks[field].get(
(asset, size, is_perspective_after), end)
except KeyError:
needed_assets.append(asset)
else:
if end_ix < window.most_recent_ix:
# Window needs reset. Requested end index occurs before the
# end index from the previous history call for this window.
# Grab new window instead of rewinding adjustments.
needed_assets.append(asset)
else:
asset_windows[asset] = window
if needed_assets:
offset = 0
start_ix = find_in_sorted_index(cal, dts[0])
prefetch_end_ix = min(end_ix + self._prefetch_length, len(cal) - 1)
prefetch_end = cal[prefetch_end_ix]
prefetch_dts = cal[start_ix:prefetch_end_ix + 1]
if is_perspective_after:
adj_end_ix = min(prefetch_end_ix + 1, len(cal) - 1)
adj_dts = cal[start_ix:adj_end_ix + 1]
else:
adj_dts = prefetch_dts
prefetch_len = len(prefetch_dts)
array = self._array(prefetch_dts, needed_assets, field)
if field == 'sid':
window_type = Int64Window
else:
window_type = Float64Window
view_kwargs = {}
if field == 'volume':
array = array.astype(float64_dtype)
for i, asset in enumerate(needed_assets):
adj_reader = None
try:
adj_reader = self._adjustment_readers[type(asset)]
except KeyError:
adj_reader = None
if adj_reader is not None:
adjs = adj_reader.load_adjustments(
[field], adj_dts, [asset])[0]
else:
adjs = {}
window = window_type(
array[:, i].reshape(prefetch_len, 1),
view_kwargs,
adjs,
offset,
size,
int(is_perspective_after),
self._decimal_places_for_asset(asset, dts[-1]),
)
sliding_window = SlidingWindow(window, size, start_ix, offset)
asset_windows[asset] = sliding_window
self._window_blocks[field].set(
(asset, size, is_perspective_after),
sliding_window,
prefetch_end)
return [asset_windows[asset] for asset in assets]
def history(self, assets, dts, field, is_perspective_after):
"""
A window of pricing data with adjustments applied assuming that the
end of the window is the day before the current simulation time.
Parameters
----------
assets : iterable of Assets
The assets in the window.
dts : iterable of datetime64-like
The datetimes for which to fetch data.
Makes an assumption that all dts are present and contiguous,
in the calendar.
field : str
The OHLCV field for which to retrieve data.
is_perspective_after : bool
True, if the window is being viewed immediately after the last dt
in the sliding window.
False, if the window is viewed on the last dt.
This flag is used for handling the case where the last dt in the
requested window immediately precedes a corporate action, e.g.:
- is_perspective_after is True
When the viewpoint is after the last dt in the window, as when a
daily history window is accessed from a simulation that uses a
minute data frequency, the history call to this loader will not
include the current simulation dt. At that point in time, the raw
data for the last day in the window will require adjustment, so the
most recent adjustment with respect to the simulation time is
applied to the last dt in the requested window.
An example equity which has a 0.5 split ratio dated for 05-27,
with the dts for a history call of 5 bars with a '1d' frequency at
05-27 9:31. Simulation frequency is 'minute'.
(In this case this function is called with 4 daily dts, and the
calling function is responsible for stitching back on the
'current' dt)
| | | | | last dt | <-- viewer is here |
| | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 9:31 |
| raw | 10.10 | 10.20 | 10.30 | 10.40 | |
| adj | 5.05 | 5.10 | 5.15 | 5.25 | |
The adjustment is applied to the last dt, 05-26, and all previous
dts.
- is_perspective_after is False, daily
When the viewpoint is the same point in time as the last dt in the
window, as when a daily history window is accessed from a
simulation that uses a daily data frequency, the history call will
include the current dt. At that point in time, the raw data for the
last day in the window will be post-adjustment, so no adjustment
is applied to the last dt.
An example equity which has a 0.5 split ratio dated for 05-27,
with the dts for a history call of 5 bars with a '1d' frequency at
05-27 0:00. Simulation frequency is 'daily'.
| | | | | | <-- viewer is here |
| | | | | | last dt |
| | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 |
| raw | 10.10 | 10.20 | 10.30 | 10.40 | 5.25 |
| adj | 5.05 | 5.10 | 5.15 | 5.20 | 5.25 |
Adjustments are applied 05-23 through 05-26 but not to the last dt,
05-27
Returns
-------
out : np.ndarray with shape(len(days between start, end), len(assets))
"""
block = self._ensure_sliding_windows(assets,
dts,
field,
is_perspective_after)
end_ix = self._calendar.searchsorted(dts[-1])
return concatenate(
[window.get(end_ix) for window in block],
axis=1,
)
class DailyHistoryLoader(HistoryLoader):
@property
def _frequency(self):
return 'daily'
@property
def _calendar(self):
return self._reader.sessions
def _array(self, dts, assets, field):
return self._reader.load_raw_arrays(
[field],
dts[0],
dts[-1],
assets,
)[0]
class MinuteHistoryLoader(HistoryLoader):
@property
def _frequency(self):
return 'minute'
@lazyval
def _calendar(self):
mm = self.trading_calendar.all_minutes
start = mm.searchsorted(self._reader.first_trading_day)
end = mm.searchsorted(self._reader.last_available_dt, side='right')
return mm[start:end]
def _array(self, dts, assets, field):
return self._reader.load_raw_arrays(
[field],
dts[0],
dts[-1],
assets,
)[0]
| apache-2.0 |
zooniverse/aggregation | experimental/condor/divisiveClustering.py | 2 | 3011 | #!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import os
import pymongo
import sys
import urllib
import matplotlib.cbook as cbook
from PIL import Image
import matplotlib.pyplot as plt
import warnings
import random
if os.path.exists("/home/ggdhines"):
sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/clusteringAlg")
else:
sys.path.append("/home/greg/github/reduction/experimental/clusteringAlg")
#from divisiveDBSCAN import DivisiveDBSCAN
from divisiveDBSCAN_multi import DivisiveDBSCAN
from divisiveKmeans import DivisiveKmeans
if os.path.exists("/home/ggdhines"):
base_directory = "/home/ggdhines"
else:
base_directory = "/home/greg"
client = pymongo.MongoClient()
db = client['condor_2014-11-06']
classification_collection = db["condor_classifications"]
subject_collection = db["condor_subjects"]
to_sample_from = list(subject_collection.find({"state":"complete"}))
#to_sample_from = list(subject_collection.find({"classification_count":10}))
for subject_count,subject in enumerate(random.sample(to_sample_from,500)):
zooniverse_id = subject["zooniverse_id"]
url = subject["location"]["standard"]
slash_index = url.rfind("/")
object_id = url[slash_index+1:]
annotation_list = []
user_list = []
for user_index,classification in enumerate(classification_collection.find({"subjects.zooniverse_id":zooniverse_id})):
try:
mark_index = [ann.keys() for ann in classification["annotations"]].index(["marks",])
markings = classification["annotations"][mark_index].values()[0]
for animal in markings.values():
scale = 1.875
x = scale*float(animal["x"])
y = scale*float(animal["y"])
try:
animal_type = animal["animal"]
#if not(animal_type in ["carcassOrScale","carcass"]):
if animal_type == "condor":
annotation_list.append((x,y))
user_list.append(user_index)
except KeyError:
annotation_list.append((x,y))
user_list.append(user_index)
except ValueError:
pass
if not(os.path.isfile(base_directory+"/Databases/condors/images/"+object_id)):
urllib.urlretrieve (url, base_directory+"/Databases/condors/images/"+object_id)
user_identified_condors = DivisiveKmeans(3).fit2(annotation_list,user_list)#,jpeg_file=base_directory+"/Databases/condors/images/"+object_id)
image_file = cbook.get_sample_data(base_directory+"/Databases/condors/images/"+object_id)
image = plt.imread(image_file)
fig, ax = plt.subplots()
im = ax.imshow(image)
#plt.show()
#
if user_identified_condors != []:
x,y = zip(*user_identified_condors)
plt.plot(x,y,'.',color='blue')
print zooniverse_id
print user_identified_condors
plt.show()
else:
plt.close()
| apache-2.0 |
jorik041/scikit-learn | sklearn/linear_model/bayes.py | 220 | 15248 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
### Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
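            # Equivalently, with the SVD X = U diag(s) Vh computed above, the
            # first branch below evaluates
            #   coef_ = V diag(s / (s**2 + lambda_/alpha_)) U.T y
            # which avoids forming X.T X + lambda_/alpha_ * I explicitly.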
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
    the regression model are assumed to follow Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedure (Evidence Maximization).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
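            # As a reminder, the identity being used is
            #   (A + U C V)^-1 = A^-1 - A^-1 U (C^-1 + V A^-1 U)^-1 V A^-1
            # with A = diag(lambda_), U = X.T, C = alpha_ * I, V = X, so the
            # n_features x n_features inverse is traded for the
            # n_samples x n_samples pinvh call below.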
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
/ ((coef_[keep_lambda]) ** 2
+ 2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
/ (rmse_ + 2. * alpha_2))
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_mean, y_mean, X_std)
return self
| bsd-3-clause |
RedhawkSDR/FMDemodulator | tests/test_FMDemodulator.py | 1 | 4024 | #!/usr/bin/env python
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of FMDemodulator.
#
# FMDemodulator is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# FMDemodulator is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
import numpy as np
import scipy.signal as sp
from ossie.utils import sb
import time
import scipy
import matplotlib.pyplot as plt
print '*****************************************'
print '******* Liquid FM Demod Unit Test *******'
print '*****************************************'
print '\n*********** Generating Data *************'
fs = 256000.0 # Sample Rate
fm = 250.0 # Signal Frequency (Hz)
fc = 1000.0 # Carrier Frequency (Hz)
b = (fc - fm) / fm # Modulation Index
# Generate a time signal
t = np.arange(0, 1, 1.0 / fs)
# Generate the message signals for modulation
x = np.sin(2 * np.pi * fm * t)
#Modulate the signal
modfm = np.cos(2 * np.pi * fc * t + b * x)
#Apply the Hilbert Transform
complexModFM = sp.hilbert(modfm)
#Data to be pushed
pushComplexModFM = np.ndarray(2 * len(complexModFM))
for i in range(0,len(pushComplexModFM),2):
pushComplexModFM[i] = complexModFM[i/2].real
pushComplexModFM[i+1] = complexModFM[i/2].imag
print '\n********* Creating Components ***********'
#Set up components and helpers for the waveform
fmDemod = sb.launch('../FMDemodulator.spd.xml', execparams={'DEBUG_LEVEL':5})
dataSource = sb.DataSource(bytesPerPush=pushComplexModFM.nbytes)
dataSink = sb.DataSink()
print '\n********* Creating Connections **********'
#Make connections
dataSource.connect(fmDemod)
fmDemod.connect(dataSink)
print 'Connections Created'
#Start sandbox env
print '\n*********** Starting Sandbox ************'
sb.start()
print 'Components Started'
#Run unit test
print 'Pushing data'
dataSource.push(pushComplexModFM.tolist(), sampleRate=fs, complexData=False)
time.sleep(1)
output = dataSink.getData()
#Stop sandbox env
print '\n*********** Stopping Sandbox ************'
sb.stop()
print 'Components stopped'
# Remove DC Bias from output
bias = sum(output)/float(len(output))
output = [(item-bias) for item in output]
# Scale output to have same magnitude as input
output_max = max(output[0:len(x)])
x_max = max(x[0:len(x)])
gain_adjust = output_max/x_max
output = [(item/gain_adjust) for item in output]
# Shift output to have same phase as input
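# The two loops below locate the sample index of the tenth zero crossing
# (sign change) in the demodulated output and in the reference message, then
# np.roll shifts the output by the difference so the two waveforms line up.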
count = 0
output_i = 0
if output[0] > 0:
last_sign = 1
else:
last_sign = -1
for i,j in enumerate(output[0:]):
if j > 0:
sign = 1
else:
sign = -1
if last_sign != sign:
count += 1
last_sign = sign
if count == 10:
output_i = i
count = 0
x_i = 0
if x[0] > 0:
last_sign = 1
else:
last_sign = -1
for i, j in enumerate(x[0:]):
if j > 0:
sign = 1
else:
sign = -1
if last_sign != sign:
count += 1
last_sign = sign
if count == 10:
x_i = i
difference = x_i - output_i
output = np.roll(output, difference)
#Plot the data
#plt.plot(x)
#plt.plot(modfm)
#plt.plot(output)
#plt.show()
#Validate results
assert len(x) == len(output), 'Input signal and output result are not equal'
sumError = sum([abs(y-z) for y,z in zip(output[0:],x[0:])])
meanError = sumError/len(output)
#print 'meanError =',meanError
passed = True
if meanError > .1:
passed = False
print '\n************** Results ****************'
if passed:
print "Unit Test 1 .........................",u'\u2714'
else:
print "Unit Test 1 .........................",u'\u2718'
print '\nTest Complete\n' | gpl-3.0 |
jonwright/ImageD11 | sandbox/rl.py | 1 | 11964 |
from __future__ import print_function
#!/usr/bin/env fable.python
import numpy as np
from six.moves import input
try:
import fftw3f
REAL = np.float32
class convolver(object):
plans = {}
def __init__(self, pointspread, dims):
"""
dims = target dimensions
pointspread = pointspread function to use
"""
self.pointspread = pointspread
self.dims = dims
self.N = self.dims[0] * self.dims[1]
self.PSF = centre_psf( pointspread, dims )
self.QPSF = self._do_fft( self.PSF, 'forward' )
self.eps = 1.0/65535
def release_plan(self, input):
"""
release the memory from previous plans
"""
if input.ctypes.data in self.plans:
# no idea if it leaks elsewhere
del self.plans[ input.ctypes.data ]
def _do_fft( self, input, direct):
"""
Does the fft using fftw3f
It caches the plan and result of the fft so that when you
do it again it will be faster
This means you are limited to only using a few arrays before
you exhaust the machines memory
see also release_plan
"""
key = input.ctypes.data
if key in self.plans:
output , plan = self.plans[key]
plan.execute()
return output
# we've never done an fft of this array
if len(list(self.plans.keys()))>6:
print("fftw3 is making too much cache")
print("needs to be programmed to reuse the same arrays")
if direct == 'forward':
assert input.dtype == np.float32
output = np.zeros( input.shape, np.complex64)
else:
assert input.dtype == np.complex64
output = np.zeros( input.shape, np.float32 )
fp = fftw3f.Plan(
inarray = input,
outarray = output,
direction = direct,
flags = ['estimate'] )
fp.execute()
self.plans[ input.ctypes.data ] = output, fp
return output
def convolve(self, image):
"""
Does a convolution
Currently using wraparound - which should probably be changed to clamp
"""
assert image.shape == self.dims
qim = self._do_fft( image, 'forward' )
np.multiply( qim ,self.QPSF, qim )
out = self._do_fft( qim, 'backward' )
np.divide( out, self.N, out)
return out
def convolve_transp(self, image):
"""
Does a convolution
Currently using wraparound - which should probably be changed to clamp
"""
assert image.shape == self.dims
qim = self._do_fft( image, 'forward' )
np.multiply( qim, self.QPSF.conj(), qim)
out = self._do_fft( qim, 'backward' )
np.divide( out, self.N, out)
return out
# raise ImportError()
print("Using fftw3f, should perform better")
except ImportError:
print("Using numpy fft for convolution")
print("You might get better performance from fftw, why not try installing:")
print("http://prdownload.berlios.de/pyfftw/PyFFTW3-0.2.tar.gz")
REAL = np.float
class convolver(object):
def __init__(self, pointspread, dims):
"""
dims = target dimensions
pointspread = pointspread function to use
"""
self.pointspread = pointspread
self.dims = dims
self.PSF = centre_psf( pointspread, dims )
self.QPSF = np.fft.rfft2( self.PSF )
self.eps = 1.0/65535
def convolve(self, image):
"""
Does a convolution
Currently using wraparound - which should probably be changed to clamp
"""
assert image.shape == self.dims
qim = np.fft.rfft2( image )
return np.fft.irfft2( qim * self.QPSF )
def convolve_transp(self, image):
"""
Does a convolution
Currently using wraparound - which should probably be changed to clamp
"""
assert image.shape == self.dims
qim = np.fft.rfft2( image )
return np.fft.irfft2( qim * self.QPSF.conj() )
def centre_psf( psf_array, target_image_dims, DEBUG=False ):
"""
Shifts the centre of mass of the point spread function to (0,0)
and copies it into a zero padded array of size target_image_dims
Function is normalised to have integral of 1.0
"""
assert psf_array.shape[0] < target_image_dims[0]
assert psf_array.shape[1] < target_image_dims[1]
proj_0 = psf_array.sum(axis = 1, dtype=REAL)
n0 = len(proj_0)/2
com_0 = n0 + (proj_0*np.arange(-n0, len(proj_0)-n0)).sum() / proj_0.sum()
com_0 = int(com_0)
proj_1 = psf_array.sum(axis = 0, dtype=REAL)
n1 = len(proj_1)/2
com_1 = n1 + (proj_1*np.arange(-n1, len(proj_1)-n1)).sum() / proj_1.sum()
com_1 = int(com_1)
output = np.zeros( target_image_dims, REAL )
#
# 4 corners:
#
p0, p1 = psf_array.shape
output[:p0-com_0, :p1-com_1 ] = psf_array[com_0:, com_1:]
if com_0 > 0:
output[:p0-com_0, -com_1:] = psf_array[com_0:, :com_1]
if com_1 > 0:
output[ -com_0:, :p1-com_1] = psf_array[:com_0, com_1:]
if com_0 > 0 and com_1 > 0:
output[ -com_0:, -com_1:] = psf_array[:com_0, :com_1]
return (output/output.sum()).astype(REAL)
def do_scale(image, scale, offset):
""" scales and offsets an image """
return (image+offset)*scale
def undo_scale(image, scale, offset):
""" recovers image undoing scale and offset """
return image/scale - offset
def prescale(image, tmin = 1./65535, tmax = 1.0):
"""
Set image to be between tmin and tmax
"""
mx = image.max()*1.0
mn = image.min()*1.0
    if (mx-mn) == 0:
        scale = 1
    else:
        scale = 1.0*(tmax - tmin)/(mx - mn)
    offset = tmin - mn
return do_scale( image, scale, offset), scale, offset
def score( obs, calc):
""" See how well our model matches the data """
diff = (obs-calc)
d2 = (diff*diff).ravel()
print("score",np.sqrt(d2.mean()), end=' ')
class RichardsonLucy(object):
def __init__(self, conv, debug=True):
"""
conv is a convolver object
"""
self.convolver = conv
self.debug = debug
def deblur( self, image, niter = 10 ):
"""
Runs the algorithm to deblur an image
"""
assert image.shape == self.convolver.dims
obs, scale, offset = prescale(image) # obs [0->1]
perfect = obs.copy()
import time
calc = obs.copy()
np.seterr('warn')
for i in range(niter):
start = time.time()
if self.debug: print("RL: iter",i, end=' ')
calc = self.convolver.convolve( perfect )
if self.debug: score( obs, calc )
calc[:] = np.where( calc < 1./65535, 1./65535, calc)
np.divide( obs, calc, calc )
correction = self.convolver.convolve_transp( calc )
np.multiply( perfect, correction, perfect)
print("%.3f"%(time.time()-start))
rescaled = undo_scale(perfect, scale, offset)
return rescaled
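def _rl_demo(niter=5):
    """
    Hedged, self-contained sketch (not called anywhere in this module):
    blur a synthetic image with a Gaussian PSF via ``convolver`` and then
    recover it with ``RichardsonLucy.deblur``. Sizes, spot positions and
    sigma are arbitrary demo values.
    """
    psf = gauss2d((32, 32), 3.0)
    truth = np.zeros((128, 128), REAL)
    truth[40, 40] = 1000.0
    truth[80, 90] = 500.0
    conv = convolver(psf, truth.shape)
    blurred = conv.convolve(truth)
    rl = RichardsonLucy(conv, debug=False)
    return rl.deblur(blurred, niter=niter)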
def gauss2d( dims, sig):
""" 2D Gaussian """
x = np.outer( np.ones(dims[0], REAL), np.arange(dims[1]))-dims[1]/2
y = np.outer( np.arange(dims[0]), np.ones(dims[1], REAL))-dims[0]/2
arg = x * x + y * y
return np.exp( -arg / sig / sig )
def dodeconv( filename_in,
filename_bg,
extra_bg,
filename_out,
pointspread_image,
niter = 20):
from fabio.openimage import openimage
im_in = openimage( filename_in)
im_bg = openimage( filename_bg)
# do background subtraction
im_in.data = im_in.data.astype(REAL) - im_bg.data - float(extra_bg)
im_in.data = np.where( im_in.data > 0 , im_in.data, 0 )
# conv does the image convolution
conv = convolver( pointspread_image, im_in.data.shape )
# rl does the deconvolution
rl = RichardsonLucy( conv )
#
im_in.data = rl.deblur( im_in.data, niter = niter ).astype( np.uint16 )
im_in.write( filename_out )
############# test stuff
def roughly(x, target):
return abs(x - target) < 1e-10
def pa(a, name):
""" debugging routine to print an array, name
and summary info
"""
ar = a.ravel()
print(name,ar.min(), ar.max(), ar.sum()/ar.shape[0])
def test_centre_psf( ):
""" check it does what we want for shifting to 0,0 """
ps = np.zeros((10,10))
ps[0,0] = 1
ret = centre_psf( ps, (100 ,100))
assert roughly(np.ravel(ret).sum(),1)
assert roughly(ret[0,0], 1)
ps = np.zeros((10,10))
ps[5,5] = 1
ret = centre_psf( ps, (12 ,15))
assert roughly(np.ravel(ret).sum(), 1)
assert roughly(ret[0,0], 1)
ps = np.zeros((7,9))
ps[4,5] = 8
ps[5,5] = 10
ps[6,5] = 8
ps[5,4] = 4
ps[5,6] = 4
ret = centre_psf( ps, (30 ,40))
assert roughly(np.ravel(ret).sum(), 1.0)
assert roughly(ret[0,0], 10./(8+10+8+4+4))
def test_prescale():
im = np.zeros((10,10))
im[5,5]=10
nim, scale, offset = prescale(im, tmin = 1./65535)
assert (nim[0,0] - 1./65535)<1./65535 , nim[0]
assert (nim[5,5] - 1.0) < 1./65535
def testg1():
    from matplotlib.pylab import figure, show, imshow, colorbar
    # Gaussian PSF with sigma**2 = 10
    gauss = gauss2d( (50, 50), np.sqrt(10) )
    dims = (1526,1024)
    ret = centre_psf( gauss, dims )
    from fabio.openimage import openimage
    im = openimage("c_Al_s1_000__quantix_0037.edf")
    d = im.data.astype(REAL)
    figure(1)
    imshow(d)
    colorbar()
    show()
    ret = centre_psf( gauss , d.shape)
    # convolution is provided by the convolver object
    conv = convolver( gauss, d.shape )
    c = conv.convolve( d )
    def sm(x): return np.ravel(x).sum()
    print(sm(c), sm(d), sm(gauss))
    figure(2)
    imshow( c)
    colorbar()
    show()
    input("press a key")
def testg5():
    from fabio.openimage import openimage
    im = openimage("c_Al_s1_000__quantix_0037.edf")
    gaus = gauss2d( (50,60), 4.1 ) + gauss2d( (50,60), 10.1 )*0.1
    from matplotlib.pylab import imshow, show
    imshow(gaus)
    show()
    conv = convolver( gaus, im.data.shape )
    rl = RichardsonLucy( conv )
    stuff = rl.deblur( im.data.astype(REAL) )
    im.data = stuff
    im.write("deblurred.edf",force_type=np.float32)
def run_tests():
test_centre_psf()
test_prescale()
# testg1()
testg5()
if __name__=="__main__":
# run_tests()
import sys, os
from fabio.openimage import openimage
def usage():
print(sys.argv[0],"infile bgfilw extra_bg outfile [psfile|Gaussian_sig] [niterations=20]")
sys.exit()
filename_in = sys.argv[1]
filename_bg = sys.argv[2]
extra_bg = sys.argv[3]
filename_out = sys.argv[4]
filename_pointspread = sys.argv[5]
try:
niter = int(sys.argv[6])
except:
niter = 20
if not os.path.exists(filename_in): usage()
if not os.path.exists(filename_bg): usage()
if os.path.exists(filename_pointspread):
psf = openimage( filename_pointspread ).data.astype(REAL)
psfr = psf.ravel()
# Scale, normalise
psf = (psf - psfr.min())/psfr.sum()
else:
try:
sig = float( filename_pointspread )
except:
usage()
dims = sig*10, sig*10
psf = gauss2d( dims, sig )
dodeconv( filename_in,
filename_bg,
extra_bg,
filename_out,
psf,
niter)
| gpl-2.0 |
ueshin/apache-spark | python/pyspark/pandas/tests/plot/test_frame_plot.py | 15 | 4733 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import numpy as np
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option, option_context
from pyspark.pandas.plot import TopNPlotBase, SampledPlotBase, HistogramPlotBase
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class DataFramePlotTest(PandasOnSparkTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
set_option("plotting.max_rows", 2000)
set_option("plotting.sample_ratio", None)
@classmethod
def tearDownClass(cls):
reset_option("plotting.max_rows")
reset_option("plotting.sample_ratio")
super().tearDownClass()
def test_missing(self):
psdf = ps.DataFrame(np.random.rand(2500, 4), columns=["a", "b", "c", "d"])
unsupported_functions = ["box", "hexbin"]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*DataFrame.*{}.*not implemented".format(name)
):
getattr(psdf.plot, name)()
def test_topn_max_rows(self):
pdf = pd.DataFrame(np.random.rand(2500, 4), columns=["a", "b", "c", "d"])
psdf = ps.from_pandas(pdf)
data = TopNPlotBase().get_top_n(psdf)
self.assertEqual(len(data), 2000)
def test_sampled_plot_with_ratio(self):
with option_context("plotting.sample_ratio", 0.5):
pdf = pd.DataFrame(np.random.rand(2500, 4), columns=["a", "b", "c", "d"])
psdf = ps.from_pandas(pdf)
data = SampledPlotBase().get_sampled(psdf)
self.assertEqual(round(len(data) / 2500, 1), 0.5)
def test_sampled_plot_with_max_rows(self):
# 'plotting.max_rows' is 2000
pdf = pd.DataFrame(np.random.rand(2000, 4), columns=["a", "b", "c", "d"])
psdf = ps.from_pandas(pdf)
data = SampledPlotBase().get_sampled(psdf)
self.assertEqual(round(len(data) / 2000, 1), 1)
def test_compute_hist_single_column(self):
psdf = ps.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50]}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10]
)
expected_bins = np.linspace(1, 50, 11)
bins = HistogramPlotBase.get_bins(psdf[["a"]].to_spark(), 10)
expected_histogram = np.array([5, 4, 1, 0, 0, 0, 0, 0, 0, 1])
histogram = HistogramPlotBase.compute_hist(psdf[["a"]], bins)[0]
self.assert_eq(pd.Series(expected_bins), pd.Series(bins))
self.assert_eq(pd.Series(expected_histogram, name="a"), histogram, almost=True)
def test_compute_hist_multi_columns(self):
expected_bins = np.linspace(1, 50, 11)
psdf = ps.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50],
"b": [50, 50, 30, 30, 30, 24, 10, 5, 4, 3, 1],
}
)
bins = HistogramPlotBase.get_bins(psdf.to_spark(), 10)
self.assert_eq(pd.Series(expected_bins), pd.Series(bins))
expected_histograms = [
np.array([5, 4, 1, 0, 0, 0, 0, 0, 0, 1]),
np.array([4, 1, 0, 0, 1, 3, 0, 0, 0, 2]),
]
histograms = HistogramPlotBase.compute_hist(psdf, bins)
expected_names = ["a", "b"]
for histogram, expected_histogram, expected_name in zip(
histograms, expected_histograms, expected_names
):
self.assert_eq(
pd.Series(expected_histogram, name=expected_name), histogram, almost=True
)
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.plot.test_frame_plot import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
theoryno3/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 121 | 3429 | """
Testing for mean shift clustering methods
"""
import numpy as np
import warnings
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
# Test estimate_bandwidth
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
# Test MeanShift algorithm
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_meanshift_predict():
# Test MeanShift.predict
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_meanshift_all_orphans():
# init away from the data, crash with a sensible warning
ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
msg = "No point was within bandwidth=0.1"
assert_raise_message(ValueError, msg, ms.fit, X,)
def test_unfitted():
# Non-regression: before fit, there should be not fitted attributes.
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
# Test the bin seeding technique which can be used in the mean shift
# algorithm
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
# we bail and use the whole data here.
with warnings.catch_warnings(record=True):
test_bins = get_bin_seeds(X, 0.01, 1)
assert_array_equal(test_bins, X)
# tight clusters around [0, 0] and [1, 1], only get two bins
X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]],
cluster_std=0.1, random_state=0)
test_bins = get_bin_seeds(X, 1)
assert_array_equal(test_bins, [[0, 0], [1, 1]])
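# --- Hedged illustration (not part of the original scikit-learn test file) ---
# The comments in test_bin_seeds above describe the bin seeding trick: points
# are snapped to a grid of width bin_size and only grid cells holding at least
# min_bin_freq points are kept as seeds. The helper below is an assumption-only
# sketch (its name and toy data are not used by the tests): of the three points,
# only the cell around (1, 1), which holds two of them, survives min_bin_freq=2.
def _bin_seed_sketch():
    pts = np.array([[1.1, 0.9], [0.9, 1.2], [5.0, 5.0]])
    seeds = get_bin_seeds(pts, 1.0, 2)
    return seeds  # expected: a single seed at [1., 1.]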
| bsd-3-clause |
LarsDu/DeepNucDecomp | deepnuc/nucinference.py | 2 | 29422 | import tensorflow as tf
import numpy as np
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pprint
import numpy as np
import os
import sys
import glob
import dubiotools as dbt
from onehotseqmutator import OnehotSeqMutator
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from duseqlogo import LogoTools
import nucheatmap
import nucconvmodel
from collections import OrderedDict
import pickle
class NucInference(object):
"""
Base class for NucBinaryClassifier and NucRegressor
This class should contain all methods that work for both child classes.
This includes train(),save(),and load(). Child classes must contain
method eval_model_metrics()
build_model() should be different due to different loss functions and lack
of classification metrics.
"""
use_onehot_labels = True
def __init__(self,
sess,
train_batcher,
test_batcher,
num_epochs,
learning_rate,
batch_size,
seq_len,
save_dir,
keep_prob,
beta1,
concat_revcom_input,
nn_method_key):
self.sess = sess
self.train_batcher = train_batcher
self.test_batcher = test_batcher
self.seq_len = self.train_batcher.seq_len
self.num_epochs = num_epochs
self.learning_rate = learning_rate
self.batch_size = batch_size
self.seq_len = seq_len
self.save_dir = save_dir
self.summary_dir = self.save_dir+os.sep+'summaries'
self.checkpoint_dir = self.save_dir+os.sep+'checkpoints'
self.metrics_dir = self.save_dir+os.sep+'metrics'
#One minus the dropout_probability if dropout is enabled for a particular model
self.keep_prob = keep_prob
#beta1 is a parameter for the AdamOptimizer
self.beta1 = beta1
#This flag will tell the inference method to concatenate
#the reverse complemented version of the input sequence
#to the input vector
self.concat_revcom_input = concat_revcom_input
self.nn_method_key = nn_method_key
self.nn_method = nucconvmodel.methods_dict[nn_method_key]
self.train_steps_per_epoch = int(self.train_batcher.num_records//self.batch_size)
if self.test_batcher:
self.test_steps_per_epoch = int(self.test_batcher.num_records//self.batch_size)
self.num_steps = int(self.train_steps_per_epoch*self.num_epochs)
self.save_on_epoch = 5 #This will be overridden in child class __init__
self.train_metrics_vector = [] #a list of metrics recorded on each save_on_epoch
self.test_metrics_vector =[]
self.epoch = 0
self.step=0
#http://stackoverflow.com/questions/43218731/
#Deprecated method of saving step on graph
#self.global_step = tf.Variable(0, trainable=False,name='global_step')
#Saver should be set in build_model() after all ops are declared
self.saver = None
def save(self):
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
#Save checkpoint in tensorflow
checkpoint_name = self.checkpoint_dir+os.sep+'checkpoints'
self.saver.save(self.sess,checkpoint_name,global_step=self.step)
#Save metrics using pickle in the metrics folder
if not os.path.exists(self.metrics_dir):
os.makedirs(self.metrics_dir)
metrics_file = self.metrics_dir+os.sep+'metrics-'+str(self.step)+'.p'
with open(metrics_file,'w') as of:
pickle.dump(self.train_metrics_vector,of)
pickle.dump(self.test_metrics_vector,of)
#Clean the metrics directory of old pickle files (to save storage space)
flist = glob.glob(self.metrics_dir+os.sep+'*.p')
flist_steps = [int(os.path.basename(f).strip('.p').split('-')[1]) for f in flist]
max_metric = max(flist_steps+[0])
for f in flist:
if max_metric != int(os.path.basename(f).strip('.p').split('-')[1]):
os.remove(f)
def load(self,checkpoint_dir):
'''
Load saved model from checkpoint directory.
'''
print(" Retrieving checkpoints from", checkpoint_dir)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
self.saver.restore(self.sess,ckpt.model_checkpoint_path)
print "\n\n\n\nSuccessfully loaded checkpoint from",ckpt.model_checkpoint_path
#Extract step from checkpoint filename
self.step = int(os.path.basename(ckpt.model_checkpoint_path).split('-')[1])
self.epoch = int(self.step//self.train_steps_per_epoch)
self.load_pickle_metrics(self.step)
return True
else:
print ("Failed to load checkpoint",checkpoint_dir)
return False
def load_pickle_metrics(self,step):
#Load metrics from pickled metrics file
metrics_file = self.metrics_dir+os.sep+'metrics-'+str(self.step)+'.p'
with open(metrics_file,'r') as of:
self.train_metrics_vector = pickle.load(of)
if self.test_batcher:
self.test_metrics_vector = pickle.load(of)
print "Successfully loaded recorded metrics data from {}".\
format(metrics_file)
def train(self):
"""
Train a model
:returns: Tuple of two dicts: training metrics, and testing metrics
Note: This method was designed to work for both nucregressor and nucclassifier
However, those objects should have different eval_model_metrics() methods, since
classification and regression produce different metrics
"""
#coord = tf.train.Coordinator()
#threads = tf.train.start_queue_runners(self.sess.coord)
start_time = time.time()
#If model already finished training, just return last metrics
if self.step >= self.num_steps or self.epoch>self.num_epochs:
print "Loaded model already finished training"
print "Model was loaded at step {} epoch {} and num_steps set to {} and num epochs set to {}".format(self.step,self.epoch,self.num_steps,self.num_epochs)
#Important note: epochs go from 1 to num_epochs inclusive. The
# last epoch index is equal to num_epochs
for _ in xrange(self.epoch,self.num_epochs):
self.epoch += 1
for _ in xrange(self.train_steps_per_epoch):
self.step += 1
(labels_batch,dna_seq_batch) = self.train_batcher.pull_batch(self.batch_size)
feed_dict={
self.dna_seq_placeholder:dna_seq_batch,
self.labels_placeholder:labels_batch,
self.keep_prob_placeholder:self.keep_prob
}
_,loss_value,_ =self.sess.run([self.train_op, self.loss, self.logits],
feed_dict=feed_dict)
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
duration = time.time() - start_time
# Write the summaries and print an overview fairly often.
if (self.step % self.train_steps_per_epoch == 0):
# Print status to stdout.
print('Epoch %d Step %d loss = %.4f (%.3f sec)' % (self.epoch, self.step,
loss_value,
duration))
#Writer summary
summary_str = self.sess.run(self.summary_op, feed_dict=feed_dict)
self.summary_writer.add_summary(summary_str, self.step)
self.summary_writer.flush() #ensure summaries written to disk
#Save checkpoint and evaluate training and test sets
if ( self.epoch % self.save_on_epoch == 0
and self.epoch > 0
and self.epoch !=self.num_epochs
and self.step % self.train_steps_per_epoch == 0):
print('Training data eval:')
train_metrics=self.eval_model_metrics(self.train_batcher)
self.print_metrics(train_metrics)
self.train_metrics_vector.append(train_metrics)
if self.test_batcher != None:
print('Testing data eval:')
test_metrics=self.eval_model_metrics(self.test_batcher)
self.test_metrics_vector.append(test_metrics)
self.print_metrics(test_metrics)
print "Saving checkpoints"
#self.save()
if (self.epoch == self.num_epochs and self.step % self.train_steps_per_epoch ==0):
# This is the final step and epoch, save metrics
# Evaluate the entire training set.
print('Training data eval:')
#self.eval_model_accuracy(self.train_batcher)
self.train_metrics_vector.append( self.eval_model_metrics(self.train_batcher,
save_plots=True,
image_name='train_metrics.png'))
if self.test_batcher != None:
print('Testing data eval:')
self.test_metrics_vector.append(self.eval_model_metrics(self.test_batcher,
save_plots=True,
image_name='test_metrics.png'))
print "Saving final checkpoint"
self.save()
#Set return values
ret_metrics = []
if self.train_metrics_vector != []:
ret_metrics.append(self.train_metrics_vector[-1])
else:
ret_metrics.append([])
if self.test_metrics_vector != []:
ret_metrics.append(self.test_metrics_vector[-1])
else:
ret_metrics.append([])
return ret_metrics
def eval_batchers(self,save_plots=True):
# Evaluate training and test batcher data.
print('Training data eval:')
#self.eval_model_accuracy(self.train_batcher)
train_results_dict = self.eval_model_metrics(self.train_batcher,
save_plots=save_plots)
self.print_metrics(train_results_dict)
if self.test_batcher != None:
print('Testing data eval:')
test_results_dict = self.eval_model_metrics(self.test_batcher,
save_plots=save_plots)
self.print_metrics(test_results_dict)
def print_metrics(self,metrics_dict):
for key,value in metrics_dict.viewitems():
#Do not print out arrays!
if type(value) != np.ndarray:
print '\t',key,":\t",value
def eval_batch(self,dna_seq_batch,labels_batch):
""" Evaluate a single batch of labels and data """
feed_dict = {
self.dna_seq_placeholder: dna_seq_batch,
self.labels_placeholder: labels_batch,
self.keep_prob_placeholder: 1.0
}
batch_logits,batch_network = self.sess.run(self.nn_method,feed_dict=feed_dict)
return batch_logits,batch_network
def plot_test_epoch_vs_metric(self,
metric_key="auroc",
suffix = '',
save_plot=True,
xmin = 0.0,
ymin=0.5):
format_dict= {"auroc":"auROC","auprc":"auPRC","f1_score":"F1-Score"}
num_mets = len(self.test_metrics_vector)
if num_mets == 0:
print "Test metrics vector is empty!"
return None
met_y = [m[metric_key] for m in self.test_metrics_vector]
ep_x = [m["epoch"] for m in self.test_metrics_vector]
fig,ax = plt.subplots(1)
ax.plot(ep_x,met_y)
ax.set_xlabel("Number of epochs")
ax.set_xlim(xmin,ep_x[-1]+5)
ax.set_ylim(ymin,1.0)
if metric_key in format_dict:
ax.set_title("Epoch vs. {} {}".format(format_dict[metric_key],suffix))
ax.set_ylabel("{}".format(format_dict[metric_key]))
else:
ax.set_title("Epoch vs.{} {}".format(metric_key,suffix))
ax.set_ylabel("{}".format(metric_key))
if save_plot:
plot_file = self.save_dir+os.sep+"epoch_vs_{}_{}.png".format(metric_key,suffix)
fig.savefig(plot_file)
###Relevance batch methods###
def relevance_from_nucs(self,nucs,label):
"""
Return the relevance of a nucleotide sequence and corresponding label
:nuc_seq: string nucleotide sequence
:label: 1 x num_classes numpy indicator array
:returns: 4xn relevance matrix
:rtype: numpy array
"""
return self.run_relevance(dbt.seq_to_onehot(nucs),label)
def run_relevance(self,onehot_seq,label):
"""
Return the relevance of a onehot encoded sequence and corresponding label
:onehot_seq: nx4 onehot representation of a nucleotide sequence
:label: 1 x num_classes numpy indicator array
:returns: 4xn relevance matrix
:rtype: numpy array
"""
feed_dict = {
self.dna_seq_placeholder: np.expand_dims(onehot_seq,axis=0),
self.labels_placeholder: np.expand_dims(label,axis=0),
self.keep_prob_placeholder: 1.0
}
relevance_batch = self.sess.run(self.relevance,feed_dict=feed_dict)
relevance = np.squeeze(relevance_batch[0],axis=0).T
return relevance
def relevance_from_batcher(self,batcher,index):
"""
:param batcher: DataBatcher object
:param index: integer index of item in DataBatcher object
:returns: 4xn relevance matrix
:rtype: numpy array
"""
batch_size=1 #Needs to be 1 for now due to conv2d_transpose issue
label, onehot_seq = batcher.pull_batch_by_index(index,batch_size)
rel_mat = self.run_relevance(onehot_seq[0],label[0])
return rel_mat
def plot_relevance_logo_from_batcher(self,batcher,index):
batch_size=1 #Needs to be 1 for now due to conv2d_transpose issue
label, onehot_seq = batcher.pull_batch_by_index(index,batch_size)
numeric_label = label[0].tolist().index(1)
save_fig = self.save_dir+os.sep+'relevance_logo_ind{}_lab{}.png'.format(index,
numeric_label)
self.plot_relevance_logo(onehot_seq[0],label[0],save_fig)
def plot_relevance_heatmap_from_batcher(self,batcher,index):
batch_size=1 #Needs to be 1 for now due to conv2d_transpose issue
labels_batch,dna_batch = batcher.pull_batch_by_index(index,batch_size)
numeric_label = labels_batch[0].tolist().index(1)
save_fig = self.save_dir+os.sep+'relevance_heat_ind{}_lab{}.png'.format(index,
numeric_label)
self.plot_relevance_heatmap(dna_batch[0],
labels_batch[0],
save_fig)
def plot_relevance_heatmap(self,onehot_seq,label,save_fig):
relevance = self.run_relevance(onehot_seq,label)
seq = dbt.onehot_to_nuc(onehot_seq.T)
fig,ax = nucheatmap.nuc_heatmap(seq,
relevance,
save_fig=save_fig,
clims = [0,np.max(relevance)],
cmap='Blues')
def plot_relevance_logo(self,onehot_seq,label,save_fig):
logosheets=[]
input_seqs=[]
np.set_printoptions(linewidth=500,precision=4)
save_file = save_fig
relevance = self.run_relevance(onehot_seq,label)
r_img = np.squeeze(relevance).T
###Build a "relevance scaled position weight matrix"
#Convert each position to a position probability matrix
r_ppm = r_img/np.sum(r_img,axis=0)
lh = LogoTools.PwmTools.ppm_to_logo_heights(r_ppm)
#Relevance scale logo_heights
r_rel =np.sum(r_img,axis=0) #relavance by position
max_relevance = np.max(r_rel)
min_relevance = np.min(r_rel)
#print "r_rel max", max_relevance
#print "r_rel min", min_relevance
#lh is in bits of information
#Rescale logo_heights to r_rel
scaled_lh = lh * r_rel/(max_relevance - min_relevance)
logosheets.append(scaled_lh*25)
input_seqs.append(onehot_seq.T)
rel_sheet = LogoTools.LogoNucSheet(logosheets,input_seqs,input_type='heights')
rel_sheet.write_to_png(save_file)
#plt.pcolor(r_img,cmap=plt.cm.Reds)
#print "A relevance"
#plt.plot(r_img[0,:])
#print "Relevance by position"
#plt.plot(np.sum(r_img,axis=0))
#logits_np = self.sess.run(self.logits,
# feed_dict=feed_dict)
#Print actual label and inference if classification
#guess = logits_np.tolist()
#guess = guess[0].index(max(guess[0]))
#actual = labels_batch[0].tolist().index(1.)
#print logits_np
#print self.sess.run(self.probs,feed_dict=feed_dict)
#print ("Guess:",(guess))
#print ("Actual:",(actual))
##Alipanahi mut map methods###
def alipanahi_mutmap(self,onehot_seq,label):
"""
Create a matrix representing the effects of every
possible mutation on classification score as described in Alipanahi et al 2015
:onehot_seq: nx4 onehot representation of a nucleotide sequence
:label: 1 x num_classes numpy indicator array
:returns: nx4 mutation map numpy array
"""
#Mutate the pulled batch sequence.
#OnehotSeqMutator will produce every SNP for the input sequence
oh_iter = OnehotSeqMutator(onehot_seq.T) #4xn inputs
eval_batch_size = 75 #Number of generated examples to process in parallel
# with each step
single_pulls = oh_iter.n%eval_batch_size
num_whole_batches = int(oh_iter.n//eval_batch_size)
num_pulls = num_whole_batches+single_pulls
all_probs = np.zeros((oh_iter.n,self.num_classes))
for i in range(num_pulls):
if i<num_whole_batches:
iter_batch_size = eval_batch_size
else:
iter_batch_size=1
labels_batch = np.asarray(iter_batch_size*[label])
dna_seq_batch = oh_iter.pull_batch(iter_batch_size)
feed_dict = {
self.dna_seq_placeholder: dna_seq_batch,
self.labels_placeholder: labels_batch,
self.keep_prob_placeholder: 1.0
}
cur_probs = self.sess.run(self.probs,feed_dict=feed_dict)
#TODO: Map these values back to the original nuc array
if iter_batch_size > 1:
start_ind = iter_batch_size*i
elif iter_batch_size == 1:
start_ind = num_whole_batches*eval_batch_size+(i-num_whole_batches)
else:
print "Should never reach this condition"
start_ind = iter_batch_size*i
all_probs[start_ind:start_ind+iter_batch_size,:] = cur_probs
#print "OHseqshape",onehot_seq.shape
seq_len = onehot_seq.shape[0]
amutmap_ds=np.zeros((seq_len,4))
label_index = label.tolist().index(1)
#Onehot seq mutator created SNPs in order
#Fill output matrix with logits except where nucleotides unchanged
#Remember onehot_seq is nx4 while nuc_heatmap takes inputs that are 4xn
ps_feed_dict = {
self.dna_seq_placeholder:np.expand_dims(onehot_seq,axis=0),
self.labels_placeholder: np.expand_dims(label,axis=0),
self.keep_prob_placeholder: 1.0
}
#ps is the original score of the original input sequence
ps = self.sess.run(self.probs,feed_dict=ps_feed_dict)[0][label_index]
k=0
for i in range(seq_len):
for j in range(4):
if onehot_seq[i,j] == 1:
amutmap_ds[i,j] = 0 #Set original letter to 0
else:
#ps_hat is the score for a given snp
ps_hat = all_probs[k,label_index]
amutmap_ds[i,j] = (ps_hat - ps)*max(0,ps_hat,ps)
k+=1
#amutmap_ds is nx4
return amutmap_ds
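# Hedged usage sketch (illustration only; `model` and `batcher` are assumed
# names for a trained child instance and a DataBatcher, nothing defined here):
#   labels, seqs = batcher.pull_batch_by_index(0, 1)
#   amap = model.alipanahi_mutmap(seqs[0], labels[0])   # n x 4 array
#   nucheatmap.nuc_heatmap(dbt.onehot_to_nuc(seqs[0].T), amap.T,
#                          save_fig='alipanahi_mutmap_rec0.png')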
def alipanahi_mutmap_from_batcher(self,batcher,index):
label,onehot_seq = batcher.pull_batch_by_index(index,batch_size=1)
return self.alipanahi_mutmap(onehot_seq[0],label[0])
def plot_alipanahi_mutmap(self,onehot_seq,label,save_fig):
seq = dbt.onehot_to_nuc(onehot_seq.T)
amut_onehot = self.alipanahi_mutmap(onehot_seq,label)
nucheatmap.nuc_heatmap(seq,amut_onehot.T,save_fig=save_fig)
def plot_alipanahi_mutmap_from_batcher(self,batcher,index):
batch_size = 1
labels_batch, dna_seq_batch = batcher.pull_batch_by_index(index,batch_size)
#print "Index {} has label {}".format(index,labels_batch[0])
numeric_label = labels_batch[0].tolist().index(1)
save_fig = self.save_dir+os.sep+'alipanahi_mut_map_ind{}_lab{}.png'.format(index,
numeric_label)
self.plot_alipanahi_mutmap(dna_seq_batch[0],labels_batch[0],save_fig)
def avg_alipanahi_mutmap_of_batcher(self,batcher):
"""Get every mutmap from a given batcher, then average over all
mutation maps,
Works for num_classes = 2"""
all_labels, all_dna_seqs = batcher.pull_batch_by_index(0,batcher.num_records)
amutmaps = [np.zeros((batcher.num_records,self.seq_len,4)) for _ in range(self.num_classes)]
for ci in range(self.num_classes):
for ri in range(batcher.num_records):
#double check this for errors
#amutmap is nx4
amutmaps[ci][ri,:,:] = self.alipanahi_mutmap(all_dna_seqs[ri,:,:],all_labels[ri,:])
return [np.mean(amutmap,axis=0) for amutmap in amutmaps]
def plot_avg_alipanahi_mutmap_of_batcher(self,batcher,fsuffix=''):
amutmaps = self.avg_alipanahi_mutmap_of_batcher(batcher)
for i,amap in enumerate(amutmaps):
# Note: amax(arr, axis=1) give greatest val for each row (nuc for nx4)
max_avg_nuc =(amap == np.amax(amap,axis=1,keepdims=True)).astype(np.float32)
seq = dbt.onehot_to_nuc(max_avg_nuc.T)
alipanahi_mutmap_dir = self.save_dir + os.sep+'alipanahi_mutmap_dir'
if not os.path.exists(alipanahi_mutmap_dir):
os.makedirs(alipanahi_mutmap_dir)
save_fname = alipanahi_mutmap_dir+os.sep+'avg_batcher_mutmap_{}recs_class{}{}.png'.\
format(batcher.num_records,i,fsuffix)
nucheatmap.nuc_heatmap(seq,amap.T,save_fig=save_fname)
###Mutmap methods###
# generate every possible snp and measure change in logit
def mutmap(self,onehot_seq,label):
"""
:onehot_seq: nx4 onehot representation of a nucleotide sequence
:label: 1 x num_classes numpy indicator array (the class index is read from the position of the 1)
Create a matrix representing the effects of every
possible mutation on classification score as described in Alipanahi et al 2015
"""
#Mutate the pulled batch sequence.
#OnehotSeqMutator will produce every SNP for the input sequence
oh_iter = OnehotSeqMutator(onehot_seq.T) #4xn inputs
eval_batch_size = 75 #Number of generated examples to process in parallel
# with each step
single_pulls = oh_iter.n%eval_batch_size
num_whole_batches = int(oh_iter.n//eval_batch_size)
num_pulls = num_whole_batches+single_pulls
all_logits = np.zeros((oh_iter.n,self.num_classes))
for i in range(num_pulls):
if i<num_whole_batches:
iter_batch_size = eval_batch_size
else:
iter_batch_size=1
labels_batch = np.asarray(iter_batch_size*[label])
dna_seq_batch = oh_iter.pull_batch(iter_batch_size)
feed_dict = {
self.dna_seq_placeholder: dna_seq_batch,
self.labels_placeholder: labels_batch,
self.keep_prob_placeholder: 1.0
}
cur_logits = self.sess.run(self.logits,feed_dict=feed_dict)
#TODO: Map these values back to the original nuc array
if iter_batch_size > 1:
start_ind = iter_batch_size*i
elif iter_batch_size == 1:
start_ind = num_whole_batches*eval_batch_size+(i-num_whole_batches)
else:
print "Should never reach this condition"
start_ind = iter_batch_size*i
all_logits[start_ind:start_ind+iter_batch_size,:] = cur_logits
#print "OHseqshape",onehot_seq.shape
seq_len = onehot_seq.shape[0]
mutmap_ds=np.zeros((seq_len,4))
k=0
label_index = label.tolist().index(1)
#Onehot seq mutator created SNPs in order
#Fill output matrix with logits except where nucleotides unchanged
#Remember onehot_seq is nx4 while nuc_heatmap takes inputs that are 4xn
for i in range(seq_len):
for j in range(4):
if onehot_seq[i,j] == 1:
mutmap_ds[i,j] = 0 #Set original letter to 0
else:
mutmap_ds[i,j] = all_logits[k,label_index]
k+=1
return mutmap_ds.T
def mutmap_from_batcher(self,batcher,index):
"""
Create a matrix representing the effects of every
possible mutation on classification score as described in Alipanahi et al 2015.
Retrieve this data from a databatcher
"""
label, onehot_seq = batcher.pull_batch_by_index(index,batch_size=1)
return self.mutmap(onehot_seq[0],label[0])
def plot_mutmap(self,onehot_seq,label,save_fig):
"""
:param onehot_seq: nx4 matrix
:param label:
:returns:
:rtype:
"""
seq = dbt.onehot_to_nuc(onehot_seq.T)
mut_onehot = self.mutmap(onehot_seq,label)
#print "mut_onehot",mut_onehot.shape
#print mut_onehot
return nucheatmap.nuc_heatmap(seq,mut_onehot,save_fig=save_fig)
def plot_mutmap_from_batcher(self,batcher,index):
batch_size = 1
labels_batch, dna_seq_batch = batcher.pull_batch_by_index(index,batch_size)
#print "Index {} has label {}".format(index,labels_batch[0])
numeric_label = labels_batch[0].tolist().index(1)
save_fig = self.save_dir+os.sep+'mut_map_ind{}_lab{}.png'.format(index,numeric_label)
return self.plot_mutmap(dna_seq_batch[0],labels_batch[0],save_fig)
#################
def print_global_variables(self):
"""Print all variable names from the current graph"""
print "Printing global_variables"
gvars = list(tf.global_variables())
for var in gvars:
print "Variable name",var.name
print self.sess.run(var)
def get_optimal_metrics(self,metrics_vector, metric_key="auroc"):
"""
Get the metrics from the epoch where a given metric was at it maximum
"""
best_val = 0
for metric in metrics_vector:
best_val = max(metric[metric_key],best_val)
for metric in metrics_vector:
#metric here is an OrderedDict of metrics
if metric[metric_key]==best_val:
return metric
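# --- Hedged illustration (not part of the original module) -------------------
# The NucInference docstring says child classes supply their own build_model()
# and eval_model_metrics(). The skeleton below is an assumption-only sketch of
# that contract; the class name, placeholder metric values, and the
# NotImplementedError body are illustrative and are not used anywhere else.
class _ToyNucClassifierSketch(NucInference):
    def build_model(self):
        # A real child class would create the placeholders, logits, loss,
        # train_op, summary_op, and tf.train.Saver that train() expects.
        raise NotImplementedError("sketch only")
    def eval_model_metrics(self, batcher, save_plots=False, image_name=''):
        # train() appends the returned mapping to the metrics vectors, so an
        # OrderedDict keyed by metric name (e.g. "auroc") is expected.
        return OrderedDict([("epoch", self.epoch), ("auroc", 0.0)])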
| gpl-3.0 |
MohammedWasim/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 204 | 5442 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix, than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <[email protected]>
# License: BSD 3 clause
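# --- Hedged illustration (not part of the original example script) ----------
# The module docstring above argues that an L1 prior (Lasso) can recover a
# sparse signal from few linear measurements where an L2 prior (Ridge) cannot.
# The helper below sketches that claim on a toy 1-D sparse-recovery problem;
# its name, sizes, and alpha values are assumptions for illustration only, and
# it is never called by the script.
def _l1_vs_l2_toy_demo(n_features=100, n_measurements=30, n_nonzero=5, seed=0):
    """Return (l1_error, l2_error) for recovering a sparse coefficient vector."""
    import numpy as np
    from sklearn.linear_model import Lasso, Ridge

    rng = np.random.RandomState(seed)
    # Sparse ground truth: only a few non-zero coefficients.
    w_true = np.zeros(n_features)
    support = rng.choice(n_features, n_nonzero, replace=False)
    w_true[support] = rng.randn(n_nonzero)
    # Random measurement matrix stands in for the projection operator.
    A = rng.randn(n_measurements, n_features)
    y = A.dot(w_true)
    # L1- and L2-penalized reconstructions of the coefficients.
    w_l1 = Lasso(alpha=0.01, max_iter=10000).fit(A, y).coef_
    w_l2 = Ridge(alpha=0.01).fit(A, y).coef_
    return np.linalg.norm(w_l1 - w_true), np.linalg.norm(w_l2 - w_true)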
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x]
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
n_pts = 36.
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
return res - ndimage.binary_erosion(res)
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l / 7.)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| bsd-3-clause |
mwaskom/seaborn | seaborn/tests/test_relational.py | 2 | 56589 | from distutils.version import LooseVersion
from itertools import product
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import same_color, to_rgba
import pytest
from numpy.testing import assert_array_equal
from ..palettes import color_palette
from ..relational import (
_RelationalPlotter,
_LinePlotter,
_ScatterPlotter,
relplot,
lineplot,
scatterplot
)
from ..utils import _draw_figure
from .._testing import assert_plots_equal
@pytest.fixture(params=[
dict(x="x", y="y"),
dict(x="t", y="y"),
dict(x="a", y="y"),
dict(x="x", y="y", hue="y"),
dict(x="x", y="y", hue="a"),
dict(x="x", y="y", size="a"),
dict(x="x", y="y", style="a"),
dict(x="x", y="y", hue="s"),
dict(x="x", y="y", size="s"),
dict(x="x", y="y", style="s"),
dict(x="x", y="y", hue="a", style="a"),
dict(x="x", y="y", hue="a", size="b", style="b"),
])
def long_semantics(request):
return request.param
class Helpers:
# TODO Better place for these?
def scatter_rgbs(self, collections):
rgbs = []
for col in collections:
rgb = tuple(col.get_facecolor().squeeze()[:3])
rgbs.append(rgb)
return rgbs
def paths_equal(self, *args):
equal = all([len(a) == len(args[0]) for a in args])
for p1, p2 in zip(*args):
equal &= np.array_equal(p1.vertices, p2.vertices)
equal &= np.array_equal(p1.codes, p2.codes)
return equal
class SharedAxesLevelTests:
def test_color(self, long_df):
ax = plt.figure().subplots()
self.func(data=long_df, x="x", y="y", ax=ax)
assert self.get_last_color(ax) == to_rgba("C0")
ax = plt.figure().subplots()
self.func(data=long_df, x="x", y="y", ax=ax)
self.func(data=long_df, x="x", y="y", ax=ax)
assert self.get_last_color(ax) == to_rgba("C1")
ax = plt.figure().subplots()
self.func(data=long_df, x="x", y="y", color="C2", ax=ax)
assert self.get_last_color(ax) == to_rgba("C2")
ax = plt.figure().subplots()
self.func(data=long_df, x="x", y="y", c="C2", ax=ax)
assert self.get_last_color(ax) == to_rgba("C2")
class TestRelationalPlotter(Helpers):
def test_wide_df_variables(self, wide_df):
p = _RelationalPlotter()
p.assign_variables(data=wide_df)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
assert len(p.plot_data) == np.product(wide_df.shape)
x = p.plot_data["x"]
expected_x = np.tile(wide_df.index, wide_df.shape[1])
assert_array_equal(x, expected_x)
y = p.plot_data["y"]
expected_y = wide_df.to_numpy().ravel(order="f")
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
expected_hue = np.repeat(wide_df.columns.to_numpy(), wide_df.shape[0])
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] == wide_df.index.name
assert p.variables["y"] is None
assert p.variables["hue"] == wide_df.columns.name
assert p.variables["style"] == wide_df.columns.name
def test_wide_df_with_nonnumeric_variables(self, long_df):
p = _RelationalPlotter()
p.assign_variables(data=long_df)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
numeric_df = long_df.select_dtypes("number")
assert len(p.plot_data) == np.product(numeric_df.shape)
x = p.plot_data["x"]
expected_x = np.tile(numeric_df.index, numeric_df.shape[1])
assert_array_equal(x, expected_x)
y = p.plot_data["y"]
expected_y = numeric_df.to_numpy().ravel(order="f")
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
expected_hue = np.repeat(
numeric_df.columns.to_numpy(), numeric_df.shape[0]
)
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] == numeric_df.index.name
assert p.variables["y"] is None
assert p.variables["hue"] == numeric_df.columns.name
assert p.variables["style"] == numeric_df.columns.name
def test_wide_array_variables(self, wide_array):
p = _RelationalPlotter()
p.assign_variables(data=wide_array)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
assert len(p.plot_data) == np.product(wide_array.shape)
nrow, ncol = wide_array.shape
x = p.plot_data["x"]
expected_x = np.tile(np.arange(nrow), ncol)
assert_array_equal(x, expected_x)
y = p.plot_data["y"]
expected_y = wide_array.ravel(order="f")
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
expected_hue = np.repeat(np.arange(ncol), nrow)
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] is None
assert p.variables["y"] is None
assert p.variables["hue"] is None
assert p.variables["style"] is None
def test_flat_array_variables(self, flat_array):
p = _RelationalPlotter()
p.assign_variables(data=flat_array)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y"]
assert len(p.plot_data) == np.product(flat_array.shape)
x = p.plot_data["x"]
expected_x = np.arange(flat_array.shape[0])
assert_array_equal(x, expected_x)
y = p.plot_data["y"]
expected_y = flat_array
assert_array_equal(y, expected_y)
assert p.variables["x"] is None
assert p.variables["y"] is None
def test_flat_list_variables(self, flat_list):
p = _RelationalPlotter()
p.assign_variables(data=flat_list)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y"]
assert len(p.plot_data) == len(flat_list)
x = p.plot_data["x"]
expected_x = np.arange(len(flat_list))
assert_array_equal(x, expected_x)
y = p.plot_data["y"]
expected_y = flat_list
assert_array_equal(y, expected_y)
assert p.variables["x"] is None
assert p.variables["y"] is None
def test_flat_series_variables(self, flat_series):
p = _RelationalPlotter()
p.assign_variables(data=flat_series)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y"]
assert len(p.plot_data) == len(flat_series)
x = p.plot_data["x"]
expected_x = flat_series.index
assert_array_equal(x, expected_x)
y = p.plot_data["y"]
expected_y = flat_series
assert_array_equal(y, expected_y)
assert p.variables["x"] is flat_series.index.name
assert p.variables["y"] is flat_series.name
def test_wide_list_of_series_variables(self, wide_list_of_series):
p = _RelationalPlotter()
p.assign_variables(data=wide_list_of_series)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
chunks = len(wide_list_of_series)
chunk_size = max(len(l) for l in wide_list_of_series)
assert len(p.plot_data) == chunks * chunk_size
index_union = np.unique(
np.concatenate([s.index for s in wide_list_of_series])
)
x = p.plot_data["x"]
expected_x = np.tile(index_union, chunks)
assert_array_equal(x, expected_x)
y = p.plot_data["y"]
expected_y = np.concatenate([
s.reindex(index_union) for s in wide_list_of_series
])
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
series_names = [s.name for s in wide_list_of_series]
expected_hue = np.repeat(series_names, chunk_size)
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] is None
assert p.variables["y"] is None
assert p.variables["hue"] is None
assert p.variables["style"] is None
def test_wide_list_of_arrays_variables(self, wide_list_of_arrays):
p = _RelationalPlotter()
p.assign_variables(data=wide_list_of_arrays)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
chunks = len(wide_list_of_arrays)
chunk_size = max(len(l) for l in wide_list_of_arrays)
assert len(p.plot_data) == chunks * chunk_size
x = p.plot_data["x"]
expected_x = np.tile(np.arange(chunk_size), chunks)
assert_array_equal(x, expected_x)
y = p.plot_data["y"].dropna()
expected_y = np.concatenate(wide_list_of_arrays)
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
expected_hue = np.repeat(np.arange(chunks), chunk_size)
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] is None
assert p.variables["y"] is None
assert p.variables["hue"] is None
assert p.variables["style"] is None
def test_wide_list_of_list_variables(self, wide_list_of_lists):
p = _RelationalPlotter()
p.assign_variables(data=wide_list_of_lists)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
chunks = len(wide_list_of_lists)
chunk_size = max(len(l) for l in wide_list_of_lists)
assert len(p.plot_data) == chunks * chunk_size
x = p.plot_data["x"]
expected_x = np.tile(np.arange(chunk_size), chunks)
assert_array_equal(x, expected_x)
y = p.plot_data["y"].dropna()
expected_y = np.concatenate(wide_list_of_lists)
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
expected_hue = np.repeat(np.arange(chunks), chunk_size)
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] is None
assert p.variables["y"] is None
assert p.variables["hue"] is None
assert p.variables["style"] is None
def test_wide_dict_of_series_variables(self, wide_dict_of_series):
p = _RelationalPlotter()
p.assign_variables(data=wide_dict_of_series)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
chunks = len(wide_dict_of_series)
chunk_size = max(len(l) for l in wide_dict_of_series.values())
assert len(p.plot_data) == chunks * chunk_size
x = p.plot_data["x"]
expected_x = np.tile(np.arange(chunk_size), chunks)
assert_array_equal(x, expected_x)
y = p.plot_data["y"].dropna()
expected_y = np.concatenate(list(wide_dict_of_series.values()))
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
expected_hue = np.repeat(list(wide_dict_of_series), chunk_size)
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] is None
assert p.variables["y"] is None
assert p.variables["hue"] is None
assert p.variables["style"] is None
def test_wide_dict_of_arrays_variables(self, wide_dict_of_arrays):
p = _RelationalPlotter()
p.assign_variables(data=wide_dict_of_arrays)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
chunks = len(wide_dict_of_arrays)
chunk_size = max(len(l) for l in wide_dict_of_arrays.values())
assert len(p.plot_data) == chunks * chunk_size
x = p.plot_data["x"]
expected_x = np.tile(np.arange(chunk_size), chunks)
assert_array_equal(x, expected_x)
y = p.plot_data["y"].dropna()
expected_y = np.concatenate(list(wide_dict_of_arrays.values()))
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
expected_hue = np.repeat(list(wide_dict_of_arrays), chunk_size)
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] is None
assert p.variables["y"] is None
assert p.variables["hue"] is None
assert p.variables["style"] is None
def test_wide_dict_of_lists_variables(self, wide_dict_of_lists):
p = _RelationalPlotter()
p.assign_variables(data=wide_dict_of_lists)
assert p.input_format == "wide"
assert list(p.variables) == ["x", "y", "hue", "style"]
chunks = len(wide_dict_of_lists)
chunk_size = max(len(l) for l in wide_dict_of_lists.values())
assert len(p.plot_data) == chunks * chunk_size
x = p.plot_data["x"]
expected_x = np.tile(np.arange(chunk_size), chunks)
assert_array_equal(x, expected_x)
y = p.plot_data["y"].dropna()
expected_y = np.concatenate(list(wide_dict_of_lists.values()))
assert_array_equal(y, expected_y)
hue = p.plot_data["hue"]
expected_hue = np.repeat(list(wide_dict_of_lists), chunk_size)
assert_array_equal(hue, expected_hue)
style = p.plot_data["style"]
expected_style = expected_hue
assert_array_equal(style, expected_style)
assert p.variables["x"] is None
assert p.variables["y"] is None
assert p.variables["hue"] is None
assert p.variables["style"] is None
def test_relplot_simple(self, long_df):
g = relplot(data=long_df, x="x", y="y", kind="scatter")
x, y = g.ax.collections[0].get_offsets().T
assert_array_equal(x, long_df["x"])
assert_array_equal(y, long_df["y"])
g = relplot(data=long_df, x="x", y="y", kind="line")
x, y = g.ax.lines[0].get_xydata().T
expected = long_df.groupby("x").y.mean()
assert_array_equal(x, expected.index)
assert y == pytest.approx(expected.values)
with pytest.raises(ValueError):
g = relplot(data=long_df, x="x", y="y", kind="not_a_kind")
def test_relplot_complex(self, long_df):
for sem in ["hue", "size", "style"]:
g = relplot(data=long_df, x="x", y="y", **{sem: "a"})
x, y = g.ax.collections[0].get_offsets().T
assert_array_equal(x, long_df["x"])
assert_array_equal(y, long_df["y"])
for sem in ["hue", "size", "style"]:
g = relplot(
data=long_df, x="x", y="y", col="c", **{sem: "a"}
)
grouped = long_df.groupby("c")
for (_, grp_df), ax in zip(grouped, g.axes.flat):
x, y = ax.collections[0].get_offsets().T
assert_array_equal(x, grp_df["x"])
assert_array_equal(y, grp_df["y"])
for sem in ["size", "style"]:
g = relplot(
data=long_df, x="x", y="y", hue="b", col="c", **{sem: "a"}
)
grouped = long_df.groupby("c")
for (_, grp_df), ax in zip(grouped, g.axes.flat):
x, y = ax.collections[0].get_offsets().T
assert_array_equal(x, grp_df["x"])
assert_array_equal(y, grp_df["y"])
for sem in ["hue", "size", "style"]:
g = relplot(
data=long_df.sort_values(["c", "b"]),
x="x", y="y", col="b", row="c", **{sem: "a"}
)
grouped = long_df.groupby(["c", "b"])
for (_, grp_df), ax in zip(grouped, g.axes.flat):
x, y = ax.collections[0].get_offsets().T
assert_array_equal(x, grp_df["x"])
assert_array_equal(y, grp_df["y"])
@pytest.mark.parametrize(
"vector_type",
["series", "numpy", "list"],
)
def test_relplot_vectors(self, long_df, vector_type):
semantics = dict(x="x", y="y", hue="f", col="c")
kws = {key: long_df[val] for key, val in semantics.items()}
g = relplot(data=long_df, **kws)
grouped = long_df.groupby("c")
for (_, grp_df), ax in zip(grouped, g.axes.flat):
x, y = ax.collections[0].get_offsets().T
assert_array_equal(x, grp_df["x"])
assert_array_equal(y, grp_df["y"])
def test_relplot_wide(self, wide_df):
g = relplot(data=wide_df)
x, y = g.ax.collections[0].get_offsets().T
assert_array_equal(y, wide_df.to_numpy().T.ravel())
def test_relplot_hues(self, long_df):
palette = ["r", "b", "g"]
g = relplot(
x="x", y="y", hue="a", style="b", col="c",
palette=palette, data=long_df
)
palette = dict(zip(long_df["a"].unique(), palette))
grouped = long_df.groupby("c")
for (_, grp_df), ax in zip(grouped, g.axes.flat):
points = ax.collections[0]
expected_hues = [palette[val] for val in grp_df["a"]]
assert same_color(points.get_facecolors(), expected_hues)
def test_relplot_sizes(self, long_df):
sizes = [5, 12, 7]
g = relplot(
data=long_df,
x="x", y="y", size="a", hue="b", col="c",
sizes=sizes,
)
sizes = dict(zip(long_df["a"].unique(), sizes))
grouped = long_df.groupby("c")
for (_, grp_df), ax in zip(grouped, g.axes.flat):
points = ax.collections[0]
expected_sizes = [sizes[val] for val in grp_df["a"]]
assert_array_equal(points.get_sizes(), expected_sizes)
def test_relplot_styles(self, long_df):
markers = ["o", "d", "s"]
g = relplot(
data=long_df,
x="x", y="y", style="a", hue="b", col="c",
markers=markers,
)
paths = []
for m in markers:
m = mpl.markers.MarkerStyle(m)
paths.append(m.get_path().transformed(m.get_transform()))
paths = dict(zip(long_df["a"].unique(), paths))
grouped = long_df.groupby("c")
for (_, grp_df), ax in zip(grouped, g.axes.flat):
points = ax.collections[0]
expected_paths = [paths[val] for val in grp_df["a"]]
assert self.paths_equal(points.get_paths(), expected_paths)
def test_relplot_stringy_numerics(self, long_df):
long_df["x_str"] = long_df["x"].astype(str)
g = relplot(data=long_df, x="x", y="y", hue="x_str")
points = g.ax.collections[0]
xys = points.get_offsets()
mask = np.ma.getmask(xys)
assert not mask.any()
assert_array_equal(xys, long_df[["x", "y"]])
g = relplot(data=long_df, x="x", y="y", size="x_str")
points = g.ax.collections[0]
xys = points.get_offsets()
mask = np.ma.getmask(xys)
assert not mask.any()
assert_array_equal(xys, long_df[["x", "y"]])
def test_relplot_legend(self, long_df):
g = relplot(data=long_df, x="x", y="y")
assert g._legend is None
g = relplot(data=long_df, x="x", y="y", hue="a")
texts = [t.get_text() for t in g._legend.texts]
expected_texts = long_df["a"].unique()
assert_array_equal(texts, expected_texts)
g = relplot(data=long_df, x="x", y="y", hue="s", size="s")
texts = [t.get_text() for t in g._legend.texts]
assert_array_equal(texts, np.sort(texts))
g = relplot(data=long_df, x="x", y="y", hue="a", legend=False)
assert g._legend is None
palette = color_palette("deep", len(long_df["b"].unique()))
a_like_b = dict(zip(long_df["a"].unique(), long_df["b"].unique()))
long_df["a_like_b"] = long_df["a"].map(a_like_b)
g = relplot(
data=long_df,
x="x", y="y", hue="b", style="a_like_b",
palette=palette, kind="line", estimator=None,
)
lines = g._legend.get_lines()[1:] # Chop off title dummy
for line, color in zip(lines, palette):
assert line.get_color() == color
def test_relplot_data_columns(self, long_df):
long_df = long_df.assign(x_var=long_df["x"], y_var=long_df["y"])
g = relplot(
data=long_df,
x="x_var", y="y_var",
hue=long_df["a"].to_numpy(), col="c"
)
assert g.data.columns.to_list() == ["x_var", "y_var", "_hue_", "c"]
def test_facet_variable_collision(self, long_df):
# https://github.com/mwaskom/seaborn/issues/2488
col_data = long_df["c"]
long_df = long_df.assign(size=col_data)
g = relplot(
data=long_df,
x="x", y="y", col="size",
)
assert g.axes.shape == (1, len(col_data.unique()))
def test_ax_kwarg_removal(self, long_df):
f, ax = plt.subplots()
with pytest.warns(UserWarning):
g = relplot(data=long_df, x="x", y="y", ax=ax)
assert len(ax.collections) == 0
assert len(g.ax.collections) > 0
class TestLinePlotter(SharedAxesLevelTests, Helpers):
func = staticmethod(lineplot)
def get_last_color(self, ax):
return to_rgba(ax.lines[-1].get_color())
def test_legend_data(self, long_df):
f, ax = plt.subplots()
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y"),
legend="full"
)
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert handles == []
# --
ax.clear()
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a"),
legend="full",
)
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
colors = [h.get_color() for h in handles]
assert labels == p._hue_map.levels
assert colors == p._hue_map(p._hue_map.levels)
# --
ax.clear()
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a", style="a"),
legend="full",
)
p.map_style(markers=True)
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
colors = [h.get_color() for h in handles]
markers = [h.get_marker() for h in handles]
assert labels == p._hue_map.levels
assert labels == p._style_map.levels
assert colors == p._hue_map(p._hue_map.levels)
assert markers == p._style_map(p._style_map.levels, "marker")
# --
ax.clear()
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a", style="b"),
legend="full",
)
p.map_style(markers=True)
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
colors = [h.get_color() for h in handles]
markers = [h.get_marker() for h in handles]
expected_labels = (
["a"]
+ p._hue_map.levels
+ ["b"] + p._style_map.levels
)
expected_colors = (
["w"] + p._hue_map(p._hue_map.levels)
+ ["w"] + [".2" for _ in p._style_map.levels]
)
expected_markers = (
[""] + ["None" for _ in p._hue_map.levels]
+ [""] + p._style_map(p._style_map.levels, "marker")
)
assert labels == expected_labels
assert colors == expected_colors
assert markers == expected_markers
# --
ax.clear()
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a", size="a"),
legend="full"
)
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
colors = [h.get_color() for h in handles]
widths = [h.get_linewidth() for h in handles]
assert labels == p._hue_map.levels
assert labels == p._size_map.levels
assert colors == p._hue_map(p._hue_map.levels)
assert widths == p._size_map(p._size_map.levels)
# --
x, y = np.random.randn(2, 40)
z = np.tile(np.arange(20), 2)
p = _LinePlotter(variables=dict(x=x, y=y, hue=z))
ax.clear()
p.legend = "full"
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert labels == [str(l) for l in p._hue_map.levels]
ax.clear()
p.legend = "brief"
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert len(labels) < len(p._hue_map.levels)
p = _LinePlotter(variables=dict(x=x, y=y, size=z))
ax.clear()
p.legend = "full"
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert labels == [str(l) for l in p._size_map.levels]
ax.clear()
p.legend = "brief"
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert len(labels) < len(p._size_map.levels)
ax.clear()
p.legend = "auto"
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert len(labels) < len(p._size_map.levels)
ax.clear()
p.legend = True
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert len(labels) < len(p._size_map.levels)
ax.clear()
p.legend = "bad_value"
with pytest.raises(ValueError):
p.add_legend_data(ax)
ax.clear()
p = _LinePlotter(
variables=dict(x=x, y=y, hue=z + 1),
legend="brief"
)
p.map_hue(norm=mpl.colors.LogNorm()),
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert float(labels[1]) / float(labels[0]) == 10
ax.clear()
p = _LinePlotter(
variables=dict(x=x, y=y, hue=z % 2),
legend="auto"
)
p.map_hue(norm=mpl.colors.LogNorm()),
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert labels == ["0", "1"]
ax.clear()
p = _LinePlotter(
variables=dict(x=x, y=y, size=z + 1),
legend="brief"
)
p.map_size(norm=mpl.colors.LogNorm())
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert float(labels[1]) / float(labels[0]) == 10
ax.clear()
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue="f"),
legend="brief",
)
p.add_legend_data(ax)
expected_labels = ['0.20', '0.22', '0.24', '0.26', '0.28']
handles, labels = ax.get_legend_handles_labels()
assert labels == expected_labels
ax.clear()
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", size="f"),
legend="brief",
)
p.add_legend_data(ax)
expected_levels = ['0.20', '0.22', '0.24', '0.26', '0.28']
handles, labels = ax.get_legend_handles_labels()
assert labels == expected_levels
def test_plot(self, long_df, repeated_df):
f, ax = plt.subplots()
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y"),
sort=False,
estimator=None
)
p.plot(ax, {})
line, = ax.lines
assert_array_equal(line.get_xdata(), long_df.x.to_numpy())
assert_array_equal(line.get_ydata(), long_df.y.to_numpy())
ax.clear()
p.plot(ax, {"color": "k", "label": "test"})
line, = ax.lines
assert line.get_color() == "k"
assert line.get_label() == "test"
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y"),
sort=True, estimator=None
)
ax.clear()
p.plot(ax, {})
line, = ax.lines
sorted_data = long_df.sort_values(["x", "y"])
assert_array_equal(line.get_xdata(), sorted_data.x.to_numpy())
assert_array_equal(line.get_ydata(), sorted_data.y.to_numpy())
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a"),
)
ax.clear()
p.plot(ax, {})
assert len(ax.lines) == len(p._hue_map.levels)
for line, level in zip(ax.lines, p._hue_map.levels):
assert line.get_color() == p._hue_map(level)
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", size="a"),
)
ax.clear()
p.plot(ax, {})
assert len(ax.lines) == len(p._size_map.levels)
for line, level in zip(ax.lines, p._size_map.levels):
assert line.get_linewidth() == p._size_map(level)
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a", style="a"),
)
p.map_style(markers=True)
ax.clear()
p.plot(ax, {})
assert len(ax.lines) == len(p._hue_map.levels)
assert len(ax.lines) == len(p._style_map.levels)
for line, level in zip(ax.lines, p._hue_map.levels):
assert line.get_color() == p._hue_map(level)
assert line.get_marker() == p._style_map(level, "marker")
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a", style="b"),
)
p.map_style(markers=True)
ax.clear()
p.plot(ax, {})
levels = product(p._hue_map.levels, p._style_map.levels)
expected_line_count = len(p._hue_map.levels) * len(p._style_map.levels)
assert len(ax.lines) == expected_line_count
for line, (hue, style) in zip(ax.lines, levels):
assert line.get_color() == p._hue_map(hue)
assert line.get_marker() == p._style_map(style, "marker")
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y"),
estimator="mean", err_style="band", errorbar="sd", sort=True
)
ax.clear()
p.plot(ax, {})
line, = ax.lines
expected_data = long_df.groupby("x").y.mean()
assert_array_equal(line.get_xdata(), expected_data.index.to_numpy())
assert np.allclose(line.get_ydata(), expected_data.to_numpy())
assert len(ax.collections) == 1
# Test that nans do not propagate to means or CIs
p = _LinePlotter(
variables=dict(
x=[1, 1, 1, 2, 2, 2, 3, 3, 3],
y=[1, 2, 3, 3, np.nan, 5, 4, 5, 6],
),
estimator="mean", err_style="band", errorbar="ci", n_boot=100, sort=True,
)
ax.clear()
p.plot(ax, {})
line, = ax.lines
assert line.get_xdata().tolist() == [1, 2, 3]
err_band = ax.collections[0].get_paths()
assert len(err_band) == 1
assert len(err_band[0].vertices) == 9
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a"),
estimator="mean", err_style="band", errorbar="sd"
)
ax.clear()
p.plot(ax, {})
assert len(ax.lines) == len(ax.collections) == len(p._hue_map.levels)
for c in ax.collections:
assert isinstance(c, mpl.collections.PolyCollection)
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a"),
estimator="mean", err_style="bars", errorbar="sd"
)
ax.clear()
p.plot(ax, {})
n_lines = len(ax.lines)
assert n_lines / 2 == len(ax.collections) == len(p._hue_map.levels)
assert len(ax.collections) == len(p._hue_map.levels)
for c in ax.collections:
assert isinstance(c, mpl.collections.LineCollection)
p = _LinePlotter(
data=repeated_df,
variables=dict(x="x", y="y", units="u"),
estimator=None
)
ax.clear()
p.plot(ax, {})
n_units = len(repeated_df["u"].unique())
assert len(ax.lines) == n_units
p = _LinePlotter(
data=repeated_df,
variables=dict(x="x", y="y", hue="a", units="u"),
estimator=None
)
ax.clear()
p.plot(ax, {})
n_units *= len(repeated_df["a"].unique())
assert len(ax.lines) == n_units
p.estimator = "mean"
with pytest.raises(ValueError):
p.plot(ax, {})
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a"),
err_style="band", err_kws={"alpha": .5},
)
ax.clear()
p.plot(ax, {})
for band in ax.collections:
assert band.get_alpha() == .5
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a"),
err_style="bars", err_kws={"elinewidth": 2},
)
ax.clear()
p.plot(ax, {})
for lines in ax.collections:
assert lines.get_linewidth() == 2
p.err_style = "invalid"
with pytest.raises(ValueError):
p.plot(ax, {})
x_str = long_df["x"].astype(str)
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", hue=x_str),
)
ax.clear()
p.plot(ax, {})
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y", size=x_str),
)
ax.clear()
p.plot(ax, {})
def test_log_scale(self):
f, ax = plt.subplots()
ax.set_xscale("log")
x = [1, 10, 100]
y = [1, 2, 3]
lineplot(x=x, y=y)
line = ax.lines[0]
assert_array_equal(line.get_xdata(), x)
assert_array_equal(line.get_ydata(), y)
f, ax = plt.subplots()
ax.set_xscale("log")
ax.set_yscale("log")
x = [1, 1, 2, 2]
y = [1, 10, 1, 100]
lineplot(x=x, y=y, err_style="bars", errorbar=("pi", 100))
line = ax.lines[0]
assert line.get_ydata()[1] == 10
ebars = ax.collections[0].get_segments()
assert_array_equal(ebars[0][:, 1], y[:2])
assert_array_equal(ebars[1][:, 1], y[2:])
def test_axis_labels(self, long_df):
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
p = _LinePlotter(
data=long_df,
variables=dict(x="x", y="y"),
)
p.plot(ax1, {})
assert ax1.get_xlabel() == "x"
assert ax1.get_ylabel() == "y"
p.plot(ax2, {})
assert ax2.get_xlabel() == "x"
assert ax2.get_ylabel() == "y"
assert not ax2.yaxis.label.get_visible()
def test_matplotlib_kwargs(self, long_df):
kws = {
"linestyle": "--",
"linewidth": 3,
"color": (1, .5, .2),
"markeredgecolor": (.2, .5, .2),
"markeredgewidth": 1,
}
ax = lineplot(data=long_df, x="x", y="y", **kws)
line, *_ = ax.lines
for key, val in kws.items():
plot_val = getattr(line, f"get_{key}")()
assert plot_val == val
def test_nonmapped_dashes(self):
ax = lineplot(x=[1, 2], y=[1, 2], dashes=(2, 1))
line = ax.lines[0]
# Not a great test, but lines don't expose the dash style publically
assert line.get_linestyle() == "--"
def test_lineplot_axes(self, wide_df):
f1, ax1 = plt.subplots()
f2, ax2 = plt.subplots()
ax = lineplot(data=wide_df)
assert ax is ax2
ax = lineplot(data=wide_df, ax=ax1)
assert ax is ax1
def test_lineplot_vs_relplot(self, long_df, long_semantics):
ax = lineplot(data=long_df, **long_semantics)
g = relplot(data=long_df, kind="line", **long_semantics)
lin_lines = ax.lines
rel_lines = g.ax.lines
for l1, l2 in zip(lin_lines, rel_lines):
assert_array_equal(l1.get_xydata(), l2.get_xydata())
assert same_color(l1.get_color(), l2.get_color())
assert l1.get_linewidth() == l2.get_linewidth()
assert l1.get_linestyle() == l2.get_linestyle()
def test_lineplot_smoke(
self,
wide_df, wide_array,
wide_list_of_series, wide_list_of_arrays, wide_list_of_lists,
flat_array, flat_series, flat_list,
long_df, missing_df, object_df
):
f, ax = plt.subplots()
lineplot(x=[], y=[])
ax.clear()
lineplot(data=wide_df)
ax.clear()
lineplot(data=wide_array)
ax.clear()
lineplot(data=wide_list_of_series)
ax.clear()
lineplot(data=wide_list_of_arrays)
ax.clear()
lineplot(data=wide_list_of_lists)
ax.clear()
lineplot(data=flat_series)
ax.clear()
lineplot(data=flat_array)
ax.clear()
lineplot(data=flat_list)
ax.clear()
lineplot(x="x", y="y", data=long_df)
ax.clear()
lineplot(x=long_df.x, y=long_df.y)
ax.clear()
lineplot(x=long_df.x, y="y", data=long_df)
ax.clear()
lineplot(x="x", y=long_df.y.to_numpy(), data=long_df)
ax.clear()
lineplot(x="x", y="t", data=long_df)
ax.clear()
lineplot(x="x", y="y", hue="a", data=long_df)
ax.clear()
lineplot(x="x", y="y", hue="a", style="a", data=long_df)
ax.clear()
lineplot(x="x", y="y", hue="a", style="b", data=long_df)
ax.clear()
lineplot(x="x", y="y", hue="a", style="a", data=missing_df)
ax.clear()
lineplot(x="x", y="y", hue="a", style="b", data=missing_df)
ax.clear()
lineplot(x="x", y="y", hue="a", size="a", data=long_df)
ax.clear()
lineplot(x="x", y="y", hue="a", size="s", data=long_df)
ax.clear()
lineplot(x="x", y="y", hue="a", size="a", data=missing_df)
ax.clear()
lineplot(x="x", y="y", hue="a", size="s", data=missing_df)
ax.clear()
lineplot(x="x", y="y", hue="f", data=object_df)
ax.clear()
lineplot(x="x", y="y", hue="c", size="f", data=object_df)
ax.clear()
lineplot(x="x", y="y", hue="f", size="s", data=object_df)
ax.clear()
def test_ci_deprecation(self, long_df):
axs = plt.figure().subplots(2)
lineplot(data=long_df, x="x", y="y", errorbar=("ci", 95), seed=0, ax=axs[0])
with pytest.warns(UserWarning, match="The `ci` parameter is deprecated"):
lineplot(data=long_df, x="x", y="y", ci=95, seed=0, ax=axs[1])
assert_plots_equal(*axs)
axs = plt.figure().subplots(2)
lineplot(data=long_df, x="x", y="y", errorbar="sd", ax=axs[0])
with pytest.warns(UserWarning, match="The `ci` parameter is deprecated"):
lineplot(data=long_df, x="x", y="y", ci="sd", ax=axs[1])
assert_plots_equal(*axs)
class TestScatterPlotter(SharedAxesLevelTests, Helpers):
func = staticmethod(scatterplot)
def get_last_color(self, ax):
colors = ax.collections[-1].get_facecolors()
unique_colors = np.unique(colors, axis=0)
assert len(unique_colors) == 1
return to_rgba(unique_colors.squeeze())
def test_color(self, long_df):
super().test_color(long_df)
ax = plt.figure().subplots()
self.func(data=long_df, x="x", y="y", facecolor="C5", ax=ax)
assert self.get_last_color(ax) == to_rgba("C5")
ax = plt.figure().subplots()
self.func(data=long_df, x="x", y="y", facecolors="C6", ax=ax)
assert self.get_last_color(ax) == to_rgba("C6")
if LooseVersion(mpl.__version__) >= "3.1.0":
# https://github.com/matplotlib/matplotlib/pull/12851
ax = plt.figure().subplots()
self.func(data=long_df, x="x", y="y", fc="C4", ax=ax)
assert self.get_last_color(ax) == to_rgba("C4")
def test_legend_data(self, long_df):
m = mpl.markers.MarkerStyle("o")
default_mark = m.get_path().transformed(m.get_transform())
m = mpl.markers.MarkerStyle("")
null = m.get_path().transformed(m.get_transform())
f, ax = plt.subplots()
p = _ScatterPlotter(
data=long_df,
variables=dict(x="x", y="y"),
legend="full",
)
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert handles == []
# --
ax.clear()
p = _ScatterPlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a"),
legend="full",
)
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
colors = [h.get_facecolors()[0] for h in handles]
expected_colors = p._hue_map(p._hue_map.levels)
assert labels == p._hue_map.levels
assert same_color(colors, expected_colors)
# --
ax.clear()
p = _ScatterPlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a", style="a"),
legend="full",
)
p.map_style(markers=True)
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
colors = [h.get_facecolors()[0] for h in handles]
expected_colors = p._hue_map(p._hue_map.levels)
paths = [h.get_paths()[0] for h in handles]
expected_paths = p._style_map(p._style_map.levels, "path")
assert labels == p._hue_map.levels
assert labels == p._style_map.levels
assert same_color(colors, expected_colors)
assert self.paths_equal(paths, expected_paths)
# --
ax.clear()
p = _ScatterPlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a", style="b"),
legend="full",
)
p.map_style(markers=True)
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
colors = [h.get_facecolors()[0] for h in handles]
paths = [h.get_paths()[0] for h in handles]
expected_colors = (
["w"] + p._hue_map(p._hue_map.levels)
+ ["w"] + [".2" for _ in p._style_map.levels]
)
expected_paths = (
[null] + [default_mark for _ in p._hue_map.levels]
+ [null] + p._style_map(p._style_map.levels, "path")
)
assert labels == (
["a"] + p._hue_map.levels + ["b"] + p._style_map.levels
)
assert same_color(colors, expected_colors)
assert self.paths_equal(paths, expected_paths)
# --
ax.clear()
p = _ScatterPlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a", size="a"),
legend="full"
)
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
colors = [h.get_facecolors()[0] for h in handles]
expected_colors = p._hue_map(p._hue_map.levels)
sizes = [h.get_sizes()[0] for h in handles]
expected_sizes = p._size_map(p._size_map.levels)
assert labels == p._hue_map.levels
assert labels == p._size_map.levels
assert same_color(colors, expected_colors)
assert sizes == expected_sizes
# --
ax.clear()
sizes_list = [10, 100, 200]
p = _ScatterPlotter(
data=long_df,
variables=dict(x="x", y="y", size="s"),
legend="full",
)
p.map_size(sizes=sizes_list)
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
sizes = [h.get_sizes()[0] for h in handles]
expected_sizes = p._size_map(p._size_map.levels)
assert labels == [str(l) for l in p._size_map.levels]
assert sizes == expected_sizes
# --
ax.clear()
sizes_dict = {2: 10, 4: 100, 8: 200}
p = _ScatterPlotter(
data=long_df,
variables=dict(x="x", y="y", size="s"),
legend="full"
)
p.map_size(sizes=sizes_dict)
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
sizes = [h.get_sizes()[0] for h in handles]
expected_sizes = p._size_map(p._size_map.levels)
assert labels == [str(l) for l in p._size_map.levels]
assert sizes == expected_sizes
# --
x, y = np.random.randn(2, 40)
z = np.tile(np.arange(20), 2)
p = _ScatterPlotter(
variables=dict(x=x, y=y, hue=z),
)
ax.clear()
p.legend = "full"
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert labels == [str(l) for l in p._hue_map.levels]
ax.clear()
p.legend = "brief"
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert len(labels) < len(p._hue_map.levels)
p = _ScatterPlotter(
variables=dict(x=x, y=y, size=z),
)
ax.clear()
p.legend = "full"
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert labels == [str(l) for l in p._size_map.levels]
ax.clear()
p.legend = "brief"
p.add_legend_data(ax)
handles, labels = ax.get_legend_handles_labels()
assert len(labels) < len(p._size_map.levels)
ax.clear()
p.legend = "bad_value"
with pytest.raises(ValueError):
p.add_legend_data(ax)
def test_plot(self, long_df, repeated_df):
f, ax = plt.subplots()
p = _ScatterPlotter(data=long_df, variables=dict(x="x", y="y"))
p.plot(ax, {})
points = ax.collections[0]
assert_array_equal(points.get_offsets(), long_df[["x", "y"]].to_numpy())
ax.clear()
p.plot(ax, {"color": "k", "label": "test"})
points = ax.collections[0]
assert same_color(points.get_facecolor(), "k")
assert points.get_label() == "test"
p = _ScatterPlotter(
data=long_df, variables=dict(x="x", y="y", hue="a")
)
ax.clear()
p.plot(ax, {})
points = ax.collections[0]
expected_colors = p._hue_map(p.plot_data["hue"])
assert same_color(points.get_facecolors(), expected_colors)
p = _ScatterPlotter(
data=long_df,
variables=dict(x="x", y="y", style="c"),
)
p.map_style(markers=["+", "x"])
ax.clear()
color = (1, .3, .8)
p.plot(ax, {"color": color})
points = ax.collections[0]
assert same_color(points.get_edgecolors(), [color])
p = _ScatterPlotter(
data=long_df, variables=dict(x="x", y="y", size="a"),
)
ax.clear()
p.plot(ax, {})
points = ax.collections[0]
expected_sizes = p._size_map(p.plot_data["size"])
assert_array_equal(points.get_sizes(), expected_sizes)
p = _ScatterPlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a", style="a"),
)
p.map_style(markers=True)
ax.clear()
p.plot(ax, {})
points = ax.collections[0]
expected_colors = p._hue_map(p.plot_data["hue"])
expected_paths = p._style_map(p.plot_data["style"], "path")
assert same_color(points.get_facecolors(), expected_colors)
assert self.paths_equal(points.get_paths(), expected_paths)
p = _ScatterPlotter(
data=long_df,
variables=dict(x="x", y="y", hue="a", style="b"),
)
p.map_style(markers=True)
ax.clear()
p.plot(ax, {})
points = ax.collections[0]
expected_colors = p._hue_map(p.plot_data["hue"])
expected_paths = p._style_map(p.plot_data["style"], "path")
assert same_color(points.get_facecolors(), expected_colors)
assert self.paths_equal(points.get_paths(), expected_paths)
x_str = long_df["x"].astype(str)
p = _ScatterPlotter(
data=long_df, variables=dict(x="x", y="y", hue=x_str),
)
ax.clear()
p.plot(ax, {})
p = _ScatterPlotter(
data=long_df, variables=dict(x="x", y="y", size=x_str),
)
ax.clear()
p.plot(ax, {})
def test_axis_labels(self, long_df):
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
p = _ScatterPlotter(data=long_df, variables=dict(x="x", y="y"))
p.plot(ax1, {})
assert ax1.get_xlabel() == "x"
assert ax1.get_ylabel() == "y"
p.plot(ax2, {})
assert ax2.get_xlabel() == "x"
assert ax2.get_ylabel() == "y"
assert not ax2.yaxis.label.get_visible()
def test_scatterplot_axes(self, wide_df):
f1, ax1 = plt.subplots()
f2, ax2 = plt.subplots()
ax = scatterplot(data=wide_df)
assert ax is ax2
ax = scatterplot(data=wide_df, ax=ax1)
assert ax is ax1
def test_literal_attribute_vectors(self):
f, ax = plt.subplots()
x = y = [1, 2, 3]
s = [5, 10, 15]
c = [(1, 1, 0, 1), (1, 0, 1, .5), (.5, 1, 0, 1)]
scatterplot(x=x, y=y, c=c, s=s, ax=ax)
points, = ax.collections
assert_array_equal(points.get_sizes().squeeze(), s)
assert_array_equal(points.get_facecolors(), c)
def test_supplied_color_array(self, long_df):
cmap = mpl.cm.get_cmap("Blues")
norm = mpl.colors.Normalize()
colors = cmap(norm(long_df["y"].to_numpy()))
keys = ["c", "facecolor", "facecolors"]
if LooseVersion(mpl.__version__) >= "3.1.0":
# https://github.com/matplotlib/matplotlib/pull/12851
keys.append("fc")
for key in keys:
ax = plt.figure().subplots()
scatterplot(data=long_df, x="x", y="y", **{key: colors})
_draw_figure(ax.figure)
assert_array_equal(ax.collections[0].get_facecolors(), colors)
ax = plt.figure().subplots()
scatterplot(data=long_df, x="x", y="y", c=long_df["y"], cmap=cmap)
_draw_figure(ax.figure)
assert_array_equal(ax.collections[0].get_facecolors(), colors)
def test_linewidths(self, long_df):
f, ax = plt.subplots()
scatterplot(data=long_df, x="x", y="y", s=10)
scatterplot(data=long_df, x="x", y="y", s=20)
points1, points2 = ax.collections
assert (
points1.get_linewidths().item() < points2.get_linewidths().item()
)
ax.clear()
scatterplot(data=long_df, x="x", y="y", s=long_df["x"])
scatterplot(data=long_df, x="x", y="y", s=long_df["x"] * 2)
points1, points2 = ax.collections
assert (
points1.get_linewidths().item() < points2.get_linewidths().item()
)
ax.clear()
scatterplot(data=long_df, x="x", y="y", size=long_df["x"])
scatterplot(data=long_df, x="x", y="y", size=long_df["x"] * 2)
points1, points2, *_ = ax.collections
assert (
points1.get_linewidths().item() < points2.get_linewidths().item()
)
ax.clear()
lw = 2
scatterplot(data=long_df, x="x", y="y", linewidth=lw)
assert ax.collections[0].get_linewidths().item() == lw
def test_size_norm_extrapolation(self):
# https://github.com/mwaskom/seaborn/issues/2539
x = np.arange(0, 20, 2)
f, axs = plt.subplots(1, 2, sharex=True, sharey=True)
slc = 5
kws = dict(sizes=(50, 200), size_norm=(0, x.max()), legend="brief")
scatterplot(x=x, y=x, size=x, ax=axs[0], **kws)
scatterplot(x=x[:slc], y=x[:slc], size=x[:slc], ax=axs[1], **kws)
assert np.allclose(
axs[0].collections[0].get_sizes()[:slc],
axs[1].collections[0].get_sizes()
)
legends = [ax.legend_ for ax in axs]
legend_data = [
{
label.get_text(): handle.get_sizes().item()
for label, handle in zip(legend.get_texts(), legend.legendHandles)
} for legend in legends
]
for key in set(legend_data[0]) & set(legend_data[1]):
if key == "y":
# At some point (circa 3.0) matplotlib auto-added pandas series
# with a valid name into the legend, which messes up this test.
# I can't track down when that was added (or removed), so let's
# just anticipate and ignore it here.
continue
assert legend_data[0][key] == legend_data[1][key]
def test_datetime_scale(self, long_df):
ax = scatterplot(data=long_df, x="t", y="y")
# Check that we avoid weird matplotlib default auto scaling
# https://github.com/matplotlib/matplotlib/issues/17586
        assert ax.get_xlim()[0] > ax.xaxis.convert_units(np.datetime64("2002-01-01"))
def test_scatterplot_vs_relplot(self, long_df, long_semantics):
ax = scatterplot(data=long_df, **long_semantics)
g = relplot(data=long_df, kind="scatter", **long_semantics)
for s_pts, r_pts in zip(ax.collections, g.ax.collections):
assert_array_equal(s_pts.get_offsets(), r_pts.get_offsets())
assert_array_equal(s_pts.get_sizes(), r_pts.get_sizes())
assert_array_equal(s_pts.get_facecolors(), r_pts.get_facecolors())
assert self.paths_equal(s_pts.get_paths(), r_pts.get_paths())
def test_scatterplot_smoke(
self,
wide_df, wide_array,
flat_series, flat_array, flat_list,
wide_list_of_series, wide_list_of_arrays, wide_list_of_lists,
long_df, missing_df, object_df
):
f, ax = plt.subplots()
scatterplot(x=[], y=[])
ax.clear()
scatterplot(data=wide_df)
ax.clear()
scatterplot(data=wide_array)
ax.clear()
scatterplot(data=wide_list_of_series)
ax.clear()
scatterplot(data=wide_list_of_arrays)
ax.clear()
scatterplot(data=wide_list_of_lists)
ax.clear()
scatterplot(data=flat_series)
ax.clear()
scatterplot(data=flat_array)
ax.clear()
scatterplot(data=flat_list)
ax.clear()
scatterplot(x="x", y="y", data=long_df)
ax.clear()
scatterplot(x=long_df.x, y=long_df.y)
ax.clear()
scatterplot(x=long_df.x, y="y", data=long_df)
ax.clear()
scatterplot(x="x", y=long_df.y.to_numpy(), data=long_df)
ax.clear()
scatterplot(x="x", y="y", hue="a", data=long_df)
ax.clear()
scatterplot(x="x", y="y", hue="a", style="a", data=long_df)
ax.clear()
scatterplot(x="x", y="y", hue="a", style="b", data=long_df)
ax.clear()
scatterplot(x="x", y="y", hue="a", style="a", data=missing_df)
ax.clear()
scatterplot(x="x", y="y", hue="a", style="b", data=missing_df)
ax.clear()
scatterplot(x="x", y="y", hue="a", size="a", data=long_df)
ax.clear()
scatterplot(x="x", y="y", hue="a", size="s", data=long_df)
ax.clear()
scatterplot(x="x", y="y", hue="a", size="a", data=missing_df)
ax.clear()
scatterplot(x="x", y="y", hue="a", size="s", data=missing_df)
ax.clear()
scatterplot(x="x", y="y", hue="f", data=object_df)
ax.clear()
scatterplot(x="x", y="y", hue="c", size="f", data=object_df)
ax.clear()
scatterplot(x="x", y="y", hue="f", size="s", data=object_df)
ax.clear()
| bsd-3-clause |
dparks1134/PETs | scripts/plotSilhouettes.py | 1 | 3493 | #!/usr/bin/env python
__author__ = 'Donovan Parks'
__copyright__ = 'Copyright 2013'
__credits__ = ['Donovan Parks']
__license__ = 'GPL3'
__version__ = '1.0.0'
__maintainer__ = 'Donovan Parks'
__email__ = '[email protected]'
__status__ = 'Development'
import sys, argparse
from colorsys import hsv_to_rgb
from math import atan2, pi, sin, cos
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import numpy as np
from numpy import mean
def doWork(args):
# read dissimilarity matrix
geneIdsToIndex = {}
dissMatrix = []
index = 0
for line in open(args.dissMatrix):
lineSplit = line.split('\t')
geneIdsToIndex[lineSplit[0]] = index
row = [float(x) for x in lineSplit[1:]]
dissMatrix.append(row)
index += 1
# read clustering data
clusterIdToGeneIds = {}
clusterIds = []
for line in open(args.cluster):
if line.strip() == '':
continue
if line[0] == '%':
clusterId = line[1:].strip()
clusterIds.append(clusterId)
else:
clusterIdToGeneIds[clusterId] = clusterIdToGeneIds.get(clusterId, []) + [line.strip()]
# create colour map for clusters
clusterIdToColour = {}
index = 0
for clusterId in sorted(clusterIds):
rgb = tuple([int(c*255) for c in hsv_to_rgb(float(index)/len(clusterIds), 0.4, 1.0)])
clusterIdToColour[clusterId] = '#%02X%02X%02X' % rgb
index += 1
# calculate silhouette value for each gene
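    # silhouette of a gene: s = (b - a) / max(a, b), where a is the mean dissimilarity
    # to the other genes in its own cluster and b is the smallest mean dissimilarity
    # to the genes of another cluster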
silhouettes = {}
colours = []
for srcClusterId in clusterIdToGeneIds:
silhouettes[srcClusterId] = []
minInterDiss = 1e6
for srcGeneId in clusterIdToGeneIds[srcClusterId]:
colours.append(clusterIdToColour[srcClusterId])
row = dissMatrix[geneIdsToIndex[srcGeneId]]
for dstClusterId in clusterIdToGeneIds:
diss = []
for dstGeneId in clusterIdToGeneIds[dstClusterId]:
if srcGeneId == dstGeneId:
continue
diss.append(row[geneIdsToIndex[dstGeneId]])
meanDiss = mean(diss)
if srcClusterId == dstClusterId:
intraDiss = meanDiss
elif meanDiss < minInterDiss:
minInterDiss = meanDiss
s = (minInterDiss - intraDiss) / max(minInterDiss, intraDiss)
silhouettes[srcClusterId].append(s)
data = []
for clusterId in silhouettes:
sil = silhouettes[clusterId]
data += sorted(sil, reverse=True)
# plot results
fig = plt.figure()
fig.set_size_inches(args.width, args.width)
ax = fig.add_axes([0.13,0.08,.82,.87])
ax.set_title('Silhouettes (mean = %0.2f' % mean(data) + ')', fontsize=10)
ax.set_xlabel('Cluster', fontsize=10)
ax.set_ylabel('Silhouette', fontsize=10)
ind = np.arange(len(data))
ax.bar(ind, data, color = colours)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(8)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(8)
fig.savefig(args.output, dpi = args.dpi)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dissMatrix', help='Dissimilarity matrix.')
parser.add_argument('cluster', help='File indicating clustering of genes.')
parser.add_argument('output', help='Output image file. Specify format with extension: .jpg, .png, .pdf, .svg.')
parser.add_argument('--dpi', help='Resolution of output image (default = 600).', type=int, default=600)
parser.add_argument('-w', '--width', help='Width of image in inches (default = 6).', type=float, default=6)
parser.add_argument('-l', '--legend', help='Show legend.', action='store_true')
args = parser.parse_args()
doWork(args) | gpl-3.0 |
huzq/scikit-learn | sklearn/feature_selection/tests/test_base.py | 15 | 3668 | import numpy as np
import pytest
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection._base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, accept_sparse='csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
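# With the default step=2 and the 10-column X defined below, StepSelector keeps
# columns 0, 2, 4, 6, 8 (mirrored by `support` and `support_inds`).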
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert np.int32 == sel.transform(X.astype(np.int32)).dtype
assert np.float32 == sel.transform(X.astype(np.float32)).dtype
# Check 1d list and other dtype:
names_t_actual = sel.transform([feature_names])
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
with pytest.raises(ValueError):
sel.transform(np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert np.int32 == sel.transform(sparse(X).astype(np.int32)).dtype
assert np.float32 == sel.transform(sparse(X).astype(np.float32)).dtype
# Check wrong shape raises error
with pytest.raises(ValueError):
sel.transform(np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert (np.int32 ==
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert (np.float32 ==
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform([feature_names_t])
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
with pytest.raises(ValueError):
sel.inverse_transform(np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert (np.int32 ==
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert (np.float32 ==
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
with pytest.raises(ValueError):
sel.inverse_transform(np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
DGrady/pandas | pandas/tests/frame/test_nonunique_indexes.py | 13 | 18502 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import numpy as np
from pandas.compat import lrange, u
from pandas import DataFrame, Series, MultiIndex, date_range
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameNonuniqueIndexes(TestData):
def test_column_dups_operations(self):
def check(result, expected=None):
if expected is not None:
assert_frame_equal(result, expected)
result.dtypes
str(result)
# assignment
# GH 3687
arr = np.random.randn(3, 2)
idx = lrange(2)
df = DataFrame(arr, columns=['A', 'A'])
df.columns = idx
expected = DataFrame(arr, columns=idx)
check(df, expected)
idx = date_range('20130101', periods=4, freq='Q-NOV')
df = DataFrame([[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]],
columns=['a', 'a', 'a', 'a'])
df.columns = idx
expected = DataFrame(
[[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]], columns=idx)
check(df, expected)
# insert
df = DataFrame([[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]],
columns=['foo', 'bar', 'foo', 'hello'])
df['string'] = 'bah'
expected = DataFrame([[1, 1, 1, 5, 'bah'], [1, 1, 2, 5, 'bah'],
[2, 1, 3, 5, 'bah']],
columns=['foo', 'bar', 'foo', 'hello', 'string'])
check(df, expected)
with tm.assert_raises_regex(ValueError, 'Length of value'):
df.insert(0, 'AnotherColumn', range(len(df.index) - 1))
# insert same dtype
df['foo2'] = 3
expected = DataFrame([[1, 1, 1, 5, 'bah', 3], [1, 1, 2, 5, 'bah', 3],
[2, 1, 3, 5, 'bah', 3]],
columns=['foo', 'bar', 'foo', 'hello',
'string', 'foo2'])
check(df, expected)
# set (non-dup)
df['foo2'] = 4
expected = DataFrame([[1, 1, 1, 5, 'bah', 4], [1, 1, 2, 5, 'bah', 4],
[2, 1, 3, 5, 'bah', 4]],
columns=['foo', 'bar', 'foo', 'hello',
'string', 'foo2'])
check(df, expected)
df['foo2'] = 3
# delete (non dup)
del df['bar']
expected = DataFrame([[1, 1, 5, 'bah', 3], [1, 2, 5, 'bah', 3],
[2, 3, 5, 'bah', 3]],
columns=['foo', 'foo', 'hello', 'string', 'foo2'])
check(df, expected)
# try to delete again (its not consolidated)
del df['hello']
expected = DataFrame([[1, 1, 'bah', 3], [1, 2, 'bah', 3],
[2, 3, 'bah', 3]],
columns=['foo', 'foo', 'string', 'foo2'])
check(df, expected)
# consolidate
df = df._consolidate()
expected = DataFrame([[1, 1, 'bah', 3], [1, 2, 'bah', 3],
[2, 3, 'bah', 3]],
columns=['foo', 'foo', 'string', 'foo2'])
check(df, expected)
# insert
df.insert(2, 'new_col', 5.)
expected = DataFrame([[1, 1, 5., 'bah', 3], [1, 2, 5., 'bah', 3],
[2, 3, 5., 'bah', 3]],
columns=['foo', 'foo', 'new_col', 'string',
'foo2'])
check(df, expected)
# insert a dup
tm.assert_raises_regex(ValueError, 'cannot insert',
df.insert, 2, 'new_col', 4.)
df.insert(2, 'new_col', 4., allow_duplicates=True)
expected = DataFrame([[1, 1, 4., 5., 'bah', 3],
[1, 2, 4., 5., 'bah', 3],
[2, 3, 4., 5., 'bah', 3]],
columns=['foo', 'foo', 'new_col',
'new_col', 'string', 'foo2'])
check(df, expected)
# delete (dup)
del df['foo']
expected = DataFrame([[4., 5., 'bah', 3], [4., 5., 'bah', 3],
[4., 5., 'bah', 3]],
columns=['new_col', 'new_col', 'string', 'foo2'])
assert_frame_equal(df, expected)
# dup across dtypes
df = DataFrame([[1, 1, 1., 5], [1, 1, 2., 5], [2, 1, 3., 5]],
columns=['foo', 'bar', 'foo', 'hello'])
check(df)
df['foo2'] = 7.
expected = DataFrame([[1, 1, 1., 5, 7.], [1, 1, 2., 5, 7.],
[2, 1, 3., 5, 7.]],
columns=['foo', 'bar', 'foo', 'hello', 'foo2'])
check(df, expected)
result = df['foo']
expected = DataFrame([[1, 1.], [1, 2.], [2, 3.]],
columns=['foo', 'foo'])
check(result, expected)
# multiple replacements
df['foo'] = 'string'
expected = DataFrame([['string', 1, 'string', 5, 7.],
['string', 1, 'string', 5, 7.],
['string', 1, 'string', 5, 7.]],
columns=['foo', 'bar', 'foo', 'hello', 'foo2'])
check(df, expected)
del df['foo']
expected = DataFrame([[1, 5, 7.], [1, 5, 7.], [1, 5, 7.]], columns=[
'bar', 'hello', 'foo2'])
check(df, expected)
# values
df = DataFrame([[1, 2.5], [3, 4.5]], index=[1, 2], columns=['x', 'x'])
result = df.values
expected = np.array([[1, 2.5], [3, 4.5]])
assert (result == expected).all().all()
# rename, GH 4403
df4 = DataFrame(
{'TClose': [22.02],
'RT': [0.0454],
'TExg': [0.0422]},
index=MultiIndex.from_tuples([(600809, 20130331)],
names=['STK_ID', 'RPT_Date']))
df5 = DataFrame({'STK_ID': [600809] * 3,
'RPT_Date': [20120930, 20121231, 20130331],
'STK_Name': [u('饡驦'), u('饡驦'), u('饡驦')],
'TClose': [38.05, 41.66, 30.01]},
index=MultiIndex.from_tuples(
[(600809, 20120930),
(600809, 20121231),
(600809, 20130331)],
names=['STK_ID', 'RPT_Date']))
k = pd.merge(df4, df5, how='inner', left_index=True, right_index=True)
result = k.rename(
columns={'TClose_x': 'TClose', 'TClose_y': 'QT_Close'})
str(result)
result.dtypes
expected = (DataFrame([[0.0454, 22.02, 0.0422, 20130331, 600809,
u('饡驦'), 30.01]],
columns=['RT', 'TClose', 'TExg',
'RPT_Date', 'STK_ID', 'STK_Name',
'QT_Close'])
.set_index(['STK_ID', 'RPT_Date'], drop=False))
assert_frame_equal(result, expected)
# reindex is invalid!
df = DataFrame([[1, 5, 7.], [1, 5, 7.], [1, 5, 7.]],
columns=['bar', 'a', 'a'])
pytest.raises(ValueError, df.reindex, columns=['bar'])
pytest.raises(ValueError, df.reindex, columns=['bar', 'foo'])
# drop
df = DataFrame([[1, 5, 7.], [1, 5, 7.], [1, 5, 7.]],
columns=['bar', 'a', 'a'])
result = df.drop(['a'], axis=1)
expected = DataFrame([[1], [1], [1]], columns=['bar'])
check(result, expected)
result = df.drop('a', axis=1)
check(result, expected)
# describe
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=['bar', 'a', 'a'], dtype='float64')
result = df.describe()
s = df.iloc[:, 0].describe()
expected = pd.concat([s, s, s], keys=df.columns, axis=1)
check(result, expected)
# check column dups with index equal and not equal to df's index
df = DataFrame(np.random.randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
columns=['A', 'B', 'A'])
for index in [df.index, pd.Index(list('edcba'))]:
this_df = df.copy()
expected_ser = pd.Series(index.values, index=this_df.index)
expected_df = DataFrame.from_items([('A', expected_ser),
('B', this_df['B']),
('A', expected_ser)])
this_df['A'] = index
check(this_df, expected_df)
# operations
for op in ['__add__', '__mul__', '__sub__', '__truediv__']:
df = DataFrame(dict(A=np.arange(10), B=np.random.rand(10)))
expected = getattr(df, op)(df)
expected.columns = ['A', 'A']
df.columns = ['A', 'A']
result = getattr(df, op)(df)
check(result, expected)
# multiple assignments that change dtypes
# the location indexer is a slice
# GH 6120
df = DataFrame(np.random.randn(5, 2), columns=['that', 'that'])
expected = DataFrame(1.0, index=range(5), columns=['that', 'that'])
df['that'] = 1.0
check(df, expected)
df = DataFrame(np.random.rand(5, 2), columns=['that', 'that'])
expected = DataFrame(1, index=range(5), columns=['that', 'that'])
df['that'] = 1
check(df, expected)
def test_column_dups2(self):
# drop buggy GH 6240
df = DataFrame({'A': np.random.randn(5),
'B': np.random.randn(5),
'C': np.random.randn(5),
'D': ['a', 'b', 'c', 'd', 'e']})
expected = df.take([0, 1, 1], axis=1)
df2 = df.take([2, 0, 1, 2, 1], axis=1)
result = df2.drop('C', axis=1)
assert_frame_equal(result, expected)
# dropna
df = DataFrame({'A': np.random.randn(5),
'B': np.random.randn(5),
'C': np.random.randn(5),
'D': ['a', 'b', 'c', 'd', 'e']})
df.iloc[2, [0, 1, 2]] = np.nan
df.iloc[0, 0] = np.nan
df.iloc[1, 1] = np.nan
df.iloc[:, 3] = np.nan
expected = df.dropna(subset=['A', 'B', 'C'], how='all')
expected.columns = ['A', 'A', 'B', 'C']
df.columns = ['A', 'A', 'B', 'C']
result = df.dropna(subset=['A', 'C'], how='all')
assert_frame_equal(result, expected)
def test_column_dups_indexing(self):
def check(result, expected=None):
if expected is not None:
assert_frame_equal(result, expected)
result.dtypes
str(result)
# boolean indexing
# GH 4879
dups = ['A', 'A', 'C', 'D']
df = DataFrame(np.arange(12).reshape(3, 4), columns=[
'A', 'B', 'C', 'D'], dtype='float64')
expected = df[df.C > 6]
expected.columns = dups
df = DataFrame(np.arange(12).reshape(3, 4),
columns=dups, dtype='float64')
result = df[df.C > 6]
check(result, expected)
# where
df = DataFrame(np.arange(12).reshape(3, 4), columns=[
'A', 'B', 'C', 'D'], dtype='float64')
expected = df[df > 6]
expected.columns = dups
df = DataFrame(np.arange(12).reshape(3, 4),
columns=dups, dtype='float64')
result = df[df > 6]
check(result, expected)
# boolean with the duplicate raises
df = DataFrame(np.arange(12).reshape(3, 4),
columns=dups, dtype='float64')
pytest.raises(ValueError, lambda: df[df.A > 6])
        # dup aligning operations should work
# GH 5185
df1 = DataFrame([1, 2, 3, 4, 5], index=[1, 2, 1, 2, 3])
df2 = DataFrame([1, 2, 3], index=[1, 2, 3])
expected = DataFrame([0, 2, 0, 2, 2], index=[1, 1, 2, 2, 3])
result = df1.sub(df2)
assert_frame_equal(result, expected)
# equality
df1 = DataFrame([[1, 2], [2, np.nan], [3, 4], [4, 4]],
columns=['A', 'B'])
df2 = DataFrame([[0, 1], [2, 4], [2, np.nan], [4, 5]],
columns=['A', 'A'])
# not-comparing like-labelled
pytest.raises(ValueError, lambda: df1 == df2)
df1r = df1.reindex_like(df2)
result = df1r == df2
expected = DataFrame([[False, True], [True, False], [False, False], [
True, False]], columns=['A', 'A'])
assert_frame_equal(result, expected)
# mixed column selection
# GH 5639
dfbool = DataFrame({'one': Series([True, True, False],
index=['a', 'b', 'c']),
'two': Series([False, False, True, False],
index=['a', 'b', 'c', 'd']),
'three': Series([False, True, True, True],
index=['a', 'b', 'c', 'd'])})
expected = pd.concat(
[dfbool['one'], dfbool['three'], dfbool['one']], axis=1)
result = dfbool[['one', 'three', 'one']]
check(result, expected)
# multi-axis dups
# GH 6121
df = DataFrame(np.arange(25.).reshape(5, 5),
index=['a', 'b', 'c', 'd', 'e'],
columns=['A', 'B', 'C', 'D', 'E'])
z = df[['A', 'C', 'A']].copy()
expected = z.loc[['a', 'c', 'a']]
df = DataFrame(np.arange(25.).reshape(5, 5),
index=['a', 'b', 'c', 'd', 'e'],
columns=['A', 'B', 'C', 'D', 'E'])
z = df[['A', 'C', 'A']]
result = z.loc[['a', 'c', 'a']]
check(result, expected)
def test_column_dups_indexing2(self):
# GH 8363
# datetime ops with a non-unique index
df = DataFrame({'A': np.arange(5, dtype='int64'),
'B': np.arange(1, 6, dtype='int64')},
index=[2, 2, 3, 3, 4])
result = df.B - df.A
expected = Series(1, index=[2, 2, 3, 3, 4])
assert_series_equal(result, expected)
df = DataFrame({'A': date_range('20130101', periods=5),
'B': date_range('20130101 09:00:00', periods=5)},
index=[2, 2, 3, 3, 4])
result = df.B - df.A
expected = Series(pd.Timedelta('9 hours'), index=[2, 2, 3, 3, 4])
assert_series_equal(result, expected)
def test_columns_with_dups(self):
# GH 3468 related
# basic
df = DataFrame([[1, 2]], columns=['a', 'a'])
df.columns = ['a', 'a.1']
str(df)
expected = DataFrame([[1, 2]], columns=['a', 'a.1'])
assert_frame_equal(df, expected)
df = DataFrame([[1, 2, 3]], columns=['b', 'a', 'a'])
df.columns = ['b', 'a', 'a.1']
str(df)
expected = DataFrame([[1, 2, 3]], columns=['b', 'a', 'a.1'])
assert_frame_equal(df, expected)
# with a dup index
df = DataFrame([[1, 2]], columns=['a', 'a'])
df.columns = ['b', 'b']
str(df)
expected = DataFrame([[1, 2]], columns=['b', 'b'])
assert_frame_equal(df, expected)
# multi-dtype
df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],
columns=['a', 'a', 'b', 'b', 'd', 'c', 'c'])
df.columns = list('ABCDEFG')
str(df)
expected = DataFrame(
[[1, 2, 1., 2., 3., 'foo', 'bar']], columns=list('ABCDEFG'))
assert_frame_equal(df, expected)
# this is an error because we cannot disambiguate the dup columns
pytest.raises(Exception, lambda x: DataFrame(
[[1, 2, 'foo', 'bar']], columns=['a', 'a', 'a', 'a']))
# dups across blocks
df_float = DataFrame(np.random.randn(10, 3), dtype='float64')
df_int = DataFrame(np.random.randn(10, 3), dtype='int64')
df_bool = DataFrame(True, index=df_float.index,
columns=df_float.columns)
df_object = DataFrame('foo', index=df_float.index,
columns=df_float.columns)
df_dt = DataFrame(pd.Timestamp('20010101'),
index=df_float.index,
columns=df_float.columns)
df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1)
assert len(df._data._blknos) == len(df.columns)
assert len(df._data._blklocs) == len(df.columns)
# testing iloc
for i in range(len(df.columns)):
df.iloc[:, i]
# dup columns across dtype GH 2079/2194
vals = [[1, -1, 2.], [2, -2, 3.]]
rs = DataFrame(vals, columns=['A', 'A', 'B'])
xp = DataFrame(vals)
xp.columns = ['A', 'A', 'B']
assert_frame_equal(rs, xp)
def test_as_matrix_duplicates(self):
df = DataFrame([[1, 2, 'a', 'b'],
[1, 2, 'a', 'b']],
columns=['one', 'one', 'two', 'two'])
result = df.values
expected = np.array([[1, 2, 'a', 'b'], [1, 2, 'a', 'b']],
dtype=object)
assert np.array_equal(result, expected)
def test_set_value_by_index(self):
# See gh-12344
df = DataFrame(np.arange(9).reshape(3, 3).T)
df.columns = list('AAA')
expected = df.iloc[:, 2]
df.iloc[:, 0] = 3
assert_series_equal(df.iloc[:, 2], expected)
df = DataFrame(np.arange(9).reshape(3, 3).T)
df.columns = [2, float(2), str(2)]
expected = df.iloc[:, 1]
df.iloc[:, 0] = 3
assert_series_equal(df.iloc[:, 1], expected)
def test_insert_with_columns_dups(self):
# GH 14291
df = pd.DataFrame()
df.insert(0, 'A', ['g', 'h', 'i'], allow_duplicates=True)
df.insert(0, 'A', ['d', 'e', 'f'], allow_duplicates=True)
df.insert(0, 'A', ['a', 'b', 'c'], allow_duplicates=True)
exp = pd.DataFrame([['a', 'd', 'g'], ['b', 'e', 'h'],
['c', 'f', 'i']], columns=['A', 'A', 'A'])
assert_frame_equal(df, exp)
| bsd-3-clause |
procoder317/scikit-learn | sklearn/semi_supervised/label_propagation.py | 71 | 15342 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supprots RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted, check_array
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
X_2d = check_array(X, accept_sparse = ['csc', 'csr', 'coo', 'dok',
'bsr', 'lil', 'dia'])
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
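            # for the knn kernel, _get_kernel returns neighbor indices rather than
            # weights, so each prediction is the (unnormalized) sum of the label
            # distributions of the query point's nearest training neighbors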
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
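            # soft clamping: scale the propagated distributions by the clamp weights
            # (1 for labeled points, alpha for unlabeled points) and add back the
            # static, (1 - alpha)-scaled initial label distributions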
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
| bsd-3-clause |
scollis/iris | docs/iris/example_code/graphics/COP_1d_plot.py | 7 | 3947 | """
Global average annual temperature plot
======================================
Produces a time-series plot of North American temperature forecasts for 2 different emission scenarios.
Constraining data to a limited spatial area also features in this example.
The data used comes from the HadGEM2-AO model simulations for the A1B and E1 scenarios, both of which
were derived using the IMAGE Integrated Assessment Model (Johns et al. 2010; Lowe et al. 2009).
References
----------
Johns T.C., et al. (2010) Climate change under aggressive mitigation: The ENSEMBLES multi-model
experiment. Climate Dynamics (submitted)
Lowe J.A., C.D. Hewitt, D.P. Van Vuuren, T.C. Johns, E. Stehfest, J-F. Royer, and P. van der Linden, 2009.
New Study For Climate Modeling, Analyses, and Scenarios. Eos Trans. AGU, Vol 90, No. 21.
.. seealso::
Further details on the aggregation functionality being used in this example can be found in
:ref:`cube-statistics`.
"""
import numpy as np
import matplotlib.pyplot as plt
import iris
import iris.plot as iplt
import iris.quickplot as qplt
import iris.analysis.cartography
import matplotlib.dates as mdates
def main():
# Load data into three Cubes, one for each set of NetCDF files.
e1 = iris.load_cube(iris.sample_data_path('E1_north_america.nc'))
a1b = iris.load_cube(iris.sample_data_path('A1B_north_america.nc'))
# load in the global pre-industrial mean temperature, and limit the domain
# to the same North American region that e1 and a1b are at.
north_america = iris.Constraint(longitude=lambda v: 225 <= v <= 315,
latitude=lambda v: 15 <= v <= 60)
pre_industrial = iris.load_cube(iris.sample_data_path('pre-industrial.pp'),
north_america)
# Generate area-weights array. As e1 and a1b are on the same grid we can
# do this just once and re-use. This method requires bounds on lat/lon
# coords, so let's add some in sensible locations using the "guess_bounds"
# method.
e1.coord('latitude').guess_bounds()
e1.coord('longitude').guess_bounds()
e1_grid_areas = iris.analysis.cartography.area_weights(e1)
pre_industrial.coord('latitude').guess_bounds()
pre_industrial.coord('longitude').guess_bounds()
pre_grid_areas = iris.analysis.cartography.area_weights(pre_industrial)
# Perform the area-weighted mean for each of the datasets using the
# computed grid-box areas.
pre_industrial_mean = pre_industrial.collapsed(['latitude', 'longitude'],
iris.analysis.MEAN,
weights=pre_grid_areas)
e1_mean = e1.collapsed(['latitude', 'longitude'],
iris.analysis.MEAN,
weights=e1_grid_areas)
a1b_mean = a1b.collapsed(['latitude', 'longitude'],
iris.analysis.MEAN,
weights=e1_grid_areas)
# Show ticks 30 years apart
plt.gca().xaxis.set_major_locator(mdates.YearLocator(30))
# Plot the datasets
qplt.plot(e1_mean, label='E1 scenario', lw=1.5, color='blue')
qplt.plot(a1b_mean, label='A1B-Image scenario', lw=1.5, color='red')
# Draw a horizontal line showing the pre-industrial mean
plt.axhline(y=pre_industrial_mean.data, color='gray', linestyle='dashed',
label='pre-industrial', lw=1.5)
    # Establish where the A1B and E1 scenarios share the same data, i.e. the observations
common = np.where(a1b_mean.data == e1_mean.data)[0]
observed = a1b_mean[common]
# Plot the observed data
qplt.plot(observed, label='observed', color='black', lw=1.5)
# Add a legend and title
plt.legend(loc="upper left")
plt.title('North American mean air temperature', fontsize=18)
plt.xlabel('Time / year')
plt.grid()
iplt.show()
if __name__ == '__main__':
main()
| gpl-3.0 |
rampasek/seizure-prediction | features/entropy.py | 1 | 10855 | from __future__ import division
import warnings
import os, sys
import numpy as np
import scipy.spatial
import scipy.weave
import scipy.stats.kde
import matplotlib.pyplot as pp
import bisect
# apparently scipy.weave is deprecated. I really shouldn't have used it.
# the best thing would probably be to port entropy_nn() -- or at least
# the subroutine that's in weave -- to cython. The other methods
# are basically pure numpy, so they're not going to achieve any speedup
# in cython.
EULER_MASCHERONI = 0.57721566490153286060651209008240243104215933593992
"""
Author:
https://github.com/rmcgibbo/information/blob/master/entropy.py
Three estimators for the entropy of continuous random variables (one dimension)
entropy_bin() uses the common histogram approach. In addition to the data, it
requires specifying a bin width.
entropy_ci() uses the first order correlation integral, which is like a naive
kernel density estimator. In addition to the data, it requires specifying a
neighborhood radius (kernel bandwidth), which is analogous to a (half) bin width
for the histogram estimator.
entropy_nn uses the distribution of nearest neighbor distances. It requires no
adjustable parameters.
Using some simulations with various bandwidths, my experience is that the
nearest neighbor estimator has the lowest bias, but the highest variance. The
correlation integral estimator is probably the best, especially with a well
chosen neighbor radius. The histogram method tends to underestimate the entropy.
I suspect a kernel density estimator using a gaussian kernel would be even better,
but that is not implemented. The entropy_ci() estimator uses basically a square
kernel.
"""
def entropy_bin(data, width):
"""Entropy of a 1D signal by binning
    Bins of the given width span the full range of the data.
Parameters
----------
data : array_like
The data, a 1D sample of samples of a random variable.
width : float
The bin width of the histogram.
Returns
-------
h : float
The estimated entropy
.. [4] Moddemeijer, R. "On estimation of entropy and mutual information of continuous distributions",
Signal Processing 16 233 (1989)
"""
#if int(n_bins) != n_bins:
# raise ValueError('n_bins must be an int, not %s' % n_bins)
# upper = int(np.max(data))
# lower = int(np.min(data))
# bins = np.arange(lower, upper, step=width)
# print bins, len(bins)
# #bins = np.linspace(lower, upper, n_bins+1)
# bin_widths = bins[1:] - bins[0:-1]
# try:
# counts, bins = np.histogram(data, bins)
# except Exception as e:
# print e
# print data, bins, width, upper, lower, np.arange(lower, upper, step=width), bin_widths
# return None
upper = float(np.max(data))
lower = float(np.min(data))
bins = np.arange(lower, upper, step=width)
#bins = np.linspace(lower, upper, n_bins+1)
bin_widths = bins[1:] - bins[0:-1]
counts, bins = np.histogram(data, bins)
p = counts / np.sum(counts)
# ignore zero entries, and we can't forget to apply the analytic correction
# for the bin width!
bin_widths = np.compress(list(p != 0.), bin_widths)
p = np.compress(list(p != 0.), p)
entropy = -np.sum(p*(np.log2(p) - np.log2(bin_widths)))
return entropy
def entropy_ke(data):
"""Estimate the entropy of a continuous 1D signal using a kernel approach
Ahmad, I., and Lin, P. "A nonparametric estimation of the entropy for
absolutely continuous distributions (Corresp.)," IEEE Trans. Inf. Theory,
22 375 (1976)
"""
pass
def entropy_nn(data, presorted=False):
"""Estimate the entropy of a continuous 1D signal using the distribution
of nearest neighbor distances
.. math::
        H(x) = \frac{1}{n} \sum_{i=1}^n \ln(n \rho_i) + \ln 2 + \gamma
Where `H(x)` is the entropy of the signal x, `n` is the length of the signal, `rho_i`
is the distance from `x_i` to its nearest neighbor `x_j` in the dataset, and gamma
is the Euler-Mascheroni constant
Parameters
----------
data : array_like, ndims=1, dtype=float64
A 1D continuous signal
presorted : boolean, optional
Is the `data` array presorted? The rate limiting step of this calculation is sorting
the data array. So if you've already sorted it, you can make this go a little faster
by passing true.
Returns
-------
h : float
The estimated entropy
[1] Beirlant, J. Dudewicz, E. J. Gyoerfi, L. Van der Meulen, E. C.,
"Nonparametric entropy estimation: An overview", Int. J. Math Stat. Sci.
6 17 (1997) http://jimbeck.caltech.edu/summerlectures/references/Entropy%20estimation.pdf
"""
if data.ndim != 1:
raise ValueError('Only 1D supported')
data = np.array(data, dtype=np.float64)
if not presorted:
data = np.sort(data)
n = len(data)
nearest_distances = np.zeros(n, dtype=np.float64)
# populate the array nearest_distances s.t.
    # nd_i = \min_{j < n; j \neq i} (|| data_i - data_j ||)
    # or in other words, nearest_distances[i] gives the distance
    # from data[i] to the other data point which it is nearest to.
    # We do this in n*log(n) time by sorting, then do the iteration
    # over the sorted array in C, because a linear pass in Python is slower
    # than the C-level n*log(n) sort for moderate n.
scipy.weave.inline(r'''
int i;
double distance, left_distance, right_distance;
// populate the end points manually
nearest_distances[0] = data[1] - data[0];
nearest_distances[n-1] = data[n-1] - data[n-2];
// iterate over the interior points, checking if they're closer to their
// left or right neighbor.
    right_distance = nearest_distances[0];  // initialize so left_distance is correct on the first iteration
for (i = 1; i < n - 1; i++) {
left_distance = right_distance;
right_distance = data[i + 1] - data[i];
distance = left_distance < right_distance ? left_distance : right_distance;
nearest_distances[i] = distance;
}
''', ['data', 'n', 'nearest_distances'])
return np.mean(np.log(n*nearest_distances)) + np.log(2) + EULER_MASCHERONI
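# A pure-NumPy sketch of the same nearest-neighbor calculation (illustrative only;
# it assumes the module-level EULER_MASCHERONI constant used above). Instead of
# scipy.weave it takes, for each sorted point, the smaller of its left and right
# gaps, which mirrors the C loop in entropy_nn for 1D data.
def _entropy_nn_numpy(data):
    data = np.sort(np.asarray(data, dtype=np.float64))
    n = len(data)
    gaps = np.diff(data)  # gaps[i] = data[i+1] - data[i]
    nearest = np.empty(n, dtype=np.float64)
    nearest[0] = gaps[0]
    nearest[-1] = gaps[-1]
    nearest[1:-1] = np.minimum(gaps[:-1], gaps[1:])
    return np.mean(np.log(n * nearest)) + np.log(2) + EULER_MASCHERONI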
def entropy_ci(data, radius, est_max_neighbors_within_radius=16):
"""Estimate the entropy of a continuous 1D signal using the generalized correlation integral
    Parameters
    ----------
    data : array_like
        A 1D sample of a continuous random variable.
    radius : float
        Neighborhood (half-)width used in the correlation integral, analogous to a half
        bin width in the histogram estimator.
    est_max_neighbors_within_radius : int, optional
        Estimate of the maximum number of datapoints within the specified radius from any trial point.
        This would matter for a kdtree search, which needs a count to find (k) in addition to the
        distance_upper_bound: the count must be large enough to really find all the neighbors, but
        since the kdtree allocates arrays of size (n x k) it should not be too big either. The
        current 1D implementation finds neighbors by bisection on the sorted data, so this
        parameter is not used.
Returns
-------
h : float
The estimated entropy
References
----------
[2] Prichard, D. and Theiler, J. "Generalized Redundancies for Time Series Analysis", Physica D. 84 476 (1995)
http://arxiv.org/pdf/comp-gas/9405006.pdf
[3] Pawelzik, K. and Schuster, H. G; "Generalized dimensions and entropies from a measured time series",
Phys. Rev. A; 35 481 (1987)
"""
n = len(data)
if data.ndim != 1:
raise ValueError('Only 1D supported')
data = np.sort(data)
n_neighbors = np.zeros(n, dtype=np.int)
for i in xrange(n):
high = bisect.bisect_left(data, data[i] + radius, lo=i)
low = bisect.bisect_right(data, data[i] - radius, lo=0, hi=i)
# number of data points excluding i that are within data[i] - radius and data[i] + radius
n_neighbors[i] = high - low - 1
# DEBUG
# assert n_neighbors[i] == np.count_nonzero((data < data[i] + radius) & (data > data[i] - radius)) - 1
# assert np.all(data[low:high] < data[i] + radius)
# assert np.all(data[low:high] > data[i] - radius)
    fraction_neighbors = n_neighbors / float(n)  # float() guards against integer division under Python 2
# exclude the bins where n_neighbors is zero
# equation 20, 22 in [2]
    # note: the paper seems to have left out the log(radius) term, but it's pretty
    # clearly supposed to be there. The estimator is closely analogous to the histogram
    # estimator, and without the term the mean of the entropy estimate shows an obvious
    # dependence on the chosen radius.
entropy = -np.mean(np.compress(n_neighbors > 0, np.log(fraction_neighbors) - np.log(2*radius)))
return entropy
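# A short sanity check for entropy_ci above (illustrative only; the radius is an
# arbitrary choice). A unit exponential sample has a differential entropy of exactly
# 1 nat, so the estimate should land near 1; main() below runs a fuller comparison
# of all the estimators.
def _example_entropy_ci(n_pts=10000, radius=0.1):
    data = np.random.exponential(size=n_pts)
    return entropy_ci(data, radius)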
def main():
"""Generate some random samples and compare the entropy estimators. This will make some plots.
The idea is to run M trials where we generate N points and calculate their entropy. Then we plot,
for each estimator, the empirical distribution of the estimates over the M trials.
"""
n_trials = 150
n_pts = 10000
bin_entropies_01, nn_entropies, cgi_entropies_01 = [], [], []
bin_entropies_03, cgi_entropies_03 = [], []
bin_entropies_02, cgi_entropies_02 = [], []
#entropy_bin2(np.random.randn(1000), 30)
for i in range(n_trials):
print 'trial', i
#data = np.random.randn(n_pts)
data = np.random.exponential(size=n_pts)
nn_entropies.append(entropy_nn(data))
bin_entropies_01.append(entropy_bin(data, 0.05))
cgi_entropies_01.append(entropy_ci(data, 0.05))
bin_entropies_02.append(entropy_bin(data, 0.2))
cgi_entropies_02.append(entropy_ci(data, 0.2))
bin_entropies_03.append(entropy_bin(data, 0.3))
cgi_entropies_03.append(entropy_ci(data, 0.3))
pp.figure(figsize=(15,8))
    plot_gkde(nn_entropies, label='nn entropies')
    plot_gkde(cgi_entropies_01, label='cgi entropies 0.05')
    plot_gkde(cgi_entropies_02, label='cgi entropies 0.2')
    plot_gkde(cgi_entropies_03, label='cgi entropies 0.3')
    plot_gkde(bin_entropies_01, label='bin entropies 0.05')
plot_gkde(bin_entropies_02, label='bin entropies 0.2')
plot_gkde(bin_entropies_03, label='bin entropies 0.3')
#analytic = 0.5*np.log(2*np.pi*np.e)
analytic = 1
print analytic
pp.plot([analytic, analytic], [0, 20], 'k', linewidth=5)
pp.legend()
pp.show()
def plot_gkde(data, *args, **kwargs):
"""Plot a gaussia kernel density estimator. *args and **kwargs will be passed
directory to pyplot.plot()"""
kde = scipy.stats.gaussian_kde(data)
lower = np.mean(data) - 3*np.std(data)
upper = np.mean(data) + 3*np.std(data)
x = np.linspace(lower, upper, 100)
y = kde(x)
pp.plot(x, y, *args, **kwargs)
if __name__ == '__main__':
main()
| gpl-2.0 |
convexopt/gpkit | gpkit/solution_array.py | 1 | 35733 | """Defines SolutionArray class"""
import re
from collections import Iterable
import cPickle as pickle
import numpy as np
from .nomials import NomialArray
from .small_classes import DictOfLists, Strings
from .small_scripts import mag, isnan
from .repr_conventions import unitstr
CONSTRSPLITPATTERN = re.compile(r"([^*]\*[^*])|( \+ )|( >= )|( <= )|( = )")
VALSTR_REPLACES = [
("+nan", " - "),
("nan", " - "),
("-nan", " - "),
("+0 ", " 0 "),
("+0.00 ", " 0.00 "),
("-0.00 ", " 0.00 "),
("+0.0% ", " 0.0 "),
("-0.0% ", " 0.0 ")
]
def senss_table(data, showvars=(), title="Sensitivities", **kwargs):
"Returns sensitivity table lines"
if "constants" in data.get("sensitivities", {}):
data = data["sensitivities"]["constants"]
if showvars:
data = {k: data[k] for k in showvars if k in data}
return var_table(data, title, sortbyvals=True,
valfmt="%+-.2g ", vecfmt="%+-8.2g",
printunits=False, minval=1e-3, **kwargs)
def topsenss_table(data, showvars, nvars=5, **kwargs):
"Returns top sensitivity table lines"
data, filtered = topsenss_filter(data, showvars, nvars)
title = "Most Sensitive" if not filtered else "Next Largest Sensitivities"
return senss_table(data, title=title, hidebelowminval=True, **kwargs)
def topsenss_filter(data, showvars, nvars=5):
"Filters sensitivities down to top N vars"
if "constants" in data.get("sensitivities", {}):
data = data["sensitivities"]["constants"]
mean_abs_senss = {k: np.abs(s).mean() for k, s in data.items()
if not isnan(s).any()}
topk = [k for k, _ in sorted(mean_abs_senss.items(), key=lambda l: l[1])]
filter_already_shown = showvars.intersection(topk)
for k in filter_already_shown:
topk.remove(k)
if nvars > 3: # always show at least 3
nvars -= 1
return {k: data[k] for k in topk[-nvars:]}, filter_already_shown
def insenss_table(data, _, maxval=0.1, **kwargs):
"Returns insensitivity table lines"
if "constants" in data.get("sensitivities", {}):
data = data["sensitivities"]["constants"]
data = {k: s for k, s in data.items() if np.mean(np.abs(s)) < maxval}
return senss_table(data, title="Insensitive Fixed Variables", **kwargs)
def tight_table(self, _, ntightconstrs=5, tight_senss=1e-2, **kwargs):
"Return constraint tightness lines"
if not self.model:
return []
title = "Tightest Constraints"
data = [(-float("%+6.2g" % c.relax_sensitivity) + 1e-30*hash(str(c)),
"%+6.2g" % c.relax_sensitivity, c)
for c in self.model.flat(constraintsets=False)
if c.relax_sensitivity >= tight_senss]
if not data:
lines = ["No constraints had a sensitivity above %+5.1g."
% tight_senss]
else:
data = sorted(data)[:ntightconstrs]
lines = constraint_table(data, **kwargs)
lines = [title] + ["-"*len(title)] + lines + [""]
if "sweepvariables" in self:
lines.insert(1, "(for the last sweep only)")
return lines
def loose_table(self, _, loose_senss=1e-5, **kwargs):
"Return constraint tightness lines"
if not self.model:
return []
title = "All Loose Constraints"
data = [(0, "", c) for c in self.model.flat(constraintsets=False)
if c.relax_sensitivity <= loose_senss]
if not data:
lines = ["No constraints had a sensitivity below %+6.2g."
% loose_senss]
else:
lines = constraint_table(data, **kwargs)
return [title] + ["-"*len(title)] + lines + [""]
# pylint: disable=too-many-branches,too-many-locals,too-many-statements
def constraint_table(data, sortbymodels=True, showmodels=True, **_):
"Creates lines for tables where the right side is a constraint."
models = {}
decorated = []
for sortby, openingstr, constraint in data:
if sortbymodels and getattr(constraint, "naming", None):
model = "/".join([kstr + (".%i" % knum if knum != 0 else "")
for kstr, knum in zip(*constraint.naming)
if kstr])
else:
model = ""
if model not in models:
models[model] = len(models)
if showmodels:
constrstr = str(constraint)
if " at 0x" in constrstr: # don't print memory addresses
constrstr = constrstr[:constrstr.find(" at 0x")] + ">"
else:
try:
constrstr = constraint.str_without(["units", "models"])
except AttributeError:
constrstr = str(constraint)
decorated.append((models[model], model, sortby, constrstr, openingstr))
decorated.sort()
oldmodel = None
lines = []
for varlist in decorated:
_, model, _, constrstr, openingstr = varlist
if model not in models:
continue
if model != oldmodel and len(models) > 1:
if oldmodel is not None:
lines.append(["", ""])
lines.append([("modelname",), model])
oldmodel = model
if model and len(models) == 1: # fully remove
constrstr = constrstr.replace("_"+model, "")
else: # partially remove
constrstr = constrstr.replace(model, "")
minlen, maxlen = 25, 80
segments = [s for s in CONSTRSPLITPATTERN.split(constrstr) if s]
splitlines = []
line = ""
next_idx = 0
while next_idx < len(segments):
segment = segments[next_idx]
next_idx += 1
if CONSTRSPLITPATTERN.match(segment) and next_idx < len(segments):
segments[next_idx] = segment[1:] + segments[next_idx]
segment = segment[0]
elif len(line) + len(segment) > maxlen and len(line) > minlen:
splitlines.append(line)
line = " "
line += segment
while len(line) > maxlen:
splitlines.append(line[:maxlen])
line = " " + line[maxlen:]
splitlines.append(line)
lines += [(openingstr + " : ", splitlines[0])]
lines += [("", l) for l in splitlines[1:]]
maxlens = np.max([list(map(len, line)) for line in lines
if line[0] != ("modelname",)], axis=0)
dirs = ['>', '<']
# check lengths before using zip
assert len(list(dirs)) == len(list(maxlens))
fmts = [u'{0:%s%s}' % (direc, L) for direc, L in zip(dirs, maxlens)]
for i, line in enumerate(lines):
if line[0] == ("modelname",):
line = [fmts[0].format(" | "), line[1]]
else:
line = [fmt.format(s) for fmt, s in zip(fmts, line)]
lines[i] = "".join(line).rstrip()
return lines
def warnings_table(self, _, **kwargs):
"Makes a table for all warnings in the solution."
title = "Warnings"
lines = [title, "="*len(title)]
if "warnings" not in self or not self["warnings"]:
return []
for wtype in self["warnings"]:
lines += [wtype] + ["-"*len(wtype)]
data_vec = self["warnings"][wtype]
if not hasattr(data_vec, "shape"):
data_vec = [data_vec]
for i, data in enumerate(data_vec):
if len(data_vec) > 1:
lines += ["| for sweep %i |" % i]
if wtype == "Unexpectedly Tight Constraints" and data[0][1]:
data = [(-int(1e5*c.relax_sensitivity),
"%+6.2g" % c.relax_sensitivity, c) for _, c in data]
data = sorted(data)
lines += constraint_table(data, **kwargs)
elif wtype == "Unexpectedly Loose Constraints" and data[0][1]:
data = [(-int(1e5*c.rel_diff),
"%.4g %s %.4g" % c.tightvalues, c) for _, c in data]
data = sorted(data)
lines += constraint_table(data, **kwargs)
else:
for msg, _ in data:
lines += [msg, ""]
lines += [""]
return lines
TABLEFNS = {"sensitivities": senss_table,
"top sensitivities": topsenss_table,
"insensitivities": insenss_table,
"tightest constraints": tight_table,
"loose constraints": loose_table,
"warnings": warnings_table,
}
def reldiff(val1, val2):
"Relative difference between val1 and val2 (positive if val2 is larger)"
if hasattr(val1, "shape") or hasattr(val2, "shape") or val1.magnitude != 0:
if hasattr(val1, "shape") and val1.shape:
val1_dims = len(val1.shape)
if (hasattr(val2, "shape") and val1.shape != val2.shape
and val2.shape[:val1_dims] == val1.shape):
val1_ = np.tile(val1.magnitude, val2.shape[val1_dims:]+(1,)).T
val1 = val1_ * val1.units
# numpy division will warn but return infs
return (val2/val1 - 1).to("dimensionless").magnitude
elif val2.magnitude == 0: # both are scalar zeroes
return 0
return np.inf # just val1 is a scalar zero
class SolutionArray(DictOfLists):
"""A dictionary (of dictionaries) of lists, with convenience methods.
Items
-----
cost : array
variables: dict of arrays
sensitivities: dict containing:
monomials : array
posynomials : array
variables: dict of arrays
localmodels : NomialArray
Local power-law fits (small sensitivities are cut off)
Example
-------
>>> import gpkit
>>> import numpy as np
>>> x = gpkit.Variable("x")
>>> x_min = gpkit.Variable("x_{min}", 2)
>>> sol = gpkit.Model(x, [x >= x_min]).solve(verbosity=0)
>>>
>>> # VALUES
>>> values = [sol(x), sol.subinto(x), sol["variables"]["x"]]
>>> assert all(np.array(values) == 2)
>>>
>>> # SENSITIVITIES
>>> senss = [sol.sens(x_min), sol.sens(x_min)]
>>> senss.append(sol["sensitivities"]["variables"]["x_{min}"])
>>> assert all(np.array(senss) == 1)
"""
program = None
model = None
table_titles = {"sweepvariables": "Sweep Variables",
"freevariables": "Free Variables",
"constants": "Constants",
"variables": "Variables"}
def __len__(self):
try:
return len(self["cost"])
except TypeError:
return 1
except KeyError:
return 0
def __call__(self, posy):
posy_subbed = self.subinto(posy)
return getattr(posy_subbed, "c", posy_subbed)
def almost_equal(self, sol, reltol=1e-3, sens_abstol=0.01):
"Checks for almost-equality between two solutions"
selfvars = set(self["variables"])
solvars = set(sol["variables"])
if selfvars != solvars:
return False
for key in selfvars:
if abs(reldiff(self(key), sol(key))) >= reltol:
return False
if abs(sol["sensitivities"]["variables"][key]
- self["sensitivities"]["variables"][key]) >= sens_abstol:
return False
return True
# pylint: disable=too-many-locals, too-many-branches, too-many-statements
def diff(self, sol, showvars=None, min_percent=1.0,
show_sensitivities=True, min_senss_delta=0.1,
sortbymodel=True):
"""Outputs differences between this solution and another
Arguments
---------
sol : solution or string
Strings are treated as paths to valid pickled solutions
min_percent : float
The smallest percentage difference in the result to consider
show_sensitivities : boolean
            if True, also compute sensitivity deltas
min_senss_delta : float
The smallest absolute difference in sensitivities to consider
Returns
-------
str
"""
if isinstance(sol, Strings):
sol = pickle.load(open(sol))
selfvars = set(self["variables"])
solvars = set(sol["variables"])
if showvars:
showvars = self._parse_showvars(showvars)
selfvars = set([k for k in showvars
if k in self["variables"]])
solvars = set([k for k in showvars
if k in sol["variables"]])
sol_diff = {}
for key in selfvars.intersection(solvars):
sol_diff[key] = 100*reldiff(self(key), sol(key))
lines = var_table(sol_diff, "Solution difference", sortbyvals=True,
valfmt="%+6.1f%% ", vecfmt="%+6.1f%% ",
printunits=False, minval=min_percent,
sortbymodel=sortbymodel)
if showvars:
lines[0] += " for variables given in `showvars`"
lines[1] += "----------------------------------"
if len(lines) > 3:
lines.insert(1, "(positive means the argument is bigger)")
elif sol_diff:
values = []
for v in sol_diff.values():
if hasattr(v, "shape"):
values.extend(v.flatten().tolist())
else:
values.append(v)
values = np.array(values)
i = np.unravel_index(np.argmax(np.abs(values)), values.shape)
lines.insert(2, "The largest difference is %g%%" % values[i])
if show_sensitivities:
senss_delta = {}
for key in selfvars.intersection(solvars):
if key in sol["sensitivities"]["variables"]:
val1 = self["sensitivities"]["variables"][key]
val2 = sol["sensitivities"]["variables"][key]
if hasattr(val1, "shape") and val1.shape:
val1_dims = len(val1.shape)
if (hasattr(val2, "shape") and val1.shape != val2.shape
and val2.shape[:val1_dims] == val1.shape):
val1 = np.tile(val1,
val2.shape[val1_dims:]+(1,)).T
senss_delta[key] = val2 - val1
elif key in sol["sensitivities"]["variables"]:
print("Key %s is not in this solution's sensitivities"
" but is in those of the argument.")
else: # for variables that just aren't in any constraints
senss_delta[key] = 0
primal_lines = len(lines)
lines += var_table(senss_delta, "Solution sensitivity delta",
sortbyvals=True,
valfmt="%+-6.2f ", vecfmt="%+-6.2f",
printunits=False, minval=min_senss_delta,
sortbymodel=sortbymodel)
if showvars:
lines[primal_lines] += " for variables given in `showvars`"
lines[primal_lines + 1] += "----------------------------------"
if len(lines) > primal_lines + 3:
lines.insert(
primal_lines + 1,
"(positive means the argument has a higher sensitivity)")
elif senss_delta:
absmaxvalue, maxvalue = 0, 0
for valarray in senss_delta.values():
if not getattr(valarray, "shape", None):
value = valarray
else:
flatvalarray = valarray.flatten()
value = flatvalarray[np.argmax(np.abs(valarray))]
absvalue = abs(value)
if absvalue > absmaxvalue:
maxvalue = value
absmaxvalue = absvalue
lines.insert(
primal_lines + 2,
"The largest sensitivity delta is %+g" % maxvalue)
if selfvars-solvars:
lines.append("Variable(s) of this solution"
" which are not in the argument:")
lines.append("\n".join(" %s" % key for key in selfvars-solvars))
lines.append("")
if solvars-selfvars:
lines.append("Variable(s) of the argument"
" which are not in this solution:")
lines.append("\n".join(" %s" % key for key in solvars-selfvars))
lines.append("")
out = "\n".join(lines)
out = out.replace("+0.", " +.")
out = out.replace("-0.", " -.")
return out
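    # A hedged usage sketch for diff() (illustrative only; the variables and the
    # factor of 2 are made up, in the spirit of the class docstring example):
    #
    #     >>> sol_a = gpkit.Model(x, [x >= x_min]).solve(verbosity=0)
    #     >>> sol_b = gpkit.Model(x, [x >= 2*x_min]).solve(verbosity=0)
    #     >>> print(sol_a.diff(sol_b, min_percent=0.5))
    #
    # Passing a filename string instead of a SolutionArray loads a pickled
    # solution, as handled at the top of this method.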
def pickle_prep(self):
"After calling this, the SolutionArray is ready to pickle"
program, model = self.program, self.model
self.program, self.model = None, None
cost = self["cost"]
self["cost"] = mag(cost)
warnings = {}
if "warnings" in self:
for wtype in self["warnings"]:
warnings[wtype] = self["warnings"][wtype]
warnarray = np.array(self["warnings"][wtype])
warnarray[:, :, 1] = None # remove pointer to exact constraint
self["warnings"][wtype] = warnarray.tolist()
return program, model, cost, warnings
def save(self, filename="solution.pkl"):
"""Pickles the solution and saves it to a file.
        The saved solution is identical except for the following:
- the cost is made unitless
- the solution's 'program' attribute is removed
- the solution's 'model' attribute is removed
- the data field is removed from the solution's warnings
(the "message" field is preserved)
Solution can then be loaded with e.g.:
>>> import cPickle as pickle
>>> pickle.load(open("solution.pkl"))
"""
program, model, cost, warnings = self.pickle_prep()
pickle.dump(self, open(filename, "w"))
self["cost"], self["warnings"] = cost, warnings
self.program, self.model = program, model
def varnames(self, include):
"Returns list of variables, optionally with minimal unique names"
self["variables"].update_keymap()
keymap = self["variables"].keymap
names = {}
for key in (include or self["variables"]):
if include:
key, _ = self["variables"].parse_and_index(key)
keys = keymap[key.name]
names.update((str(k), k) for k in keys)
return names
def savemat(self, filename="solution.mat", include=None):
"Saves primal solution as matlab file"
from scipy.io import savemat
savemat(filename,
{name.replace("/", "_").replace(".", "__"):
np.array(self["variables"][key], "f")
for name, key in self.varnames(include).items()})
def todataframe(self, include=None):
"Returns primal solution as pandas dataframe"
import pandas as pd # pylint:disable=import-error
rows = []
cols = ["Name", "Index", "Value", "Units", "Label",
"Models", "Model Numbers", "Other"]
for _, key in sorted(self.varnames(include).items(),
key=lambda k: k[0]):
value = self["variables"][key]
if key.shape:
idxs = []
it = np.nditer(np.empty(key.shape), flags=['multi_index'])
while not it.finished:
idx = it.multi_index
idxs.append(idx[0] if len(idx) == 1 else idx)
it.iternext()
else:
idxs = [None]
for idx in idxs:
row = [
key.name,
"" if idx is None else idx,
value if idx is None else value[idx],
]
rows.append(row)
row.extend([
key.unitstr(),
key.label or "",
key.models or "",
key.modelnums or "",
", ".join("%s=%s" % (k, v) for (k, v) in key.descr.items()
if k not in ["name", "units", "unitrepr",
"idx", "shape", "veckey",
"value", "original_fn",
"models", "modelnums", "label"])
])
return pd.DataFrame(rows, columns=cols)
def savecsv(self, filename="solution.csv", include=None):
"Saves primal solution as csv"
df = self.todataframe(include)
df.to_csv(filename, index=False, encoding="utf-8")
def saveprettycsv(self, showvars=None, filename="solution.csv", valcols=5,
**kwargs):
"Saves primal solution as a CSV sorted by modelname, like the tables."
data = self["variables"]
if showvars:
showvars = self._parse_showvars(showvars)
data = {k: data[k] for k in showvars if k in data}
# if the columns don't capture any dimensions, skip them
minspan, maxspan = None, 1
for v in data.values():
if (getattr(v, "shape", None)
and not all(di == 1 for di in v.shape)):
minspan_ = min((di for di in v.shape if di != 1))
                maxspan_ = max((di for di in v.shape if di != 1))
if minspan is None or minspan_ < minspan:
minspan = minspan_
if maxspan is None or maxspan_ > maxspan:
maxspan = maxspan_
if minspan is not None and minspan > valcols:
valcols = 1
if maxspan < valcols:
valcols = maxspan
lines = var_table(data, "", rawlines=True, maxcolumns=valcols,
**kwargs)
with open(filename, "w") as f:
f.write("Model Name,Variable Name,Value(s)" + ","*valcols
+ "Units,Description\n")
for line in lines:
if line[0] == ("modelname",):
f.write(line[1])
elif not line[1]: # spacer line
f.write("\n")
else:
f.write("," + line[0].replace(" : ", "") + ",")
vals = line[1].replace("[", "").replace("]", "").strip()
for el in vals.split():
f.write(el + ",")
f.write(","*(valcols - len(vals.split())))
f.write((line[2].replace("[", "").replace("]", "").strip()
+ ",").encode("utf8"))
f.write(line[3].strip() + "\n")
def subinto(self, posy):
"Returns NomialArray of each solution substituted into posy."
if posy in self["variables"]:
return self["variables"](posy)
elif not hasattr(posy, "sub"):
raise ValueError("no variable '%s' found in the solution" % posy)
elif len(self) > 1:
return NomialArray([self.atindex(i).subinto(posy)
for i in range(len(self))])
else:
return posy.sub(self["variables"])
def _parse_showvars(self, showvars):
showvars_out = set()
if showvars:
for k in showvars:
k, _ = self["variables"].parse_and_index(k)
keys = self["variables"].keymap[k]
showvars_out.update(keys)
return showvars_out
def summary(self, showvars=(), ntopsenss=5, **kwargs):
"Print summary table, showing top sensitivities and no constants"
showvars = self._parse_showvars(showvars)
out = self.table(showvars, ["cost", "warnings",
"sweepvariables", "freevariables"],
**kwargs)
constants_in_showvars = showvars.intersection(self["constants"])
senss_tables = []
if len(self["constants"]) < ntopsenss+2 or constants_in_showvars:
senss_tables.append("sensitivities")
if len(self["constants"]) >= ntopsenss+2:
senss_tables.append("top sensitivities")
senss_tables.append("tightest constraints")
senss_str = self.table(showvars, senss_tables, nvars=ntopsenss,
**kwargs)
if senss_str:
out += "\n" + senss_str
return out
def table(self, showvars=(),
tables=("cost", "warnings", "sweepvariables", "freevariables",
"constants", "sensitivities", "tightest constraints"),
**kwargs):
"""A table representation of this SolutionArray
Arguments
---------
tables: Iterable
Which to print of ("cost", "sweepvariables", "freevariables",
"constants", "sensitivities")
fixedcols: If true, print vectors in fixed-width format
latex: int
If > 0, return latex format (options 1-3); otherwise plain text
included_models: Iterable of strings
If specified, the models (by name) to include
excluded_models: Iterable of strings
If specified, model names to exclude
Returns
-------
str
"""
showvars = self._parse_showvars(showvars)
strs = []
for table in tables:
if table == "cost":
cost = self["cost"]
# pylint: disable=unsubscriptable-object
if kwargs.get("latex", None):
# TODO should probably print a small latex cost table here
continue
strs += ["\n%s\n----" % "Cost"]
if len(self) > 1:
costs = ["%-8.3g" % c for c in mag(cost[:4])]
strs += [" [ %s %s ]" % (" ".join(costs),
"..." if len(self) > 4 else "")]
else:
strs += [" %-.4g" % mag(cost)]
strs[-1] += unitstr(cost, into=" [%s]", dimless="")
strs += [""]
elif table in TABLEFNS:
strs += TABLEFNS[table](self, showvars, **kwargs)
elif table in self:
data = self[table]
if showvars:
showvars = self._parse_showvars(showvars)
data = {k: data[k] for k in showvars if k in data}
strs += var_table(data, self.table_titles[table], **kwargs)
if kwargs.get("latex", None):
preamble = "\n".join(("% \\documentclass[12pt]{article}",
"% \\usepackage{booktabs}",
"% \\usepackage{longtable}",
"% \\usepackage{amsmath}",
"% \\begin{document}\n"))
strs = [preamble] + strs + ["% \\end{document}"]
return "\n".join(strs)
def plot(self, posys=None, axes=None):
"Plots a sweep for each posy"
if len(self["sweepvariables"]) != 1:
print("SolutionArray.plot only supports 1-dimensional sweeps")
if not hasattr(posys, "__len__"):
posys = [posys]
for i, posy in enumerate(posys):
if posy in [None, "cost"]:
posys[i] = self.program[0].cost # pylint: disable=unsubscriptable-object
import matplotlib.pyplot as plt
from .interactive.plot_sweep import assign_axes
from . import GPBLU
(swept, x), = self["sweepvariables"].items()
posys, axes = assign_axes(swept, posys, axes)
for posy, ax in zip(posys, axes):
y = self(posy) if posy not in [None, "cost"] else self["cost"]
ax.plot(x, y, color=GPBLU)
if len(axes) == 1:
axes, = axes
return plt.gcf(), axes
# pylint: disable=too-many-statements,too-many-arguments
# pylint: disable=too-many-branches,too-many-locals
def var_table(data, title, printunits=True, fixedcols=True,
varfmt="%s : ", valfmt="%-.4g ", vecfmt="%-8.3g",
included_models=None, excluded_models=None, latex=False,
minval=0, sortbyvals=False, hidebelowminval=False,
columns=None, maxcolumns=5, rawlines=False,
sortbymodel=True, **_):
"""
Pretty string representation of a dict of VarKeys
Iterable values are handled specially (partial printing)
Arguments
---------
data: dict whose keys are VarKey's
data to represent in table
title: string
minval: float
skip values with all(abs(value)) < minval
printunits: bool
fixedcols: bool
if True, print rhs (val, units, label) in fixed-width cols
varfmt: string
format for variable names
valfmt: string
format for scalar values
vecfmt: string
format for vector values
latex: int
If > 0, return latex format (options 1-3); otherwise plain text
included_models: Iterable of strings
If specified, the models (by name) to include
excluded_models: Iterable of strings
If specified, model names to exclude
sortbyvals : boolean
If true, rows are sorted by their average value instead of by name.
"""
if not data:
return []
lines = []
decorated = []
models = set()
for i, (k, v) in enumerate(data.items()):
v_arr = np.array([v])
notnan = ~isnan(v_arr)
if notnan.any() and np.sum(np.abs(v_arr[notnan])) >= minval:
if minval and hidebelowminval and len(notnan.shape) > 1:
less_than_min = np.abs(v) <= minval
v[np.logical_and(~isnan(v), less_than_min)] = 0
b = isinstance(v, Iterable) and bool(v.shape)
kmodels = k.descr.get("models", [])
kmodelnums = k.descr.get("modelnums", [])
model = "/".join([kstr + (".%i" % knum if knum != 0 else "")
for kstr, knum in zip(kmodels, kmodelnums)
if kstr])
if not sortbymodel:
model = "null"
models.add(model)
s = k.str_without("models")
if not sortbyvals:
decorated.append((model, b, (varfmt % s), i, k, v))
else: # for consistent sorting, add small offset to negative vals
val = np.mean(np.abs(v)) - (1e-9 if np.mean(v) < 0 else 0)
val -= hash(k.name)*1e-30
decorated.append((model, -val, b, (varfmt % s), i, k, v))
if included_models:
included_models = set(included_models)
included_models.add("")
models = models.intersection(included_models)
if excluded_models:
models = models.difference(excluded_models)
decorated.sort()
oldmodel = None
for varlist in decorated:
if not sortbyvals:
model, isvector, varstr, _, var, val = varlist
else:
model, _, isvector, varstr, _, var, val = varlist
if model not in models:
continue
if model != oldmodel and len(models) > 1:
if oldmodel is not None:
lines.append(["", "", "", ""])
if model != "":
if not latex:
lines.append([("modelname",), model, "", ""])
else:
lines.append([r"\multicolumn{3}{l}{\textbf{" +
model + r"}} \\"])
oldmodel = model
label = var.descr.get('label', '')
units = var.unitstr(" [%s] ") if printunits else ""
if isvector:
# TODO: pretty n-dimensional printing?
if columns is not None:
ncols = columns
else:
last_dim_index = len(val.shape)-1
horiz_dim = last_dim_index # default alignment
ncols = 1
for i, dim_size in enumerate(val.shape):
if dim_size >= ncols and dim_size <= maxcolumns:
horiz_dim = i
ncols = dim_size
# align the array with horiz_dim by making it the last one
dim_order = range(last_dim_index)
dim_order.insert(horiz_dim, last_dim_index)
val = val.transpose(dim_order)
flatval = val.flatten()
vals = [vecfmt % v for v in flatval[:ncols]]
bracket = " ] " if len(flatval) <= ncols else ""
valstr = "[ %s%s" % (" ".join(vals), bracket)
else:
valstr = valfmt % val
for before, after in VALSTR_REPLACES:
valstr = valstr.replace(before, after)
if not latex:
lines.append([varstr, valstr, units, label])
if isvector and len(flatval) > ncols:
values_remaining = len(flatval) - ncols
while values_remaining > 0:
idx = len(flatval)-values_remaining
vals = [vecfmt % v for v in flatval[idx:idx+ncols]]
values_remaining -= ncols
valstr = " " + " ".join(vals)
for before, after in VALSTR_REPLACES:
valstr = valstr.replace(before, after)
if values_remaining <= 0:
spaces = (-values_remaining
* len(valstr)/(values_remaining + ncols))
valstr = valstr + " ]" + " "*spaces
lines.append(["", valstr, "", ""])
else:
varstr = "$%s$" % varstr.replace(" : ", "")
if latex == 1: # normal results table
lines.append([varstr, valstr, "$%s$" % var.latex_unitstr(),
label])
coltitles = [title, "Value", "Units", "Description"]
elif latex == 2: # no values
lines.append([varstr, "$%s$" % var.latex_unitstr(), label])
coltitles = [title, "Units", "Description"]
elif latex == 3: # no description
lines.append([varstr, valstr, "$%s$" % var.latex_unitstr()])
coltitles = [title, "Value", "Units"]
else:
raise ValueError("Unexpected latex option, %s." % latex)
if rawlines:
return lines
if not latex:
if lines:
maxlens = np.max([list(map(len, line)) for line in lines
if line[0] != ("modelname",)], axis=0)
if not fixedcols:
maxlens = [maxlens[0], 0, 0, 0]
dirs = ['>', '<', '<', '<']
# check lengths before using zip
assert len(list(dirs)) == len(list(maxlens))
fmts = [u'{0:%s%s}' % (direc, L) for direc, L in zip(dirs, maxlens)]
for i, line in enumerate(lines):
if line[0] == ("modelname",):
line = [fmts[0].format(" | "), line[1]]
else:
line = [fmt.format(s) for fmt, s in zip(fmts, line)]
lines[i] = "".join(line).rstrip()
lines = [title] + ["-"*len(title)] + lines + [""]
elif lines:
colfmt = {1: "llcl", 2: "lcl", 3: "llc"}
lines = (["\n".join(["{\\footnotesize",
"\\begin{longtable}{%s}" % colfmt[latex],
"\\toprule",
" & ".join(coltitles) + " \\\\ \\midrule"])] +
[" & ".join(l) + " \\\\" for l in lines] +
["\n".join(["\\bottomrule", "\\end{longtable}}", ""])])
return lines
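# var_table can also be called directly on one of the solution dictionaries, e.g.
# (illustrative only, assuming `sol` is a solved SolutionArray):
#
#     >>> print("\n".join(var_table(sol["freevariables"], "Free Variables")))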
| mit |
PatrickChrist/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but has a fat, infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
simontorres/goodman | dev-tools/simulator.py | 1 | 5146 | from astropy.io import fits
from astropy.modeling import models, fitting
import numpy as np
import matplotlib.pyplot as plt
import pickle
class DataSimulator(object):
def __init__(self, data_dimensions=1, x_size=None, y_size=None):
self.dimensions = data_dimensions
if self.dimensions == 1:
self.x_length = x_size
self.y_length = y_size
elif self.dimensions == 2 and (x_size is not None and y_size is not None):
self.x_length = x_size
self.y_length = y_size
else:
raise NotImplementedError
def __call__(self, *args, **kwargs):
if self.dimensions == 1:
if self.x_length is None and self.y_length is not None:
print('object detection')
zero_array = np.zeros(self.y_length)
elif self.y_length is None:
pass
def intensity(axis):
return np.exp(axis/100.) *(3 * axis ** 2 + 5 * axis + 20000) + axis ** 3 + 1000 * np.sin(axis)
# return 20000 * abs(np.sin(axis) +np.exp(axis) *np.sin(axis + 0.75 * np.pi)) +
def make_2d_spectra(separation_fwhm=0, n_targets=1, fwhm=8., intens=0., noise_level=1., plots=False):
x = 4056
y = 1550
header_copy = fits.getheader('/data/simon/data/soar/work/20161114_eng/reduced_data/fzh.0298_CVSO166_400m2_gg455.fits')
header_copy['OBJECT'] = 'Test-%s'%str(separation_fwhm)
header_copy['HISTORY'] = 'Simulated spectrum N-sources %s separation_fwhm %s FWHM %s' % (n_targets, separation_fwhm, fwhm)
targets = n_targets
target_separation_fwhm = float(separation_fwhm)
image = []
if targets > 1:
target_location = [y / 2. - target_separation_fwhm / float(targets) * fwhm,
y / 2. + target_separation_fwhm / float(targets) * fwhm]
print(separation_fwhm, int(y / 2.), target_location, (target_separation_fwhm / targets) * fwhm)
else:
target_location = [y / 2.]
sub_x = np.linspace(0, 10, x)
y_axis = range(y)
spectrum = [[] for i in range(int(targets))]
for i in range(x):
if noise_level == 0:
data = np.ones(y)
else:
data = np.random.normal(10, noise_level, y)
for tar in range(int(targets)):
amplitude = intens * intensity(sub_x[i])
# theo_snr = amplitude / noise_level
# print theo_snr
# gauss = models.Gaussian1D(amplitude=amplitude, mean=target_location[tar], stddev=fwhm) 8.24687326842
voigt = models.Voigt1D(amplitude_L=amplitude, x_0=target_location[tar], fwhm_L=0.942561669206, fwhm_G=fwhm)
# gauss2 = models.Gaussian1D(amplitude=amplitude, mean=target_location[1], stddev=fwhm)
# spectrum[tar].append(amplitude)
sd = voigt(y_axis)
spectrum[tar].append(np.sum(sd[int(target_location[tar] - 2.5 * fwhm):int(target_location[tar] + 2.5 * fwhm)]))
# gauss.amplitude.value = amplitude
data += voigt(y_axis)
# signal_peak = xxxx
# snr = 5
# noise_level = 400.
# noise_amplitude = signal_peak / snr
# data = np.random.normal(noise_level, noise_amplitude, data.shape)
# data2 = gauss2(y_axis)
# plt.plot(data)
# plt.plot(data2)
# plt.show()
if i == int(x / 2.) and plots:
# plt.title('FWHM Separation: %s' % separation_fwhm)
plt.title('Intensity: %s' % (intens))
plt.axvline(int(target_location[tar] - 2.5 * fwhm), color='r')
plt.axvline(int(target_location[tar] + 2.5 * fwhm), color='r')
plt.plot(data)
plt.show()
image.append(data)
# plt.plot(y_axis, data)
# plt.show()
# rotated = zip(*original[::-1])
rotated_image = zip(*image[::-1])
hdu_name_file = '20161128_single-object_n%s_s%s-fwhm_%1.3f_int.fits'% (str(int(n_targets)), str(int(separation_fwhm)), intens)
print(hdu_name_file)
new_hdu = fits.PrimaryHDU(rotated_image, header=header_copy)
new_hdu.writeto(hdu_name_file, clobber=True)
for part_index in range(len(spectrum)):
# f = open('obj.save', 'wb')
# cPickle.dump(my_obj, f, protocol=cPickle.HIGHEST_PROTOCOL)
# f.close()
f = open(hdu_name_file.replace('.fits','_%s.pkl' % str(part_index + 1)), 'wb')
pickle.dump(spectrum[part_index][::-1], f, protocol=pickle.HIGHEST_PROTOCOL)
f.close()
plt.plot(spectrum[part_index][::-1])
plt.title('Simulated spectrum')
plt.xlabel('Pixel Axis')
plt.ylabel('Peak Intensity')
plt.savefig('img/' + hdu_name_file.replace('.fits','.png'), dpi=300)
if plots: # pragma: no cover
plt.show()
if plots: # pragma: no cover
plt.title('Target Separation %s - N targets %s' % (str(separation_fwhm), targets))
plt.imshow(rotated_image)
plt.show()
if __name__ == '__main__':
inten = np.linspace(0.1, 1.3, 20)
#for sep in range(1,15):
for inte in inten:
make_2d_spectra(n_targets=1, fwhm=8.24687326842, intens=inte, noise_level=3.5)
| bsd-3-clause |
madjelan/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 253 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared Euclidean correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared euclidean correlation model, nugget is mathematically equivalent
to a normalized variance: That is
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Jake Vanderplas <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
thetaL=1e-3, thetaU=1,
nugget=(dy / y) ** 2,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
| bsd-3-clause |
timothydmorton/VESPA | vespa/transitsignal.py | 1 | 15139 | from __future__ import division, print_function
import os,os.path
import logging
import pickle
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if not on_rtd:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import numpy.random as rand
from scipy.stats import gaussian_kde
import corner
from emcee.autocorr import integrated_time, AutocorrError
from astropy.io import ascii
else:
np, pd, plt, rand = (None, None, None, None)
gaussian_kde = None
try:
import corner
except ImportError:
pass
if not on_rtd:
from .plotutils import setfig
from .hashutils import hashcombine, hasharray
from .transit_basic import traptransit, fit_traptransit, traptransit_MCMC, MAXSLOPE
from .statutils import kdeconf, qstd, conf_interval
else:
MAXSLOPE = None
def load_pkl(filename):
with open(filename, 'rb') as fin:
return pickle.load(fin)
class TransitSignal(object):
"""A phased-folded transit signal.
Epoch of the transit at 0, 'continuum' set at 1.
:param ts, fs, dfs:
Times (days from mid-transit), fluxes (relative to 1),
flux uncertainties. dfs optional
:param P:
Orbital period.
:param p0: (optional)
Initial guess for least-squares trapezoid fit.
If not provided, then some decent guess will be made
(which is better on made-up data than real...)
:param name: (optional)
Name of the signal.
:param maxslope: (optional)
Upper limit to use for "slope" parameter (T/tau)
in the MCMC fitting of signal. Default is 15.
    .. note:: The implementation of this object could use some refactoring,
        as it is directly translated from some older code. As
        such, not all methods/attributes are well documented.
"""
def __init__(self,ts,fs,dfs=None,P=None,p0=None,name='',maxslope=MAXSLOPE):
ts = np.atleast_1d(ts)
fs = np.atleast_1d(fs)
inds = ts.argsort()
self.ts = ts[inds]
self.fs = fs[inds]
self.name = name
self.P = P
self.maxslope = maxslope
if type(P) == type(np.array([1])):
self.P = P[0]
#set default best-guess trapezoid parameters
if p0 is None:
depth = 1 - fs.min()
duration = (fs < (1-0.01*depth)).sum()/float(len(fs)) * (ts[-1] - ts[0])
tc0 = ts[fs.argmin()]
p0 = np.array([duration,depth,5.,tc0])
tfit = fit_traptransit(ts,fs,p0)
if dfs is None:
dfs = (self.fs - traptransit(self.ts,tfit)).std()
if np.size(dfs)==1:
dfs = np.ones(len(self.ts))*dfs
self.dfs = dfs
self.dur,self.depth,self.slope,self.center = tfit
self.trapfit = tfit
logging.debug('trapezoidal leastsq fit: {}'.format(self.trapfit))
self.hasMCMC = False
@classmethod
def from_ascii(cls, filename, **kwargs):
table = ascii.read(filename).to_pandas()
if len(table.columns)==3:
return cls(table.iloc[:, 0].values, table.iloc[:, 1].values, table.iloc[:, 2].values,
**kwargs)
elif len(table.columns)==2:
return cls(table.iloc[:, 0].values, table.iloc[:, 1].values,
**kwargs)
def save_hdf(self, filename, path=''):
"""
Save transitsignal info using HDF...not yet implemented.
.. note::
Refactoring plan is to re-write saving to use HDF
instead of pickle.
"""
raise NotImplementedError
def triangle(self, **kwargs):
pts = np.array([self.logdeps, self.durs, self.slopes]).T
fig = corner.corner(pts, labels=['log (Depth)',
'Duration', 'T/tau'], **kwargs)
return fig
def save_pkl(self, filename):
"""
Pickles TransitSignal.
"""
with open(filename, 'wb') as fout:
pickle.dump(self, fout)
#eventually make this save_hdf
def save(self, filename):
"""
Calls save_pkl function.
"""
self.save_pkl(filename)
def __eq__(self,other):
return hash(self) == hash(other)
def __hash__(self):
key = hashcombine(hasharray(self.ts),
hasharray(self.fs),
self.P,
self.maxslope)
if self.hasMCMC:
key = hashcombine(key, hasharray(self.slopes),
hasharray(self.durs),
hasharray(self.logdeps))
return key
def plot(self, fig=None, plot_trap=False, name=False, trap_color='g',
trap_kwargs=None, **kwargs):
"""
Makes a simple plot of signal
:param fig: (optional)
Argument for :func:`plotutils.setfig`.
:param plot_trap: (optional)
Whether to plot the (best-fit least-sq) trapezoid fit.
:param name: (optional)
Whether to annotate plot with the name of the signal;
can be ``True`` (in which case ``self.name`` will be
used), or any arbitrary string.
:param trap_color: (optional)
Color of trapezoid fit line.
:param trap_kwargs: (optional)
Keyword arguments to pass to trapezoid fit line.
:param **kwargs: (optional)
Additional keyword arguments passed to ``plt.plot``.
"""
setfig(fig)
plt.plot(self.ts,self.fs,'.',**kwargs)
if plot_trap and hasattr(self,'trapfit'):
if trap_kwargs is None:
trap_kwargs = {}
plt.plot(self.ts, traptransit(self.ts,self.trapfit),
color=trap_color, **trap_kwargs)
if name is not None:
if type(name)==type(''):
text = name
else:
text = self.name
plt.annotate(text,xy=(0.1,0.1),xycoords='axes fraction',fontsize=22)
if hasattr(self,'depthfit') and not np.isnan(self.depthfit[0]):
lo = 1 - 3*self.depthfit[0]
hi = 1 + 2*self.depthfit[0]
else:
lo = 1
hi = 1
sig = qstd(self.fs,0.005)
hi = max(hi,self.fs.mean() + 7*sig)
lo = min(lo,self.fs.mean() - 7*sig)
logging.debug('lo={}, hi={}'.format(lo,hi))
plt.ylim((lo,hi))
plt.xlabel('time [days]')
plt.ylabel('Relative flux')
def MCMC(self, niter=500, nburn=200, nwalkers=200, threads=1,
fit_partial=False, width=3, savedir=None, refit=False,
thin=10, conf=0.95, maxslope=MAXSLOPE, debug=False, p0=None):
"""
Fit transit signal to trapezoid model using MCMC
.. note:: As currently implemented, this method creates a
bunch of attributes relevant to the MCMC fit; I plan
to refactor this to define those attributes as properties
so as not to have their creation hidden away here. I plan
to refactor how this works.
"""
if fit_partial:
wok = np.where((np.absolute(self.ts-self.center) < (width*self.dur)) &
~np.isnan(self.fs))
else:
wok = np.where(~np.isnan(self.fs))
if savedir is not None:
if not os.path.exists(savedir):
os.mkdir(savedir)
alreadydone = True
alreadydone &= savedir is not None
alreadydone &= os.path.exists('%s/ts.npy' % savedir)
alreadydone &= os.path.exists('%s/fs.npy' % savedir)
if savedir is not None and alreadydone:
ts_done = np.load('%s/ts.npy' % savedir)
fs_done = np.load('%s/fs.npy' % savedir)
alreadydone &= np.all(ts_done == self.ts[wok])
alreadydone &= np.all(fs_done == self.fs[wok])
if alreadydone and not refit:
logging.info('MCMC fit already done for %s. Loading chains.' % self.name)
Ts = np.load('%s/duration_chain.npy' % savedir)
ds = np.load('%s/depth_chain.npy' % savedir)
slopes = np.load('%s/slope_chain.npy' % savedir)
tcs = np.load('%s/tc_chain.npy' % savedir)
else:
logging.info('Fitting data to trapezoid shape with MCMC for %s....' % self.name)
if p0 is None:
p0 = self.trapfit.copy()
p0[0] = np.absolute(p0[0])
if p0[2] < 2:
p0[2] = 2.01
if p0[1] < 0:
p0[1] = 1e-5
logging.debug('p0 for MCMC = {}'.format(p0))
sampler = traptransit_MCMC(self.ts[wok],self.fs[wok],self.dfs[wok],
niter=niter,nburn=nburn,nwalkers=nwalkers,
threads=threads,p0=p0,return_sampler=True,
maxslope=maxslope)
Ts,ds,slopes,tcs = (sampler.flatchain[:,0],sampler.flatchain[:,1],
sampler.flatchain[:,2],sampler.flatchain[:,3])
self.sampler = sampler
if savedir is not None:
np.save('%s/duration_chain.npy' % savedir,Ts)
np.save('%s/depth_chain.npy' % savedir,ds)
np.save('%s/slope_chain.npy' % savedir,slopes)
np.save('%s/tc_chain.npy' % savedir,tcs)
np.save('%s/ts.npy' % savedir,self.ts[wok])
np.save('%s/fs.npy' % savedir,self.fs[wok])
if debug:
print(Ts)
print(ds)
print(slopes)
print(tcs)
N = len(Ts)
try:
self.Ts_acor = integrated_time(Ts)
self.ds_acor = integrated_time(ds)
self.slopes_acor = integrated_time(slopes)
self.tcs_acor = integrated_time(tcs)
self.fit_converged = True
except AutocorrError:
self.fit_converged = False
ok = (Ts > 0) & (ds > 0) & (slopes > 0) & (slopes < self.maxslope)
logging.debug('trapezoidal fit has {} good sample points'.format(ok.sum()))
if ok.sum()==0:
if (Ts > 0).sum()==0:
#logging.debug('{} points with Ts > 0'.format((Ts > 0).sum()))
logging.debug('{}'.format(Ts))
raise MCMCError('{}: 0 points with Ts > 0'.format(self.name))
if (ds > 0).sum()==0:
#logging.debug('{} points with ds > 0'.format((ds > 0).sum()))
logging.debug('{}'.format(ds))
raise MCMCError('{}: 0 points with ds > 0'.format(self.name))
if (slopes > 0).sum()==0:
#logging.debug('{} points with slopes > 0'.format((slopes > 0).sum()))
logging.debug('{}'.format(slopes))
raise MCMCError('{}: 0 points with slopes > 0'.format(self.name))
if (slopes < self.maxslope).sum()==0:
#logging.debug('{} points with slopes < maxslope ({})'.format((slopes < self.maxslope).sum(),self.maxslope))
logging.debug('{}'.format(slopes))
            raise MCMCError('{}: 0 points with slopes < maxslope ({})'.format(self.name, self.maxslope))
durs,deps,logdeps,slopes = (Ts[ok],ds[ok],np.log10(ds[ok]),
slopes[ok])
inds = (np.arange(len(durs)/thin)*thin).astype(int)
durs,deps,logdeps,slopes = (durs[inds],deps[inds],logdeps[inds],
slopes[inds])
self.durs,self.deps,self.logdeps,self.slopes = (durs,deps,logdeps,slopes)
self._make_kde(conf=conf)
self.hasMCMC = True
def corner(self, outfile=None, plot_contours=False, **kwargs):
fig = corner.corner(self.kde.dataset.T, labels=['Duration', 'log(depth)', 'T/tau'],
plot_contours=False, **kwargs)
if outfile is not None:
fig.savefig(outfile)
return fig
def _make_kde(self, conf=0.95):
self.durkde = gaussian_kde(self.durs)
self.depthkde = gaussian_kde(self.deps)
self.slopekde = gaussian_kde(self.slopes)
self.logdepthkde = gaussian_kde(self.logdeps)
if self.fit_converged:
try:
durconf = kdeconf(self.durkde,conf)
depconf = kdeconf(self.depthkde,conf)
logdepconf = kdeconf(self.logdepthkde,conf)
slopeconf = kdeconf(self.slopekde,conf)
except:
raise
raise MCMCError('Error generating confidence intervals...fit must not have worked.')
durmed = np.median(self.durs)
depmed = np.median(self.deps)
logdepmed = np.median(self.logdeps)
slopemed = np.median(self.slopes)
self.durfit = (durmed,np.array([durmed-durconf[0],durconf[1]-durmed]))
self.depthfit = (depmed,np.array([depmed-depconf[0],depconf[1]-depmed]))
self.logdepthfit = (logdepmed,np.array([logdepmed-logdepconf[0],logdepconf[1]-logdepmed]))
self.slopefit = (slopemed,np.array([slopemed-slopeconf[0],slopeconf[1]-slopemed]))
else:
self.durfit = (np.nan,(np.nan,np.nan))
self.depthfit = (np.nan,(np.nan,np.nan))
self.logdepthfit = (np.nan,(np.nan,np.nan))
self.slopefit = (np.nan,(np.nan,np.nan))
points = np.array([self.durs,self.logdeps,self.slopes])
self.kde = gaussian_kde(points)
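# A hedged end-to-end sketch of typical TransitSignal usage (illustrative only; the
# file name, period, and settings are made-up placeholders):
#
#     >>> ts, fs = np.loadtxt("folded_lightcurve.txt", unpack=True)  # days, rel. flux
#     >>> sig = TransitSignal(ts, fs, P=3.5, name="candidate-1")
#     >>> sig.MCMC(niter=500, nburn=200)  # trapezoid fit; fills durs, logdeps, slopes
#     >>> fig = sig.corner()              # corner plot of duration, log(depth), T/tau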
class TransitSignal_FromSamples(TransitSignal):
"""Use this if all you have is the trapezoid-fit samples
"""
def __init__(self, period, durs, depths, slopes,
name='', **kwargs):
self.period = period
self.durs = durs
self.deps = depths
self.logdeps = np.log10(depths)
self.slopes = slopes
self.hasMCMC = True
self.fit_converged = True #better be
self._make_kde()
self.name = name
def MCMC(self, *args, **kwargs):
pass
def plot(self, *args, **kwargs):
pass
def __hash__(self):
return hashcombine(self.period, hasharray(self.durs),
hasharray(self.deps),
hasharray(self.slopes))
class TransitSignal_DF(TransitSignal):
def __init__(self, df, columns=['t','f','e_f'], **kwargs):
t_col, f_col, e_f_col = columns
t = df[t_col]
f = df[f_col]
if e_f_col in df:
e_f = df[e_f_col]
else:
e_f = None
TransitSignal.__init__(self, t, f, e_f, **kwargs)
class TransitSignal_ASCII(TransitSignal):
def __init__(self, filename, cols=(0,1), err_col=2, **kwargs):
t, f = np.loadtxt(filename, usecols=cols, unpack=True)
try:
e_f = np.loadtxt(filename, usecols=(err_col,))
except:
e_f = None
TransitSignal.__init__(self, t, f, e_f, **kwargs)
############# Exceptions ##############3
class MCMCError(Exception):
pass
| mit |
aicenter/roadmap-processing | prune_gpx.py | 1 | 5505 | #
# Copyright (c) 2021 Czech Technical University in Prague.
#
# This file is part of Roadmaptools
# (see https://github.com/aicenter/roadmap-processing).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from roadmaptools.inout import load_gpx, save_gpx
from tqdm import tqdm
import os
# data = load_gpx('/home/martin/Stažené/traces-raw-old.gpx')
# save_gpx(data,'neexistujici slozka')
def file_len(fname):
    # note: despite its name, this counts characters in the file, not lines
    letters = 0
with open(fname, 'r') as f:
for line in f.readlines():
letters += len(line)
return letters
def cut_gpx(in_file, out_file):
number_of_chars = file_len(in_file)
print(number_of_chars)
number_of_chars /= 2
number_of_chars = int(number_of_chars)
# number_of_lines = 300000000
print(number_of_chars)
result = ''
with open(in_file, 'r') as fh:
for line in fh.readlines():
if len(result) < number_of_chars:
result += line
elif not line.startswith(' <trkpt'):
# print(line.startswith(' <trkpt'))
result += line
else:
break
print(len(result))
result += '\n</trkseg>\n</trk>\n</gpx>'
with open(out_file, 'w') as ofh:
ofh.write(result)
# data = load_gpx('/home/martin/Stažené/traces-raw.gpx')
# cut_gpx('/home/martin/Stažené/traces-raw.gpx','/home/martin/Stažené/traces-raw-mensi.gpx')
# def _read_all_csv_from_dir(directory: str):
# traces_all = []
#
# for filename in tqdm(os.listdir(directory), desc="Loading and parsing traces"):
# if os.path.splitext(filename)[-1] == '.csv':
# abs_filename = os.path.join(directory, filename)
# traces_all.append(_load_traces_from_csv(abs_filename))
# return traces_all
f = lambda x: x != "STREETPICKUP"  # keep every status except street pickups
import pandas as pd
import numpy as np
import roadmaptools
ls = []
len_ids = set()
pd.set_option('display.max_columns', None)
column_names = ['id_record', 'id_car', 'status', 'lat', 'lon', 'time']
use_columns = ['id_car', 'status', 'lat', 'lon', 'time']
# data_types = [str,int,str,float,float,str]
arr = np.empty(shape=(len(os.listdir('/home/martin/MOBILITY/data/traces')),),dtype=object)
for idx, filename in tqdm(enumerate(os.listdir('/home/martin/MOBILITY/data/traces'))):
abs_filename = os.path.join('/home/martin/MOBILITY/data/traces', filename)
filename_parts = abs_filename.split(sep='.')
file_extension = filename_parts[-2]
df = pd.read_csv(abs_filename, header=None, names=column_names, usecols=['id_car', 'status', 'lat', 'lon', 'time'], converters={'status': f})
df['id_car'] = pd.to_numeric(file_extension + df['id_car'].astype(str))
# print(df.head())
# print(df.dtypes)
# q = sort_one_by_one(df,'id_car','time')
# print(q.head())
filtered = df[df.loc[:, 'status']].loc[:, ['id_car', 'lat', 'lon', 'time']]
    # stable two-pass sort: rows end up ordered by id_car, then by time
    # within each car (mergesort keeps the time order from the first pass)
    filtered.sort_values(by='time', ascending=True, inplace=True)
    filtered.sort_values(by='id_car', kind='mergesort', ascending=True, inplace=True)
# print(filtered)
# ls.append(filtered)
arr[idx] = filtered
df = pd.concat(arr, ignore_index=True)
# # exit(0)
# print(df)
# df.sort_values(by=['id_car','time'],ascending=True,inplace=True)
# print(df)
# for idx, row in df.iterrows():
# print(idx)
# iter_csv = pd.read_csv(abs_filename,iterator=True,chunksize=1000)
# df = pd.concat([chunk[chunk['field'] > constant] for chunk in iter_csv])
# for row in df:
# len_id = len(row[1])
# if len_id not in len_ids:
# len_ids.add(len_id)
#
# print(len_ids)
# from numpy import genfromtxt
# from numpy.core.defchararray import add
#
# # my_data = genfromtxt('my_file.csv', delimiter=',')
# arr = np.empty(shape=(len(os.listdir('/home/martin/MOBILITY/data/traces')),), dtype=object)
# for idx, filename in tqdm(enumerate(os.listdir('/home/martin/MOBILITY/data/traces')[:5])):
# abs_filename = os.path.join('/home/martin/MOBILITY/data/traces', filename)
# filename_parts = abs_filename.split(sep='.')
# file_extension = filename_parts[-2]
# arr[idx] = genfromtxt(abs_filename, delimiter=',', usecols=(1, 2, 3, 4, 5), encoding=None,dtype=None)
# # arr[idx][0] = add(file_extension, arr[idx][0])
# arr[idx][0].astype(int)
# print(arr[idx].dtype)
# print(arr)
# arr = np.empty(hash(5,))
# l = []
# for idx, filename in tqdm(enumerate(os.listdir('/home/martin/MOBILITY/data/traces'))):
# abs_filename = os.path.join('/home/martin/MOBILITY/data/traces', filename)
# filename_parts = abs_filename.split(sep='.')
# file_extension = filename_parts[-2]
# iterator = roadmaptools.inout.load_csv(abs_filename)
#
# ls = []
# for row in iterator:
# if row[2] == "STREETPICKUP":
# continue
#
# ls.append(np.array([int(row[1]),float(row[3]),float(4),row[5]]))
# l.append(ls)
| mit |
SciBase-Project/internationality-journals | src/aminer_community.py | 3 | 2600 | print "[INFO] Reading aminer_cites.json"
# nodes belonging to each publication
nodes = {}
# self cited edges
edge_list_1 = []
# non self cited edges
edge_list_2 = []
# publication edges
edge_list_3 = []
import json
with open('../output/aminer_cites.json') as data_file:
data = json.load(data_file)
for publication in data :
papers = data[publication]
for paper in papers :
# add edge to publication
src = paper
edge_list_3.append((publication, src))
# add node to respective publication
if publication not in nodes :
nodes[publication] = []
nodes[publication].append(paper)
cites = data[publication][paper]
for cite in cites :
src = paper
dest = cite['index']
# add node to respective publication
cite_pub = cite['publication']
if cite_pub not in nodes :
nodes[cite_pub] = []
nodes[cite_pub].append(dest)
# add edges
edge = (src, dest)
# self cited edge
if cite['self'] == True : edge_list_1.append(edge)
# non self cited edge
else : edge_list_2.append(edge)
# add edge to publication
edge_list_3.append((cite_pub, dest))
# remove all duplicates
edge_list_3 = list(set(edge_list_3))
# remove all duplicates
for pub in nodes :
nodes[pub] = list(set(nodes[pub]))
print "[INFO] Done reading"
print "[INFO] Generating graph"
import networkx as nx
import matplotlib.pyplot as plt
# make a new graph
G = nx.Graph()
all_edges = []
all_edges.extend(edge_list_1)
all_edges.extend(edge_list_2)
all_edges.extend(edge_list_3)
G.add_edges_from(all_edges)
cite_dict = {}
edge_dict = {}
import community
# first compute the best partition
partition=community.best_partition(G)
#drawing the graph based on number of links
size = float(len(set(partition.values())))
pos = nx.spring_layout(G)
count = 0.
for com in set(partition.values()) :
count = count + 1.
    list_nodes = [node for node in partition.keys()
                  if partition[node] == com]
nx.draw_networkx_nodes(G, pos, list_nodes, node_size = 20,
node_color = str(count / size))
#nx.draw_networkx_edges(G,pos, alpha=0.5)
nx.draw(G,pos,node_size=15,alpha=1,node_color="blue", with_labels=False) # alpha = transparency, labels = names
#plt.savefig("aminer_smallest.png",dpi=1000)
#nx.draw_networkx_edges(G,pos, alpha=0.5)
plt.show()
print "[INFO] Done generating graph" | mit |
yausern/stlab | devices/Keysight_N9010B.py | 1 | 3791 | from stlab.devices.instrument import instrument
from stlab.utils.stlabdict import stlabdict
import numpy as np
import pandas as pd
class Keysight_N9010B(instrument):
def __init__(self,
addr='TCPIP::192.168.1.228::INSTR',
reset=True,
verb=True):
super().__init__(addr,reset,verb)
self.dev.timeout = None
def SetStart(self, x):
mystr = 'FREQ:STAR {}'.format(x)
self.write(mystr)
def SetStop(self, x):
mystr = 'FREQ:STOP {}'.format(x)
self.write(mystr)
def SetCenter(self, x):
mystr = 'FREQ:CENT {}'.format(x)
self.write(mystr)
def SetSpan(self, x):
mystr = 'FREQ:SPAN {}'.format(x)
self.write(mystr)
def SetResolutionBW(self, x):
mystr = 'BAND:RES {}'.format(x)
self.write(mystr)
def GetResolutionBW(self):
mystr = 'BAND:RES?'
x = self.query(mystr)
return float(x)
def SetDigitalIFBW(self,x):
mystr = 'WAV:DIF:BAND {}'.format(x)
self.write(mystr)
def GetDigitalIFBW(self):
mystr = 'WAV:DIF:BAND?'
x = self.query(mystr)
return float(x)
def SetSampleRate(self,x):
mystr = 'WAV:SRAT {}'.format(x)
self.write(mystr)
def GetSampleRate(self):
mystr = 'WAV:SRAT?'
x = self.query(mystr)
return float(x)
def SetIQSweepTime(self,x):
mystr = 'WAVeform:SWEep:TIME {}'.format(x)
self.write(mystr)
def GetIQSweepTime(self):
mystr = 'WAVeform:SWEep:TIME?'
x = self.query(mystr)
return float(x)
def SetPoints(self,x):
self.write('SWE:POIN {}'.format(x))
def SetAverages(self,navg):
#self.write('AVER:TYPE RMS') # Power averaging
self.write('AVER:COUNT {}'.format(navg))
if navg > 1:
self.write(':TRAC:TYPE AVER')
else:
self.write(':TRAC:TYPE WRITE')
def GetAverages(self):
tracetype = self.query(':TRAC:TYPE?')
if tracetype == 'AVER\n':
navg = self.query('AVER:COUNT?')
return float(navg)
else:
return 1
def SetContinuous(self,state=True):
if state:
self.write('INIT:CONT 1')
else:
self.write('INIT:CONT 0')
return
def GetSweepTime(self):
sweeptime = self.query('SWE:TIME?')
return float(sweeptime)
def MeasureIQ(self):
self.SetContinuous(False)
result = self.query(':READ:WAVeform0?')
result = result.split(',')
result = [float(x) for x in result]
result = np.asarray(result)
I = result[::2]
Q = result[1::2]
tend = self.GetIQSweepTime()
t = np.linspace(0,tend,len(I)+1)
t = t[1:]
#print((t[1]-t[0]))
#print(1/(t[1]-t[0]))
#print(self.GetSampleRate())
output = pd.DataFrame()
output['Time (s)'] = t
output['I (V)'] = I
output['Q (V)']= Q
output['Digital IFBW (Hz)'] = self.GetDigitalIFBW()
return output
def MeasureScreen(self):
self.SetContinuous(False)
result = self.query('READ:SAN?')
result = result.split(',')
result = [float(x) for x in result]
result = np.asarray(result)
xx = result[::2]
yy = result[1::2]
#output = stlabdict()
output = pd.DataFrame()
output['Frequency (Hz)'] = xx
output['PSD (dBm)'] = yy
output['Res BW (Hz)']= self.GetResolutionBW()
return output
def GetMetadataString(self): #Should return a string of metadata adequate to write to a file
pass
| gpl-3.0 |
SiccarPoint/landlab | landlab/__init__.py | 1 | 1439 | #! /usr/bin/env python
"""The Landlab
:Package name: TheLandlab
:Release date: 2013-03-24
:Authors: Greg Tucker, Nicole Gasparini, Erkan Istanbulluoglu, Daniel Hobley,
Sai Nudurupati, Jordan Adams, Eric Hutton
:URL: http://csdms.colorado.edu/trac/landlab
:License: MIT
"""
from __future__ import absolute_import
__version__ = '1.0.0-beta.6'
import os
if 'DISPLAY' not in os.environ:
try:
import matplotlib
except ImportError:
import warnings
warnings.warn('matplotlib not found', ImportWarning)
else:
matplotlib.use('Agg')
from .core.model_parameter_dictionary import ModelParameterDictionary
from .core.model_parameter_dictionary import (MissingKeyError,
ParameterValueError)
from .core.model_component import Component
from .framework.collections import Palette, Arena, NoProvidersError
from .framework.decorators import Implements, ImplementsOrRaise
from .framework.framework import Framework
from .field.scalar_data_fields import FieldError
from .grid import *
from .plot import *
from .testing.nosetester import LandlabTester
test = LandlabTester().test
bench = LandlabTester().bench
__all__ = ['ModelParameterDictionary', 'MissingKeyError',
'ParameterValueError', 'Component', 'Palette', 'Arena',
'NoProvidersError', 'Implements', 'ImplementsOrRaise',
'Framework', 'FieldError', 'LandlabTester']
| mit |
1313e/e13Tools | e13tools/tests/test_pyplot.py | 1 | 4708 | # -*- coding: utf-8 -*-
# %% IMPORTS
# Built-in imports
from os import path
# Package imports
import astropy.units as apu
import matplotlib.pyplot as plt
import pytest
# e13Tools imports
from e13tools.core import InputError
from e13tools.pyplot import (
apu2tex, center_spines, draw_textline, f2tex, q2tex)
# Save the path to this directory
dirpath = path.dirname(__file__)
# %% PYTEST CLASSES AND FUNCTIONS
# Pytest for apu2tex()-function
def test_apu2tex():
assert apu2tex(apu.solMass) == r"\mathrm{M_{\odot}}"
assert apu2tex(apu.solMass/apu.yr, unitfrac=False) ==\
r"\mathrm{M_{\odot}\,yr^{-1}}"
assert apu2tex(apu.solMass/apu.yr, unitfrac=True) ==\
r"\mathrm{\frac{M_{\odot}}{yr}}"
# Pytest class for center_spines()-function
class Test_center_spines(object):
# Test if default values work
def test_default(self):
fig = plt.figure()
center_spines()
plt.close(fig)
# Test if setting the x and y tickers work
def test_set_tickers(self):
fig = plt.figure()
plt.plot([-1, 1], [-1, 1])
center_spines(set_xticker=1, set_yticker=1)
plt.close(fig)
# Pytest class for draw_textline()-function
class Test_draw_textline(object):
# Test if writing 'test' on the x-axis works, number 1
def test_x_axis1(self):
fig = plt.figure()
draw_textline("test", x=-1, text_kwargs={'va': None})
plt.close(fig)
# Test if writing 'test' on the x-axis works, number 2
def test_x_axis2(self):
fig = plt.figure()
draw_textline("test", x=2)
plt.close(fig)
# Test if writing 'test' on the y-axis works, number 1
def test_y_axis1(self):
fig = plt.figure()
draw_textline("test", y=-1)
plt.close(fig)
# Test if writing 'test' on the y-axis works, number 2
def test_y_axis2(self):
fig = plt.figure()
draw_textline("test", y=2)
plt.close(fig)
# Test if writing 'test' on the x-axis works, end-top pos
def test_x_axis_end_top(self):
fig = plt.figure()
draw_textline("test", x=-1, pos="end top")
plt.close(fig)
# Test if writing 'test' on the x-axis works, start-bottom pos
def test_x_axis_start_bottom(self):
fig = plt.figure()
draw_textline("test", x=-1, pos="start bottom")
plt.close(fig)
# Test if writing 'test' on the x-axis works, end-bottom pos
def test_x_axis_end_bottom(self):
fig = plt.figure()
draw_textline("test", x=-1, pos="end bottom")
plt.close(fig)
# Test if writing 'test' on the x-axis y-axis fails
def test_xy_axis(self):
fig = plt.figure()
with pytest.raises(InputError):
draw_textline("test", x=-1, y=-1)
plt.close(fig)
# Test if writing 'test' on the x-axis fails for invalid pos
def test_x_axis_invalid_pos(self):
fig = plt.figure()
with pytest.raises(ValueError):
draw_textline("test", x=-1, pos="test")
plt.close(fig)
# Pytest for f2tex()-function
def test_f2tex():
assert f2tex(20.2935826592) == "20.29"
assert f2tex(20.2935826592, sdigits=6) == "20.2936"
assert f2tex(20.2935826592, power=1) == r"2.029\cdot 10^{1}"
assert f2tex(1e6, nobase1=True) == "10^{6}"
assert f2tex(1e6, nobase1=False) == r"1.000\cdot 10^{6}"
assert f2tex(0) == "0."
assert f2tex(20.2935826592, 0.1) == r"20.29\pm 0.10"
assert f2tex(20.2935826592, 0.1, 0.2) == "20.29^{+0.10}_{-0.20}"
assert f2tex(20.2935826592, 0.1, 0.0) == "20.29^{+0.10}_{-0.00}"
assert f2tex(1e6, 12, 10) == r"1.000^{+0.000}_{-0.000}\cdot 10^{6}"
# Pytest for q2tex()-function
def test_q2tex():
assert q2tex(20.2935826592) == "20.29"
assert q2tex(20.2935826592*apu.solMass/apu.yr) ==\
r"20.29\,\mathrm{M_{\odot}\,yr^{-1}}"
assert q2tex(20.2935826592*apu.solMass/apu.yr, sdigits=6) ==\
r"20.2936\,\mathrm{M_{\odot}\,yr^{-1}}"
assert q2tex(20.2935826592*apu.solMass/apu.yr, power=1) ==\
r"2.029\cdot 10^{1}\,\mathrm{M_{\odot}\,yr^{-1}}"
assert q2tex(1e6*apu.solMass/apu.yr, nobase1=True) ==\
r"10^{6}\,\mathrm{M_{\odot}\,yr^{-1}}"
assert q2tex(1e6*apu.solMass/apu.yr, nobase1=False) ==\
r"1.000\cdot 10^{6}\,\mathrm{M_{\odot}\,yr^{-1}}"
assert q2tex(20.2935826592*apu.solMass/apu.yr, unitfrac=False) ==\
r"20.29\,\mathrm{M_{\odot}\,yr^{-1}}"
assert q2tex(20.2935826592*apu.solMass/apu.yr, unitfrac=True) ==\
r"20.29\,\mathrm{\frac{M_{\odot}}{yr}}"
assert q2tex(20.2935826592*apu.kg, 1500*apu.g) ==\
r"20.29\pm 1.50\,\mathrm{kg}"
with pytest.raises(ValueError):
q2tex(1, 1*apu.kg)
| bsd-3-clause |
probml/pyprobml | scripts/bayes_unigauss_2d_pymc3.py | 1 | 1756 | # Approximate 2d posterior using PyMc3
# https://www.ritchievink.com/blog/2019/06/10/bayesian-inference-how-we-are-able-to-chase-the-posterior/
# We use the same data and model as in posteriorGrid2d.py
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import pymc3 as pm
figdir = "../figures"
import os
def save_fig(fname):
if figdir: plt.savefig(os.path.join(figdir, fname))
data = np.array([195, 182])
# lets create a grid of our two parameters
mu = np.linspace(150, 250)
sigma = np.linspace(0, 15)[::-1]
mm, ss = np.meshgrid(mu, sigma) # just broadcasted parameters
likelihood = stats.norm(mm, ss).pdf(data[0]) * stats.norm(mm, ss).pdf(data[1])
aspect = mm.max() / ss.max() / 3
extent = [mm.min(), mm.max(), ss.min(), ss.max()]
# extent = left right bottom top
prior = stats.norm(200, 15).pdf(mm) * stats.cauchy(0, 10).pdf(ss)
# Posterior - grid
unnormalized_posterior = prior * likelihood
posterior = unnormalized_posterior / np.nan_to_num(unnormalized_posterior).sum()
plt.figure()
plt.imshow(posterior, cmap='Blues', aspect=aspect, extent=extent)
plt.xlabel(r'$\mu$')
plt.ylabel(r'$\sigma$')
plt.title('Grid approximation')
save_fig('bayes_unigauss_2d_grid.pdf')
plt.show()
with pm.Model():
# priors
mu = pm.Normal('mu', mu=200, sd=15)
sigma = pm.HalfCauchy('sigma', 10)
# likelihood
observed = pm.Normal('observed', mu=mu, sd=sigma, observed=data)
# sample
trace = pm.sample(draws=10000, chains=1)
pm.traceplot(trace);
plt.figure()
plt.scatter(trace['mu'], trace['sigma'], alpha=0.01)
plt.xlim([extent[0], extent[1]])
plt.ylim([extent[2], extent[3]])
plt.ylabel(r'$\sigma$')
plt.xlabel(r'$\mu$')
plt.title('MCMC samples')
save_fig('bayes_unigauss_2d_pymc3_post.pdf')
plt.show() | mit |
Patrick-Cole/pygmi | pygmi/raster/ginterp.py | 1 | 58895 | # -----------------------------------------------------------------------------
# Name: ginterp.py (part of PyGMI)
#
# Author: Patrick Cole
# E-Mail: [email protected]
#
# Copyright: (c) 2013 Council for Geoscience
# Licence: GPL-3.0
#
# This file is part of PyGMI
#
# PyGMI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyGMI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""
Plot Raster Data.
This is the raster data interpretation module. This module allows for the
display of raster data in a variety of modes, as well as the export of that
display to GeoTiff format.
Currently the following display modes are supported:
* Pseudo Color - data mapped to a color map
* Contours with filled (solid) contours
* RGB ternary images
* CMYK ternary images
* Sun shaded or hill shaded images
It can be used very effectively in conjunction with a GIS package that
supports GeoTiff files. A minimal usage sketch is given after the imports
below.
"""
import os
import sys
import copy
from math import cos, sin, tan
import numpy as np
import numexpr as ne
from PyQt5 import QtWidgets, QtCore
from scipy import ndimage
from matplotlib.figure import Figure
import matplotlib.gridspec as gridspec
import matplotlib.cm as cm
import matplotlib.colors as mcolors
import matplotlib.colorbar as mcolorbar
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from matplotlib.backends.backend_qt5 import NavigationToolbar2QT
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from matplotlib.pyplot import colormaps
from matplotlib.colors import ListedColormap
import pygmi.raster.iodefs as iodefs
import pygmi.raster.dataprep as dataprep
import pygmi.menu_default as menu_default
from pygmi.raster.modest_image import imshow
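# A minimal usage sketch (an outline only, mirroring _testfn() at the bottom
# of this file; 'my_grid.tif' is a hypothetical file name):
#
#   app = QtWidgets.QApplication(sys.argv)
#   data = iodefs.get_raster('my_grid.tif')
#   view = PlotInterp(None)
#   view.indata['Raster'] = data
#   view.data_init()
#   view.settings()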
class MyMplCanvas(FigureCanvasQTAgg):
"""
Canvas for the actual plot.
Attributes
----------
htype : str
string indicating the histogram stretch to apply to the data
hstype : str
string indicating the histogram stretch to apply to the sun data
cbar : matplotlib color map
color map to be used for pseudo color bars
data : list
list of PyGMI raster data objects - used for color images
sdata : list
list of PyGMI raster data objects - used for shaded images
gmode : str
string containing the graphics mode - Contour, Ternary, Sunshade,
Single Color Map.
argb : list
list of matplotlib subplots. There are up to three.
hhist : list
matplotlib hist associated with argb
hband: list
list of strings containing the band names to be used.
htxt : list
list of strings associated with hhist, denoting a raster value (where
mouse is currently hovering over on image)
image : imshow
imshow instance - this is the primary way of displaying an image.
cnt : matplotlib contour
contour instance - used for the contour image
cntf : matplotlib contourf
contourf instance - used for the contour image
background : matplotlib bounding box
image bounding box - used in blitting
bbox_hist_red : matplotlib bounding box
red histogram bounding box
bbox_hist_green : matplotlib bounding box
green histogram bounding box
bbox_hist_blue : matplotlib bounding box
blue histogram bounding box
axes : matplotlib axes
axes for the plot
pinit : numpy array
calculated with aspect - used in sunshading
qinit : numpy array
calculated with aspect - used in sunshading
phi : float
azimuth (sunshading)
theta : float
sun elevation (sunshading)
cell : float
between 1 and 100 - controls sunshade detail.
alpha : float
how much incident light is reflected (0 to 1)
kval : float
k value for cmyk mode
"""
def __init__(self, parent=None):
fig = Figure()
super().__init__(fig)
# figure stuff
self.htype = 'Linear'
self.hstype = 'Linear'
self.cbar = cm.get_cmap('jet')
self.newcmp = self.cbar
self.fullhist = False
self.data = []
self.sdata = []
self.gmode = None
self.argb = [None, None, None]
self.hhist = [None, None, None]
self.hband = [None, None, None, None]
self.htxt = [None, None, None]
self.image = None
self.cnt = None
self.cntf = None
self.background = None
self.bbox_hist_red = None
self.bbox_hist_green = None
self.bbox_hist_blue = None
self.shade = False
self.ccbar = None
self.clippercu = 0.0
self.clippercl = 0.0
self.flagresize = False
self.clipvalu = [None, None, None]
self.clipvall = [None, None, None]
gspc = gridspec.GridSpec(3, 4)
self.axes = fig.add_subplot(gspc[0:, 1:])
self.axes.xaxis.set_visible(False)
self.axes.yaxis.set_visible(False)
for i in range(3):
self.argb[i] = fig.add_subplot(gspc[i, 0])
self.argb[i].xaxis.set_visible(False)
self.argb[i].yaxis.set_visible(False)
self.argb[i].autoscale(False)
fig.subplots_adjust(bottom=0.05)
fig.subplots_adjust(top=.95)
fig.subplots_adjust(left=0.05)
fig.subplots_adjust(right=.95)
fig.subplots_adjust(wspace=0.05)
fig.subplots_adjust(hspace=0.05)
self.setParent(parent)
FigureCanvasQTAgg.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvasQTAgg.updateGeometry(self)
self.figure.canvas.mpl_connect('motion_notify_event', self.move)
# self.cid = self.figure.canvas.mpl_connect('resize_event',
# self.init_graph)
self.cid = self.figure.canvas.mpl_connect('resize_event',
self.revent)
# sun shading stuff
self.pinit = None
self.qinit = None
self.phi = -np.pi/4.
self.theta = np.pi/4.
self.cell = 100.
self.alpha = .0
# cmyk stuff
self.kval = 0.01
def revent(self, event):
"""
Resize event.
Parameters
----------
        event : matplotlib ResizeEvent
            Resize event passed by the canvas.
Returns
-------
None.
"""
self.flagresize = True
def init_graph(self, event=None):
"""
Initialize the graph.
Parameters
----------
event : TYPE, optional
Event. The default is None.
Returns
-------
None.
"""
self.figure.canvas.mpl_disconnect(self.cid)
self.axes.clear()
for i in range(3):
self.argb[i].clear()
x_1, x_2, y_1, y_2 = self.data[0].extent
self.axes.set_xlim(x_1, x_2)
self.axes.set_ylim(y_1, y_2)
self.axes.set_aspect('equal')
self.figure.canvas.draw()
# QtWidgets.QApplication.processEvents()
self.background = self.figure.canvas.copy_from_bbox(self.axes.bbox)
# self.image = self.axes.imshow(self.data[0].data, origin='upper',
# extent=(x_1, x_2, y_1, y_2))
self.image = imshow(self.axes, self.data[0].data, origin='upper',
extent=(x_1, x_2, y_1, y_2))
# This line prevents imshow from generating color values on the
# toolbar
self.image.format_cursor_data = lambda x: ""
self.update_graph()
self.cid = self.figure.canvas.mpl_connect('resize_event',
self.revent)
self.figure.canvas.draw()
def move(self, event):
"""
Mouse is moving.
Parameters
----------
        event : matplotlib MouseEvent
            Mouse motion event.
Returns
-------
None.
"""
if not self.data or self.gmode == 'Contour':
return
if event.inaxes == self.axes:
if self.flagresize is True:
self.flagresize = False
self.init_graph()
zval = [-999, -999, -999]
for i in self.data:
itlx = i.extent[0]
itly = i.extent[-1]
for j in range(3):
if i.dataid == self.hband[j]:
col = int((event.xdata - itlx)/i.xdim)
row = int((itly - event.ydata)/i.ydim)
zval[j] = i.data[row, col]
if self.gmode == 'Single Color Map':
bnum = self.update_hist_single(zval[0])
self.figure.canvas.restore_region(self.bbox_hist_red)
self.argb[0].draw_artist(self.htxt[0])
self.argb[0].draw_artist(self.hhist[0][2][bnum])
self.argb[0].draw_artist(self.clipvalu[0])
self.argb[0].draw_artist(self.clipvall[0])
self.figure.canvas.update()
if 'Ternary' in self.gmode:
bnum = self.update_hist_rgb(zval)
self.figure.canvas.restore_region(self.bbox_hist_red)
self.figure.canvas.restore_region(self.bbox_hist_green)
self.figure.canvas.restore_region(self.bbox_hist_blue)
for j in range(3):
self.argb[j].draw_artist(self.htxt[j])
self.argb[j].draw_artist(self.hhist[j][2][bnum[j]])
if self.clipvalu[j] is not None:
self.argb[j].draw_artist(self.clipvalu[j])
if self.clipvall[j] is not None:
self.argb[j].draw_artist(self.clipvall[j])
self.figure.canvas.update()
def update_contour(self):
"""
Update contours.
Returns
-------
None.
"""
x1, x2, y1, y2 = self.data[0].extent
self.image.set_visible(False)
for i in self.data:
if i.dataid == self.hband[0]:
dat = i.data.copy()
if self.htype == 'Histogram Equalization':
dat = histeq(dat)
elif self.clippercl > 0. or self.clippercu > 0.:
dat, _, _ = histcomp(dat, perc=self.clippercl,
uperc=self.clippercu)
self.image.set_data(dat)
xdim = (x2-x1)/dat.data.shape[1]/2
ydim = (y2-y1)/dat.data.shape[0]/2
xi = np.linspace(x1+xdim, x2-xdim, dat.data.shape[1])
yi = np.linspace(y2-ydim, y1+ydim, dat.data.shape[0])
self.cnt = self.axes.contour(xi, yi, dat, extent=(x1, x2, y1, y2),
linewidths=1, colors='k',
linestyles='solid')
self.cntf = self.axes.contourf(xi, yi, dat, extent=(x1, x2, y1, y2),
cmap=self.cbar)
self.ccbar = self.figure.colorbar(self.cntf, ax=self.axes)
self.figure.canvas.draw()
def update_graph(self):
"""
Update plot.
Returns
-------
None.
"""
if self.ccbar is not None:
self.ccbar.remove()
self.ccbar = None
if not self.data or self.gmode is None:
return
for i in range(3):
self.argb[i].clear()
if self.gmode == 'Single Color Map':
self.update_single_color_map()
if self.gmode == 'Contour':
self.update_contour()
if 'Ternary' in self.gmode:
self.update_rgb()
if self.gmode == 'Sunshade':
self.update_shade_plot()
def update_hist_rgb(self, zval):
"""
Update the rgb histograms.
Parameters
----------
zval : numpy array
Data values.
Returns
-------
bnum : list
Bin numbers.
"""
hcol = ['r', 'g', 'b']
if 'CMY' in self.gmode:
hcol = ['c', 'm', 'y']
hst = self.hhist
bnum = []
for i in range(3):
bins, patches = hst[i][1:]
for j in patches:
j.set_color(hcol[i])
if np.ma.is_masked(zval[i]) is True or zval[i] is None:
bnum.append(0)
self.update_hist_text(self.htxt[i], None)
continue
binnum = (bins < zval[i]).sum()-1
if -1 < binnum < len(patches):
patches[binnum].set_color('k')
bnum.append(binnum)
else:
bnum.append(0)
self.update_hist_text(self.htxt[i], zval[i])
return bnum
def update_hist_single(self, zval=None, hno=0):
"""
Update the color on a single histogram.
Parameters
----------
zval : float
Data value.
hno : int, optional
Histogram number. The default is 0.
Returns
-------
binnum : int
            Index of the histogram bin containing zval.
"""
hst = self.hhist[hno]
bins, patches = hst[1:]
        binave = np.linspace(0, 1, len(patches))  # one color per histogram bar
if hno == 0:
# bincol = self.cbar(binave)
bincol = self.newcmp(binave)
else:
bincol = cm.get_cmap('gray')(binave)
for j, _ in enumerate(patches):
patches[j].set_color(bincol[j])
# This section draws the black line.
if zval is None or np.ma.is_masked(zval) is True:
self.update_hist_text(self.htxt[hno], None)
return 0
binnum = (bins < zval).sum()-1
if binnum < 0 or binnum >= len(patches):
self.update_hist_text(self.htxt[hno], zval)
return 0
patches[binnum].set_color('k')
self.update_hist_text(self.htxt[hno], zval)
return binnum
def update_hist_text(self, hst, zval):
"""
Update the value on the histogram.
Parameters
----------
hst : histogram
Histogram.
zval : float
Data value.
Returns
-------
None.
"""
xmin, xmax, ymin, ymax = hst.axes.axis()
xnew = 0.95*(xmax-xmin)+xmin
ynew = 0.95*(ymax-ymin)+ymin
hst.set_position((xnew, ynew))
if zval is None:
hst.set_text('')
else:
hst.set_text(f'{zval:.4f}')
def update_rgb(self):
"""
Update the RGB Ternary Map.
Returns
-------
None.
"""
dat = [None, None, None]
for i in self.data:
for j in range(3):
if i.dataid == self.hband[j]:
dat[j] = i.data
red = dat[0]
green = dat[1]
blue = dat[2]
mask = np.logical_or(red.mask, green.mask)
mask = np.logical_or(mask, blue.mask)
lclip = [0, 0, 0]
uclip = [0, 0, 0]
if self.htype == 'Histogram Equalization':
red = histeq(red)
green = histeq(green)
blue = histeq(blue)
elif self.clippercl > 0. or self.clippercu > 0.:
red, lclip[0], uclip[0] = histcomp(red, perc=self.clippercl,
uperc=self.clippercu)
green, lclip[1], uclip[1] = histcomp(green, perc=self.clippercl,
uperc=self.clippercu)
blue, lclip[2], uclip[2] = histcomp(blue, perc=self.clippercl,
uperc=self.clippercu)
colormap = np.ma.ones((red.shape[0], red.shape[1], 4))
colormap[:, :, 0] = norm2(red)
colormap[:, :, 1] = norm2(green)
colormap[:, :, 2] = norm2(blue)
if 'CMY' in self.gmode:
colormap[:, :, 0] = (1-colormap[:, :, 0])*(1-self.kval)
colormap[:, :, 1] = (1-colormap[:, :, 1])*(1-self.kval)
colormap[:, :, 2] = (1-colormap[:, :, 2])*(1-self.kval)
snorm = self.update_shade_plot()
colormap[:, :, 0] *= snorm # red
colormap[:, :, 1] *= snorm # green
colormap[:, :, 2] *= snorm # blue
colormap[:, :, 3] = np.logical_not(mask)
self.image.set_data(colormap)
for i, hdata in enumerate([red, green, blue]):
if ((self.clippercu > 0. or self.clippercl > 0.) and
self.fullhist is True):
self.hhist[i] = self.argb[i].hist(dat[i].compressed(), 50,
ec='none')
self.clipvall[i] = self.argb[i].axvline(lclip[i], ls='--')
self.clipvalu[i] = self.argb[i].axvline(uclip[i], ls='--')
else:
self.hhist[i] = self.argb[i].hist(hdata.compressed(), 50,
ec='none')
self.htxt[i] = self.argb[i].text(0., 0., '', ha='right', va='top')
self.argb[i].set_xlim(self.hhist[i][1].min(),
self.hhist[i][1].max())
self.argb[i].set_ylim(0, self.hhist[i][0].max()*1.2)
self.update_hist_rgb([None, None, None])
self.axes.draw_artist(self.image)
for j in range(3):
for i in self.hhist[j][2]:
self.argb[j].draw_artist(i)
self.figure.canvas.update()
self.bbox_hist_red = self.figure.canvas.copy_from_bbox(
self.argb[0].bbox)
self.bbox_hist_green = self.figure.canvas.copy_from_bbox(
self.argb[1].bbox)
self.bbox_hist_blue = self.figure.canvas.copy_from_bbox(
self.argb[2].bbox)
for j in range(3):
self.argb[j].draw_artist(self.htxt[j])
self.figure.canvas.update()
self.figure.canvas.flush_events()
def update_single_color_map(self):
"""
Update the single color map.
Returns
-------
None.
"""
for i in self.data:
if i.dataid == self.hband[0]:
pseudo = i.data.copy()
mask = np.ma.getmaskarray(pseudo)
pseudoold = pseudo.copy()
lclip = pseudo.min()
uclip = pseudo.max()
if self.htype == 'Histogram Equalization':
pseudo = histeq(pseudo)
elif self.clippercl > 0. or self.clippercu > 0.:
pseudo, lclip, uclip = histcomp(pseudo, perc=self.clippercl,
uperc=self.clippercu)
colormap = self.cbar(norm2(pseudo))
snorm = self.update_shade_plot()
colormap[:, :, 0] *= snorm # red
colormap[:, :, 1] *= snorm # green
colormap[:, :, 2] *= snorm # blue
colormap[:, :, 3] = np.logical_not(mask)
self.image.set_data(colormap)
self.newcmp = self.cbar
if ((self.clippercu > 0. or self.clippercl > 0.) and
self.fullhist is True):
tmp = np.histogram(pseudoold.compressed(), 50)[1]
filt = (tmp > lclip) & (tmp < uclip)
bcnt = np.sum(filt)
cols = self.cbar(np.linspace(0, 1, bcnt))
tmp = np.nonzero(filt)
tmp1 = np.vstack(([cols[0]]*tmp[0][0], cols,
[cols[-1]]*(49-tmp[0][-1])))
self.newcmp = ListedColormap(tmp1)
self.hhist[0] = self.argb[0].hist(pseudoold.compressed(), 50,
ec='none')
else:
self.hhist[0] = self.argb[0].hist(pseudo.compressed(), 50,
ec='none')
self.htxt[0] = self.argb[0].text(0.0, 0.0, '', ha='right', va='top')
self.argb[0].set_xlim(self.hhist[0][1].min(), self.hhist[0][1].max())
self.argb[0].set_ylim(0, self.hhist[0][0].max()*1.2)
self.clipvall[0] = self.argb[0].axvline(lclip, ls='--')
self.clipvalu[0] = self.argb[0].axvline(uclip, ls='--')
self.update_hist_single()
self.axes.draw_artist(self.image)
for i in self.hhist[0][2]:
self.argb[0].draw_artist(i)
self.figure.canvas.update()
self.bbox_hist_red = self.figure.canvas.copy_from_bbox(
self.argb[0].bbox)
self.argb[0].draw_artist(self.htxt[0])
self.figure.canvas.update()
def update_shade_plot(self):
"""
Update sun shade plot.
Returns
-------
None.
"""
if self.shade is not True:
return 1
for i in self.sdata:
if i.dataid == self.hband[3]:
sun = i.data
sunshader = currentshader(sun.data, self.cell, self.theta,
self.phi, self.alpha)
snorm = norm2(sunshader)
return snorm
class MySunCanvas(FigureCanvasQTAgg):
"""
Canvas for the sunshading tool.
Attributes
----------
sun: matplotlib plot instance
plot of a circle 'o' showing where the sun is
axes: matplotlib axes instance
axes on which the sun is drawn
"""
def __init__(self, parent=None):
fig = Figure()
super().__init__(fig)
self.sun = None
self.axes = fig.add_subplot(111, polar=True)
self.setParent(parent)
self.setMaximumSize(200, 200)
self.setMinimumSize(120, 120)
def init_graph(self):
"""
Initialise graph.
Returns
-------
None.
"""
self.axes.clear()
# self.axes.xaxis.set_tick_params(labelsize=8)
self.axes.tick_params(labelleft=False, labelright=False)
self.axes.set_autoscaley_on(False)
self.axes.set_rmax(1.0)
self.axes.set_rmin(0.0)
self.axes.set_xticklabels([])
self.sun, = self.axes.plot(np.pi/4., cos(np.pi/4.), 'o')
self.figure.tight_layout()
self.figure.canvas.draw()
class PlotInterp(QtWidgets.QDialog):
"""
The primary class for the raster data interpretation module.
The main interface is set up from here, as well as monitoring of the mouse
over the sunshading.
The PlotInterp class allows for the display of raster data in a variety of
modes, as well as the export of that display to GeoTiff format.
Attributes
----------
parent : parent
reference to the parent routine
indata : dictionary
dictionary of input datasets
outdata : dictionary
dictionary of output datasets
self.mmc : pygmi.raster.ginterp.MyMplCanvas, FigureCanvas
main canvas containing the image
self.msc : pygmi.raster.ginterp.MySunCanvas, FigureCanvas
small canvas containing the sunshading control
"""
def __init__(self, parent=None):
super().__init__(parent)
if parent is None:
self.showprocesslog = print
else:
self.showprocesslog = parent.showprocesslog
self.indata = {}
self.outdata = {}
self.parent = parent
self.units = {}
self.mmc = MyMplCanvas(self)
self.msc = MySunCanvas(self)
self.btn_saveimg = QtWidgets.QPushButton('Save GeoTiff')
self.chk_histtype = QtWidgets.QCheckBox('Full histogram with clip '
'lines')
self.cbox_dtype = QtWidgets.QComboBox()
self.cbox_band1 = QtWidgets.QComboBox()
self.cbox_band2 = QtWidgets.QComboBox()
self.cbox_band3 = QtWidgets.QComboBox()
self.cbox_bands = QtWidgets.QComboBox()
self.cbox_htype = QtWidgets.QComboBox()
self.lineclipu = QtWidgets.QLineEdit()
self.lineclipl = QtWidgets.QLineEdit()
# self.cbox_hstype = QtWidgets.QComboBox()
self.cbox_cbar = QtWidgets.QComboBox(self)
self.kslider = QtWidgets.QSlider(QtCore.Qt.Horizontal) # cmyK
self.sslider = QtWidgets.QSlider(QtCore.Qt.Horizontal) # sunshade
self.aslider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
# self.slabel = QtWidgets.QLabel('Sunshade Stretch:')
self.label4 = QtWidgets.QLabel('Sunshade Data:')
self.labels = QtWidgets.QLabel('Sunshade Detail')
self.labela = QtWidgets.QLabel('Light Reflectance')
self.labelc = QtWidgets.QLabel('Color Bar:')
self.labelk = QtWidgets.QLabel('K value:')
# self.chkbox_sun = QtWidgets.QCheckBox('Apply Sun Shading:')
self.gbox_sun = QtWidgets.QGroupBox('Sunshading')
self.setupui()
self.change_cbar()
self.setFocus()
self.mmc.gmode = 'Single Color Map'
self.mmc.argb[0].set_visible(True)
self.mmc.argb[1].set_visible(False)
self.mmc.argb[2].set_visible(False)
# self.slabel.hide()
# self.cbox_hstype.hide()
self.cbox_band1.show()
self.cbox_band2.hide()
self.cbox_band3.hide()
self.sslider.hide()
self.aslider.hide()
self.kslider.hide()
self.msc.hide()
self.labela.hide()
self.labels.hide()
self.labelk.hide()
self.label4.hide()
self.cbox_bands.hide()
def setupui(self):
"""
Set up UI.
Returns
-------
None.
"""
helpdocs = menu_default.HelpButton('pygmi.raster.ginterp')
btn_apply = QtWidgets.QPushButton('Apply Histogram')
gbox1 = QtWidgets.QGroupBox('Display Type')
v1 = QtWidgets.QVBoxLayout()
gbox1.setLayout(v1)
gbox2 = QtWidgets.QGroupBox('Data Bands')
v2 = QtWidgets.QVBoxLayout()
gbox2.setLayout(v2)
gbox3 = QtWidgets.QGroupBox('Histogram Stretch')
v3 = QtWidgets.QVBoxLayout()
gbox3.setLayout(v3)
gbox1.setSizePolicy(QtWidgets.QSizePolicy.Fixed,
QtWidgets.QSizePolicy.Preferred)
gbox2.setSizePolicy(QtWidgets.QSizePolicy.Fixed,
QtWidgets.QSizePolicy.Preferred)
gbox3.setSizePolicy(QtWidgets.QSizePolicy.Fixed,
QtWidgets.QSizePolicy.Preferred)
self.gbox_sun.setSizePolicy(QtWidgets.QSizePolicy.Fixed,
QtWidgets.QSizePolicy.Preferred)
v4 = QtWidgets.QVBoxLayout()
self.gbox_sun.setLayout(v4)
self.gbox_sun.setCheckable(True)
self.gbox_sun.setChecked(False)
vbl_raster = QtWidgets.QVBoxLayout()
hbl_all = QtWidgets.QHBoxLayout(self)
vbl_right = QtWidgets.QVBoxLayout()
mpl_toolbar = NavigationToolbar2QT(self.mmc, self)
spacer = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Fixed,
QtWidgets.QSizePolicy.Expanding)
self.sslider.setMinimum(1)
self.sslider.setMaximum(100)
self.sslider.setValue(25)
self.aslider.setMinimum(1)
self.aslider.setMaximum(100)
self.aslider.setSingleStep(1)
self.aslider.setValue(75)
self.kslider.setMinimum(1)
self.kslider.setMaximum(100)
self.kslider.setValue(1)
# self.lineclip.setInputMask('00.0')
self.lineclipu.setPlaceholderText('Upper Percent Clip (0 Default)')
self.lineclipl.setPlaceholderText('Lower Percent Clip (0 Default)')
self.btn_saveimg.setAutoDefault(False)
helpdocs.setAutoDefault(False)
btn_apply.setAutoDefault(False)
tmp = sorted(m for m in colormaps())
self.cbox_cbar.addItem('jet')
self.cbox_cbar.addItem('viridis')
self.cbox_cbar.addItem('terrain')
self.cbox_cbar.addItems(tmp)
self.cbox_dtype.addItems(['Single Color Map', 'Contour', 'RGB Ternary',
'CMY Ternary'])
self.cbox_htype.addItems(['Linear with Percent Clip',
'Histogram Equalization'])
self.setWindowTitle('Raster Data Interpretation')
v1.addWidget(self.cbox_dtype)
v1.addWidget(self.labelk)
v1.addWidget(self.kslider)
vbl_raster.addWidget(gbox1)
v2.addWidget(self.cbox_band1)
v2.addWidget(self.cbox_band2)
v2.addWidget(self.cbox_band3)
vbl_raster.addWidget(gbox2)
v3.addWidget(self.cbox_htype)
v3.addWidget(self.lineclipl)
v3.addWidget(self.lineclipu)
v3.addWidget(self.chk_histtype)
v3.addWidget(btn_apply)
v3.addWidget(self.labelc)
v3.addWidget(self.cbox_cbar)
vbl_raster.addWidget(gbox3)
vbl_raster.addWidget(self.gbox_sun)
v4.addWidget(self.label4)
v4.addWidget(self.cbox_bands)
v4.addWidget(self.msc)
v4.addWidget(self.labels)
v4.addWidget(self.sslider)
v4.addWidget(self.labela)
v4.addWidget(self.aslider)
vbl_raster.addItem(spacer)
vbl_raster.addWidget(self.btn_saveimg)
vbl_raster.addWidget(helpdocs)
vbl_right.addWidget(self.mmc)
vbl_right.addWidget(mpl_toolbar)
hbl_all.addLayout(vbl_raster)
hbl_all.addLayout(vbl_right)
self.cbox_cbar.currentIndexChanged.connect(self.change_cbar)
self.cbox_dtype.currentIndexChanged.connect(self.change_dtype)
self.cbox_htype.currentIndexChanged.connect(self.change_htype)
self.sslider.sliderReleased.connect(self.change_sunsliders)
self.aslider.sliderReleased.connect(self.change_sunsliders)
self.kslider.sliderReleased.connect(self.change_dtype)
self.msc.figure.canvas.mpl_connect('button_press_event', self.move)
self.btn_saveimg.clicked.connect(self.save_img)
self.gbox_sun.clicked.connect(self.change_dtype)
btn_apply.clicked.connect(self.change_lclip)
self.lineclipu.returnPressed.connect(self.change_lclip_upper)
self.lineclipl.returnPressed.connect(self.change_lclip_lower)
self.chk_histtype.clicked.connect(self.change_dtype)
if self.parent is not None:
self.resize(self.parent.width(), self.parent.height())
def change_lclip(self):
"""
Change the linear clip percentage.
Returns
-------
None.
"""
self.change_lclip_lower()
self.change_lclip_upper()
def change_lclip_upper(self):
"""
Change the linear clip percentage.
Returns
-------
None.
"""
txt = self.lineclipu.text()
try:
clip = float(txt)
except ValueError:
if txt == '':
clip = 0.0
else:
clip = self.mmc.clippercu
self.lineclipu.setText(str(clip))
if clip < 0.0 or clip >= 100.0:
clip = self.mmc.clippercu
self.lineclipu.setText(str(clip))
self.mmc.clippercu = clip
self.change_dtype()
def change_lclip_lower(self):
"""
Change the linear clip percentage.
Returns
-------
None.
"""
txt = self.lineclipl.text()
try:
clip = float(txt)
except ValueError:
if txt == '':
clip = 0.0
else:
clip = self.mmc.clippercl
self.lineclipl.setText(str(clip))
if clip < 0.0 or clip >= 100.0:
clip = self.mmc.clippercl
self.lineclipl.setText(str(clip))
self.mmc.clippercl = clip
self.change_dtype()
def change_blue(self):
"""
Change the blue or third display band.
Returns
-------
None.
"""
txt = str(self.cbox_band3.currentText())
self.mmc.hband[2] = txt
self.mmc.init_graph()
def change_cbar(self):
"""
Change the color map for the color bar.
Returns
-------
None.
"""
txt = str(self.cbox_cbar.currentText())
self.mmc.cbar = cm.get_cmap(txt)
self.mmc.update_graph()
def change_dtype(self):
"""
Change display type.
Returns
-------
None.
"""
self.mmc.figure.canvas.mpl_disconnect(self.mmc.cid)
txt = str(self.cbox_dtype.currentText())
self.mmc.gmode = txt
self.cbox_band1.show()
self.mmc.fullhist = self.chk_histtype.isChecked()
if txt == 'Single Color Map':
# self.slabel.hide()
self.labelc.show()
self.labelk.hide()
self.cbox_band2.hide()
self.cbox_band3.hide()
self.cbox_cbar.show()
self.mmc.argb[0].set_visible(True)
self.mmc.argb[1].set_visible(False)
self.mmc.argb[2].set_visible(False)
self.sslider.hide()
self.aslider.hide()
self.kslider.hide()
if txt == 'Contour':
self.labelk.hide()
self.labelc.show()
self.cbox_band2.hide()
self.cbox_band3.hide()
self.cbox_cbar.show()
self.mmc.argb[0].set_visible(False)
self.mmc.argb[1].set_visible(False)
self.mmc.argb[2].set_visible(False)
self.sslider.hide()
self.aslider.hide()
self.kslider.hide()
self.gbox_sun.setChecked(False)
if 'Ternary' in txt:
self.labelk.hide()
self.labelc.hide()
self.cbox_band2.show()
self.cbox_band3.show()
self.cbox_cbar.hide()
self.mmc.argb[0].set_visible(True)
self.mmc.argb[1].set_visible(True)
self.mmc.argb[2].set_visible(True)
self.sslider.hide()
self.aslider.hide()
self.kslider.hide()
if 'CMY' in txt:
self.kslider.show()
self.labelk.show()
self.mmc.kval = float(self.kslider.value())/100.
if self.gbox_sun.isChecked():
self.msc.show()
self.label4.show()
self.cbox_bands.show()
self.sslider.show()
self.aslider.show()
self.labela.show()
self.labels.show()
self.mmc.cell = self.sslider.value()
self.mmc.alpha = float(self.aslider.value())/100.
self.mmc.shade = True
# self.cbox_bands.setCurrentText(self.cbox_band1.currentText())
self.msc.init_graph()
else:
self.msc.hide()
self.labela.hide()
self.labels.hide()
self.label4.hide()
self.cbox_bands.hide()
self.mmc.shade = False
self.mmc.cid = self.mmc.figure.canvas.mpl_connect('resize_event',
self.mmc.revent)
self.mmc.init_graph()
def change_sunsliders(self):
"""
Change the sun shading sliders.
Returns
-------
None.
"""
self.mmc.cell = self.sslider.value()
self.mmc.alpha = float(self.aslider.value())/100.
self.mmc.init_graph()
def change_green(self):
"""
Change the green or second band.
Returns
-------
None.
"""
txt = str(self.cbox_band2.currentText())
self.mmc.hband[1] = txt
self.mmc.init_graph()
def change_htype(self):
"""
Change the histogram stretch to apply to the normal data.
Returns
-------
None.
"""
txt = str(self.cbox_htype.currentText())
if txt == 'Histogram Equalization':
self.lineclipl.hide()
self.lineclipu.hide()
else:
self.lineclipl.show()
self.lineclipu.show()
self.mmc.htype = txt
self.mmc.init_graph()
def change_red(self):
"""
Change the red or first band.
Returns
-------
None.
"""
txt = str(self.cbox_band1.currentText())
self.mmc.hband[0] = txt
self.mmc.init_graph()
def change_sun(self):
"""
Change the sunshade band.
Returns
-------
None.
"""
txt = str(self.cbox_bands.currentText())
self.mmc.hband[3] = txt
self.mmc.init_graph()
def data_init(self):
"""
Initialise Data.
Entry point into routine. This entry point exists for
the case where data must be initialised before entering at the
standard 'settings' sub module.
Returns
-------
None.
"""
if 'Cluster' in self.indata:
self.indata = copy.deepcopy(self.indata)
self.indata = dataprep.cluster_to_raster(self.indata)
if 'Raster' not in self.indata:
return
self.indata['Raster'] = dataprep.merge(self.indata['Raster'])
data = self.indata['Raster']
sdata = self.indata['Raster']
for i in data:
self.units[i.dataid] = i.units
self.mmc.data = data
self.mmc.sdata = sdata
self.mmc.hband[0] = data[0].dataid
self.mmc.hband[1] = data[0].dataid
self.mmc.hband[2] = data[0].dataid
self.mmc.hband[3] = data[0].dataid
blist = []
for i in data:
blist.append(i.dataid)
try:
self.cbox_band1.currentIndexChanged.disconnect()
self.cbox_band2.currentIndexChanged.disconnect()
self.cbox_band3.currentIndexChanged.disconnect()
self.cbox_bands.currentIndexChanged.disconnect()
except TypeError:
pass
self.cbox_band1.clear()
self.cbox_band2.clear()
self.cbox_band3.clear()
self.cbox_bands.clear()
self.cbox_band1.addItems(blist)
self.cbox_band2.addItems(blist)
self.cbox_band3.addItems(blist)
self.cbox_bands.addItems(blist)
self.cbox_band1.currentIndexChanged.connect(self.change_red)
self.cbox_band2.currentIndexChanged.connect(self.change_green)
self.cbox_band3.currentIndexChanged.connect(self.change_blue)
self.cbox_bands.currentIndexChanged.connect(self.change_sun)
def move(self, event):
"""
Move event is used to track changes to the sunshading.
Parameters
----------
event : matplotlib button press event
Event returned by matplotlib when a button is pressed
Returns
-------
None.
"""
if event.inaxes == self.msc.axes:
self.msc.sun.set_xdata(event.xdata)
self.msc.sun.set_ydata(event.ydata)
self.msc.figure.canvas.draw()
phi = -event.xdata
theta = np.pi/2. - np.arccos(event.ydata)
self.mmc.phi = phi
self.mmc.theta = theta
self.mmc.update_graph()
def save_img(self):
"""
Save image as a GeoTiff.
Returns
-------
bool
True if successful, False otherwise.
"""
snorm = self.mmc.update_shade_plot()
ext = 'GeoTiff (*.tif)'
filename, _ = QtWidgets.QFileDialog.getSaveFileName(
self.parent, 'Save File', '.', ext)
if filename == '':
return False
text, okay = QtWidgets.QInputDialog.getText(
self, 'Colorbar', 'Enter length in inches:',
QtWidgets.QLineEdit.Normal, '4')
if not okay:
return False
try:
blen = float(text)
except ValueError:
QtWidgets.QMessageBox.warning(self.parent, 'Error',
'Invalid value.',
QtWidgets.QMessageBox.Ok)
return False
bwid = blen/16.
dtype = str(self.cbox_dtype.currentText())
rtext = 'Red'
gtext = 'Green'
btext = 'Blue'
if 'Ternary' not in dtype:
text, okay = QtWidgets.QInputDialog.getText(
self, 'Colorbar', 'Enter colorbar unit label:',
QtWidgets.QLineEdit.Normal,
self.units[str(self.cbox_band1.currentText())])
if not okay:
return False
else:
units = str(self.cbox_band1.currentText())
rtext, okay = QtWidgets.QInputDialog.getText(
self, 'Ternary Colorbar', 'Enter red/cyan label:',
QtWidgets.QLineEdit.Normal, units)
if not okay:
return False
units = str(self.cbox_band2.currentText())
gtext, okay = QtWidgets.QInputDialog.getText(
self, 'Ternary Colorbar', 'Enter green/magenta label:',
QtWidgets.QLineEdit.Normal, units)
if not okay:
return False
units = str(self.cbox_band3.currentText())
btext, okay = QtWidgets.QInputDialog.getText(
                self, 'Ternary Colorbar', 'Enter blue/yellow label:',
QtWidgets.QLineEdit.Normal, units)
if not okay:
return False
htype = str(self.cbox_htype.currentText())
clippercl = self.mmc.clippercl
clippercu = self.mmc.clippercu
if dtype == 'Single Color Map':
for i in self.mmc.data:
if i.dataid == self.mmc.hband[0]:
pseudo = i.data
if htype == 'Histogram Equalization':
pseudo = histeq(pseudo)
elif clippercl > 0. or clippercu > 0.:
pseudo, _, _ = histcomp(pseudo, perc=clippercl,
uperc=clippercu)
cmin = pseudo.min()
cmax = pseudo.max()
# The function below normalizes as well.
img = img2rgb(pseudo, self.mmc.cbar)
pseudo = None
img[:, :, 0] = img[:, :, 0]*snorm # red
img[:, :, 1] = img[:, :, 1]*snorm # green
img[:, :, 2] = img[:, :, 2]*snorm # blue
img = img.astype(np.uint8)
elif 'Ternary' in dtype:
dat = [None, None, None]
for i in self.mmc.data:
for j in range(3):
if i.dataid == self.mmc.hband[j]:
dat[j] = i.data
red = dat[0]
green = dat[1]
blue = dat[2]
mask = np.logical_or(red.mask, green.mask)
mask = np.logical_or(mask, blue.mask)
mask = np.logical_not(mask)
if htype == 'Histogram Equalization':
red = histeq(red)
green = histeq(green)
blue = histeq(blue)
elif clippercl > 0. or clippercu > 0.:
red, _, _ = histcomp(red, perc=clippercl, uperc=clippercu)
green, _, _ = histcomp(green, perc=clippercl, uperc=clippercu)
blue, _, _ = histcomp(blue, perc=clippercl, uperc=clippercu)
cmin = red.min()
cmax = red.max()
img = np.ones((red.shape[0], red.shape[1], 4), dtype=np.uint8)
img[:, :, 3] = mask*254+1
if 'CMY' in dtype:
img[:, :, 0] = (1-norm2(red))*254+1
img[:, :, 1] = (1-norm2(green))*254+1
img[:, :, 2] = (1-norm2(blue))*254+1
else:
img[:, :, 0] = norm255(red)
img[:, :, 1] = norm255(green)
img[:, :, 2] = norm255(blue)
img[:, :, 0] = img[:, :, 0]*snorm # red
img[:, :, 1] = img[:, :, 1]*snorm # green
img[:, :, 2] = img[:, :, 2]*snorm # blue
img = img.astype(np.uint8)
elif dtype == 'Contour':
pseudo = self.mmc.image._full_res.copy()
if htype == 'Histogram Equalization':
pseudo = histeq(pseudo)
elif clippercl > 0. or clippercu > 0.:
pseudo, _, _ = histcomp(pseudo, perc=clippercl,
uperc=clippercu)
cmin = pseudo.min()
cmax = pseudo.max()
if self.mmc.ccbar is not None:
self.mmc.ccbar.remove()
self.mmc.ccbar = None
self.mmc.figure.set_frameon(False)
self.mmc.axes.set_axis_off()
tmpsize = self.mmc.figure.get_size_inches()
self.mmc.figure.set_size_inches(tmpsize*3)
self.mmc.figure.canvas.draw()
img = np.frombuffer(self.mmc.figure.canvas.tostring_argb(),
dtype=np.uint8)
w, h = self.mmc.figure.canvas.get_width_height()
self.mmc.figure.set_size_inches(tmpsize)
self.mmc.figure.set_frameon(True)
self.mmc.axes.set_axis_on()
self.mmc.figure.canvas.draw()
img.shape = (h, w, 4)
img = np.roll(img, 3, axis=2)
cmask = np.ones(img.shape[1], dtype=bool)
for i in range(img.shape[1]):
if img[:, i, 3].mean() == 0:
cmask[i] = False
img = img[:, cmask]
rmask = np.ones(img.shape[0], dtype=bool)
for i in range(img.shape[0]):
if img[i, :, 3].mean() == 0:
rmask[i] = False
img = img[rmask]
mask = img[:, :, 3]
os.chdir(os.path.dirname(filename))
newimg = [copy.deepcopy(self.mmc.data[0]),
copy.deepcopy(self.mmc.data[0]),
copy.deepcopy(self.mmc.data[0]),
copy.deepcopy(self.mmc.data[0])]
newimg[0].data = img[:, :, 0]
newimg[1].data = img[:, :, 1]
newimg[2].data = img[:, :, 2]
newimg[3].data = img[:, :, 3]
mask = img[:, :, 3]
newimg[0].data[newimg[0].data == 0] = 1
newimg[1].data[newimg[1].data == 0] = 1
newimg[2].data[newimg[2].data == 0] = 1
newimg[0].data[mask <= 1] = 0
newimg[1].data[mask <= 1] = 0
newimg[2].data[mask <= 1] = 0
newimg[0].nullvalue = 0
newimg[1].nullvalue = 0
newimg[2].nullvalue = 0
newimg[3].nullvalue = 0
newimg[0].dataid = rtext
newimg[1].dataid = gtext
newimg[2].dataid = btext
newimg[3].dataid = 'Alpha'
iodefs.export_gdal(str(filename), newimg, 'GTiff')
# Section for colorbars
if 'Ternary' not in dtype:
txt = str(self.cbox_cbar.currentText())
cmap = cm.get_cmap(txt)
norm = mcolors.Normalize(vmin=cmin, vmax=cmax)
# Horizontal Bar
fig = Figure()
canvas = FigureCanvasQTAgg(fig)
fig.set_figwidth(blen)
fig.set_figheight(bwid+0.75)
fig.set_tight_layout(True)
ax = fig.gca()
cb = mcolorbar.ColorbarBase(ax, cmap=cmap, norm=norm,
orientation='horizontal')
cb.set_label(text)
fname = filename[:-4]+'_hcbar.png'
canvas.print_figure(fname, dpi=300)
# Vertical Bar
fig = Figure()
canvas = FigureCanvasQTAgg(fig)
fig.set_figwidth(bwid+1)
fig.set_figheight(blen)
fig.set_tight_layout(True)
ax = fig.gca()
cb = mcolorbar.ColorbarBase(ax, cmap=cmap, norm=norm,
orientation='vertical')
cb.set_label(text)
fname = filename[:-4]+'_vcbar.png'
canvas.print_figure(fname, dpi=300)
else:
fig = Figure(figsize=[blen, blen])
canvas = FigureCanvasQTAgg(fig)
fig.set_tight_layout(True)
tmp = np.array([[list(range(255))]*255])
tmp.shape = (255, 255)
tmp = np.transpose(tmp)
red = ndimage.rotate(tmp, 0)
green = ndimage.rotate(tmp, 120)
blue = ndimage.rotate(tmp, -120)
tmp = np.zeros((blue.shape[0], 90))
blue = np.hstack((tmp, blue))
green = np.hstack((green, tmp))
rtmp = np.zeros_like(blue)
j = 92
rtmp[:255, j:j+255] = red
red = rtmp
if 'RGB' in dtype:
red = red.max()-red
green = green.max()-green
blue = blue.max()-blue
data = np.transpose([red.flatten(),
green.flatten(),
blue.flatten()])
data.shape = (red.shape[0], red.shape[1], 3)
data = data[:221, 90:350]
ax = fig.gca()
ax.set_xlim((-100, 355))
ax.set_ylim((-100, 322))
path = Path([[0, 0], [127.5, 222], [254, 0], [0, 0]])
patch = PathPatch(path, facecolor='none')
ax.add_patch(patch)
data = data.astype(int)
im = ax.imshow(data, extent=(0, 255, 0, 222), clip_path=patch,
clip_on=True)
im.set_clip_path(patch)
ax.text(0, -5, gtext, horizontalalignment='center',
verticalalignment='top', size=20)
ax.text(254, -5, btext, horizontalalignment='center',
verticalalignment='top', size=20)
ax.text(127.5, 225, rtext, horizontalalignment='center',
size=20)
            ax.tick_params(top=False, right=False, bottom=False, left=False,
                           labelbottom=False, labelleft=False)
ax.axis('off')
fname = filename[:-4]+'_tern.png'
canvas.print_figure(fname, dpi=300)
QtWidgets.QMessageBox.information(self, 'Information',
'Save to GeoTiff is complete!',
QtWidgets.QMessageBox.Ok)
return True
def settings(self, nodialog=False):
"""
Entrypoint into class.
        This is called when the user double-clicks the routine from the
main PyGMI interface.
Returns
-------
bool
True if successful, False otherwise.
"""
if nodialog:
return True
if 'Raster' not in self.indata:
return False
if self.indata['Raster'][0].isrgb:
self.showprocesslog('RGB images cannot be used in this module.')
return False
self.mmc.init_graph()
self.msc.init_graph()
tmp = self.exec_()
if tmp == 0:
return False
return True
def loadproj(self, projdata):
"""
Load project data into class.
Parameters
----------
projdata : dictionary
Project data loaded from JSON project file.
Returns
-------
chk : bool
A check to see if settings was successfully run.
"""
return False
def saveproj(self):
"""
Save project data from class.
Returns
-------
projdata : dictionary
Project data to be saved to JSON project file.
"""
projdata = {}
# projdata['ftype'] = '2D Mean'
return projdata
def aspect2(data):
"""
Aspect of a dataset.
Parameters
----------
data : numpy MxN array
input data used for the aspect calculation
Returns
-------
adeg : numpy masked array
aspect in degrees
dzdx : numpy array
gradient in x direction
dzdy : numpy array
gradient in y direction
"""
cdy = np.array([[1., 2., 1.], [0., 0., 0.], [-1., -2., -1.]])
cdx = np.array([[1., 0., -1.], [2., 0., -2.], [1., 0., -1.]])
dzdx = ndimage.convolve(data, cdx) # Use convolve: matrix filtering
dzdy = ndimage.convolve(data, cdy) # 'valid' gets reduced array
dzdx = ne.evaluate('dzdx/8.')
dzdy = ne.evaluate('dzdy/8.')
# Aspect Section
pi = np.pi
adeg = ne.evaluate('90-arctan2(dzdy, -dzdx)*180./pi')
adeg = np.ma.masked_invalid(adeg)
adeg[np.ma.less(adeg, 0.)] += 360.
adeg[np.logical_and(dzdx == 0, dzdy == 0)] = -1.
return [adeg, dzdx, dzdy]
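# A small, uncalled sketch of how aspect2 is typically used ('dem' below is a
# synthetic stand-in for a real elevation or potential-field grid).
def _aspect2_example():
    dem = np.random.rand(50, 50)
    adeg, dzdx, dzdy = aspect2(dem)
    # adeg is a masked array of compass degrees (0-360); flat cells are -1
    return adeg, dzdx, dzdy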
def currentshader(data, cell, theta, phi, alpha):
"""
Blinn shader - used for sun shading.
Parameters
----------
data : numpy array
Dataset to be shaded.
cell : float
between 1 and 100 - controls sunshade detail.
theta : float
sun elevation (also called g in code below)
phi : float
azimuth
alpha : float
how much incident light is reflected (0 to 1)
Returns
-------
R : numpy array
array containing the shaded results.
"""
asp = aspect2(data)
n = 2
pinit = asp[1]
qinit = asp[2]
p = ne.evaluate('pinit/cell')
q = ne.evaluate('qinit/cell')
sqrt_1p2q2 = ne.evaluate('sqrt(1+p**2+q**2)')
cosg2 = cos(theta/2)
p0 = -cos(phi)*tan(theta)
q0 = -sin(phi)*tan(theta)
sqrttmp = ne.evaluate('(1+sqrt(1+p0**2+q0**2))')
p1 = ne.evaluate('p0 / sqrttmp')
q1 = ne.evaluate('q0 / sqrttmp')
cosi = ne.evaluate('((1+p0*p+q0*q)/(sqrt_1p2q2*sqrt(1+p0**2+q0**2)))')
coss = ne.evaluate('((1+p1*p+q1*q)/(sqrt_1p2q2*sqrt(1+p1**2+q1**2)))')
Ps = ne.evaluate('coss**n')
R = np.ma.masked_invalid(ne.evaluate('((1-alpha)+alpha*Ps)*cosi/cosg2'))
return R
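# A small, uncalled sketch of currentshader using the same defaults that
# MyMplCanvas uses (azimuth phi=-pi/4, elevation theta=pi/4, cell=100,
# alpha=0); 'dem' is again a synthetic stand-in.
def _currentshader_example():
    dem = np.random.rand(50, 50)
    shade = currentshader(dem, cell=100., theta=np.pi/4., phi=-np.pi/4.,
                          alpha=0.)
    return norm2(shade)  # 0..1 brightness multiplier for each RGB channel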
def histcomp(img, nbr_bins=None, perc=5., uperc=None):
"""
Histogram Compaction.
This compacts a % of the outliers in data, allowing for a cleaner, linear
representation of the data.
Parameters
----------
img : numpy array
data to compact
nbr_bins : int
number of bins to use in compaction
perc : float
percentage of histogram to clip. If uperc is not None, then this is
the lower percentage
uperc : float
upper percentage to clip. If uperc is None, then it is set to the
same value as perc
Returns
-------
img2 : numpy array
compacted array
"""
if uperc is None:
uperc = perc
if nbr_bins is None:
nbr_bins = max(img.shape)
nbr_bins = max(nbr_bins, 256)
# get image histogram
imask = np.ma.getmaskarray(img)
tmp = img.compressed()
imhist, bins = np.histogram(tmp, nbr_bins)
cdf = imhist.cumsum() # cumulative distribution function
    if cdf[-1] == 0:
        # degenerate case (no unmasked data); keep the three-value return
        # expected by the callers
        return img, None, None
cdf = cdf / float(cdf[-1]) # normalize
perc = perc/100.
uperc = uperc/100.
sindx = np.arange(nbr_bins)[cdf > perc][0]
if cdf[0] > (1-uperc):
eindx = 1
else:
eindx = np.arange(nbr_bins)[cdf < (1-uperc)][-1]+1
svalue = bins[sindx]
evalue = bins[eindx]
img2 = np.empty_like(img, dtype=np.float32)
np.copyto(img2, img)
filt = np.ma.less(img2, svalue)
img2[filt] = svalue
filt = np.ma.greater(img2, evalue)
img2[filt] = evalue
img2 = np.ma.array(img2, mask=imask)
return img2, svalue, evalue
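# A small, uncalled sketch of histcomp: clip the lowest and highest 2% of a
# masked grid so a linear color stretch is not dominated by outliers ('grid'
# is synthetic).
def _histcomp_example():
    grid = np.ma.masked_invalid(np.random.randn(100, 100))
    clipped, lo, hi = histcomp(grid, perc=2., uperc=2.)
    # values in 'clipped' are limited to the [lo, hi] range
    return clipped, lo, hi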
def histeq(img, nbr_bins=32768):
"""
Histogram Equalization.
    Equalizes the data histogram for display. This allows as much of the data
    as possible to be seen in the image, at the expense of knowing the real
    value of the data at a point. It bins the data equally, flattening the
    distribution.
Parameters
----------
img : numpy array
input data to be equalised
nbr_bins : integer
number of bins to be used in the calculation
Returns
-------
im2 : numpy array
output data
"""
# get image histogram
imhist, bins = np.histogram(img.compressed(), nbr_bins)
bins = (bins[1:]-bins[:-1])/2+bins[:-1] # get bin center point
cdf = imhist.cumsum() # cumulative distribution function
cdf = cdf - cdf[0] # subtract min, which is first val in cdf
cdf = cdf.astype(np.int64)
cdf = nbr_bins * cdf / cdf[-1] # norm to nbr_bins
# use linear interpolation of cdf to find new pixel values
im2 = np.interp(img, bins, cdf)
im2 = np.ma.array(im2, mask=img.mask)
return im2
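# A small, uncalled sketch of histeq: equalize the same kind of grid before
# display. The output spans roughly 0..nbr_bins and keeps the input mask.
def _histeq_example():
    grid = np.ma.masked_invalid(np.random.randn(100, 100))
    eq = histeq(grid)
    return norm2(eq)  # rescaled to 0..1, ready for a color map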
def img2rgb(img, cbar=cm.get_cmap('jet')):
"""
Image to RGB.
    Convert an image to a 4-channel RGBA color image.
Parameters
----------
img : numpy array
array to be converted to rgba image.
cbar : matplotlib color map
colormap to apply to the image
Returns
-------
im2 : numpy array
output rgba image
"""
im2 = img.copy()
im2 = norm255(im2)
cbartmp = cbar(range(255))
cbartmp = np.array([[0., 0., 0., 1.]]+cbartmp.tolist())*255
cbartmp = cbartmp.round()
cbartmp = cbartmp.astype(np.uint8)
im2 = cbartmp[im2]
im2[:, :, 3] = np.logical_not(img.mask)*254+1
return im2
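# A minimal usage sketch (assumes `img` is a masked 2-D array):
#     rgba = img2rgb(img)  # uses the default 'jet' colormap
#     # rgba has shape (rows, cols, 4) with uint8 channels; masked cells get
#     # an alpha of 1, valid cells an alpha of 255.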
def norm2(dat):
"""
Normalise array vector between 0 and 1.
Parameters
----------
dat : numpy array
array to be normalised
Returns
-------
out : numpy array of floats
normalised array
"""
datmin = float(dat.min())
datptp = float(dat.ptp())
out = np.ma.array(ne.evaluate('(dat-datmin)/datptp'))
out.mask = np.ma.getmaskarray(dat)
return out
def norm255(dat):
"""
Normalise array vector between 1 and 255.
Parameters
----------
dat : numpy array
array to be normalised
Returns
-------
out : numpy array of 8 bit integers
normalised array
"""
datmin = float(dat.min())
datptp = float(dat.ptp())
out = ne.evaluate('254*(dat-datmin)/datptp+1')
out = out.round()
out = out.astype(np.uint8)
return out
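# norm2 and norm255 differ only in output range and dtype (illustrative):
#     norm2(dat)    # masked float array scaled to 0..1
#     norm255(dat)  # uint8 array scaled to 1..255 (0 is left unused)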
def _testfn():
"""Test routine."""
import matplotlib
matplotlib.interactive(False)
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
'..//..')))
app = QtWidgets.QApplication(sys.argv)
data = iodefs.get_raster(r'E:\Workdata\raster\polygon cut get profile\mag_IGRFcorrected.ers')
# data = iodefs.get_raster(r'e:\WorkData\testdata.hdr')
tmp = PlotInterp(None)
tmp.indata['Raster'] = data
tmp.data_init()
tmp.settings()
if __name__ == "__main__":
_testfn()
| gpl-3.0 |
dingocuster/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
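# gamma is the RBF kernel coefficient in exp(-gamma * ||x - y||**2)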
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
arhik/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_cairo.py | 69 | 16706 | """
A Cairo backend for matplotlib
Author: Steve Chaplin
Cairo is a vector graphics library with cross-device output support.
Features of Cairo:
* anti-aliasing
* alpha channel
* saves image files as PNG, PostScript, PDF
http://cairographics.org
Requires (in order, all available from Cairo website):
cairo, pycairo
Naming Conventions
* classes MixedUpperCase
* variables lowerUpper
* functions underscore_separated
"""
from __future__ import division
import os, sys, warnings, gzip
import numpy as npy
def _fn_name(): return sys._getframe(1).f_code.co_name
try:
import cairo
except ImportError:
raise ImportError("Cairo backend requires that pycairo is installed.")
_version_required = (1,2,0)
if cairo.version_info < _version_required:
raise ImportError ("Pycairo %d.%d.%d is installed\n"
"Pycairo %d.%d.%d or later is required"
% (cairo.version_info + _version_required))
backend_version = cairo.version
del _version_required
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib.transforms import Bbox, Affine2D
from matplotlib.font_manager import ttfFontProperty
from matplotlib import rcParams
_debug = False
#_debug = True
# Image::color_conv(format) for draw_image()
if sys.byteorder == 'little':
BYTE_FORMAT = 0 # BGRA
else:
BYTE_FORMAT = 1 # ARGB
class RendererCairo(RendererBase):
fontweights = {
100 : cairo.FONT_WEIGHT_NORMAL,
200 : cairo.FONT_WEIGHT_NORMAL,
300 : cairo.FONT_WEIGHT_NORMAL,
400 : cairo.FONT_WEIGHT_NORMAL,
500 : cairo.FONT_WEIGHT_NORMAL,
600 : cairo.FONT_WEIGHT_BOLD,
700 : cairo.FONT_WEIGHT_BOLD,
800 : cairo.FONT_WEIGHT_BOLD,
900 : cairo.FONT_WEIGHT_BOLD,
'ultralight' : cairo.FONT_WEIGHT_NORMAL,
'light' : cairo.FONT_WEIGHT_NORMAL,
'normal' : cairo.FONT_WEIGHT_NORMAL,
'medium' : cairo.FONT_WEIGHT_NORMAL,
'semibold' : cairo.FONT_WEIGHT_BOLD,
'bold' : cairo.FONT_WEIGHT_BOLD,
'heavy' : cairo.FONT_WEIGHT_BOLD,
'ultrabold' : cairo.FONT_WEIGHT_BOLD,
'black' : cairo.FONT_WEIGHT_BOLD,
}
fontangles = {
'italic' : cairo.FONT_SLANT_ITALIC,
'normal' : cairo.FONT_SLANT_NORMAL,
'oblique' : cairo.FONT_SLANT_OBLIQUE,
}
def __init__(self, dpi):
"""
"""
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
self.dpi = dpi
self.text_ctx = cairo.Context (
cairo.ImageSurface (cairo.FORMAT_ARGB32,1,1))
self.mathtext_parser = MathTextParser('Cairo')
def set_ctx_from_surface (self, surface):
self.ctx = cairo.Context (surface)
self.ctx.save() # restore, save - when call new_gc()
def set_width_height(self, width, height):
self.width = width
self.height = height
self.matrix_flipy = cairo.Matrix (yy=-1, y0=self.height)
# use matrix_flipy for ALL rendering?
# - problem with text? - will need to switch matrix_flipy off, or do a
# font transform?
def _fill_and_stroke (self, ctx, fill_c, alpha):
if fill_c is not None:
ctx.save()
if len(fill_c) == 3:
ctx.set_source_rgba (fill_c[0], fill_c[1], fill_c[2], alpha)
else:
ctx.set_source_rgba (fill_c[0], fill_c[1], fill_c[2], alpha*fill_c[3])
ctx.fill_preserve()
ctx.restore()
ctx.stroke()
#@staticmethod
def convert_path(ctx, tpath):
for points, code in tpath.iter_segments():
if code == Path.MOVETO:
ctx.move_to(*points)
elif code == Path.LINETO:
ctx.line_to(*points)
elif code == Path.CURVE3:
ctx.curve_to(points[0], points[1],
points[0], points[1],
points[2], points[3])
elif code == Path.CURVE4:
ctx.curve_to(*points)
elif code == Path.CLOSEPOLY:
ctx.close_path()
convert_path = staticmethod(convert_path)
def draw_path(self, gc, path, transform, rgbFace=None):
if len(path.vertices) > 18980:
raise ValueError("The Cairo backend can not draw paths longer than 18980 points.")
ctx = gc.ctx
transform = transform + \
Affine2D().scale(1.0, -1.0).translate(0, self.height)
tpath = transform.transform_path(path)
ctx.new_path()
self.convert_path(ctx, tpath)
self._fill_and_stroke(ctx, rgbFace, gc.get_alpha())
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
# bbox - not currently used
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
im.flipud_out()
rows, cols, buf = im.color_conv (BYTE_FORMAT)
surface = cairo.ImageSurface.create_for_data (
buf, cairo.FORMAT_ARGB32, cols, rows, cols*4)
# function does not pass a 'gc' so use renderer.ctx
ctx = self.ctx
y = self.height - y - rows
ctx.set_source_surface (surface, x, y)
ctx.paint()
im.flipud_out()
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
# Note: x,y are device/display coords, not user-coords, unlike other
# draw_* methods
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
if ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
else:
ctx = gc.ctx
ctx.new_path()
ctx.move_to (x, y)
ctx.select_font_face (prop.get_name(),
self.fontangles [prop.get_style()],
self.fontweights[prop.get_weight()])
size = prop.get_size_in_points() * self.dpi / 72.0
ctx.save()
if angle:
ctx.rotate (-angle * npy.pi / 180)
ctx.set_font_size (size)
ctx.show_text (s.encode("utf-8"))
ctx.restore()
def _draw_mathtext(self, gc, x, y, s, prop, angle):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
ctx = gc.ctx
width, height, descent, glyphs, rects = self.mathtext_parser.parse(
s, self.dpi, prop)
ctx.save()
ctx.translate(x, y)
if angle:
ctx.rotate (-angle * npy.pi / 180)
for font, fontsize, s, ox, oy in glyphs:
ctx.new_path()
ctx.move_to(ox, oy)
fontProp = ttfFontProperty(font)
ctx.save()
ctx.select_font_face (fontProp.name,
self.fontangles [fontProp.style],
self.fontweights[fontProp.weight])
size = fontsize * self.dpi / 72.0
ctx.set_font_size(size)
ctx.show_text(s.encode("utf-8"))
ctx.restore()
for ox, oy, w, h in rects:
ctx.new_path()
ctx.rectangle (ox, oy, w, h)
ctx.set_source_rgb (0, 0, 0)
ctx.fill_preserve()
ctx.restore()
def flipy(self):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
return True
#return False # tried - all draw objects ok except text (and images?)
# which comes out mirrored!
def get_canvas_width_height(self):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
if ismath:
width, height, descent, fonts, used_characters = self.mathtext_parser.parse(
s, self.dpi, prop)
return width, height, descent
ctx = self.text_ctx
ctx.save()
ctx.select_font_face (prop.get_name(),
self.fontangles [prop.get_style()],
self.fontweights[prop.get_weight()])
# Cairo (says it) uses 1/96 inch user space units, ref: cairo_gstate.c
# but if /96.0 is used the font is too small
size = prop.get_size_in_points() * self.dpi / 72.0
# problem - scale remembers last setting and font can become
# enormous causing program to crash
# save/restore prevents the problem
ctx.set_font_size (size)
y_bearing, w, h = ctx.text_extents (s)[1:4]
ctx.restore()
return w, h, h + y_bearing
def new_gc(self):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
self.ctx.restore() # matches save() in set_ctx_from_surface()
self.ctx.save()
return GraphicsContextCairo (renderer=self)
def points_to_pixels(self, points):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
return points/72.0 * self.dpi
class GraphicsContextCairo(GraphicsContextBase):
_joind = {
'bevel' : cairo.LINE_JOIN_BEVEL,
'miter' : cairo.LINE_JOIN_MITER,
'round' : cairo.LINE_JOIN_ROUND,
}
_capd = {
'butt' : cairo.LINE_CAP_BUTT,
'projecting' : cairo.LINE_CAP_SQUARE,
'round' : cairo.LINE_CAP_ROUND,
}
def __init__(self, renderer):
GraphicsContextBase.__init__(self)
self.renderer = renderer
self.ctx = renderer.ctx
def set_alpha(self, alpha):
self._alpha = alpha
rgb = self._rgb
self.ctx.set_source_rgba (rgb[0], rgb[1], rgb[2], alpha)
#def set_antialiased(self, b):
# enable/disable anti-aliasing is not (yet) supported by Cairo
def set_capstyle(self, cs):
if cs in ('butt', 'round', 'projecting'):
self._capstyle = cs
self.ctx.set_line_cap (self._capd[cs])
else:
raise ValueError('Unrecognized cap style. Found %s' % cs)
def set_clip_rectangle(self, rectangle):
self._cliprect = rectangle
if rectangle is None:
return
x,y,w,h = rectangle.bounds
# pixel-aligned clip-regions are faster
x,y,w,h = round(x), round(y), round(w), round(h)
ctx = self.ctx
ctx.new_path()
ctx.rectangle (x, self.renderer.height - h - y, w, h)
ctx.clip ()
# Alternative: just set _cliprect here and actually set cairo clip rect
# in fill_and_stroke() inside ctx.save() ... ctx.restore()
def set_clip_path(self, path):
if path is not None:
tpath, affine = path.get_transformed_path_and_affine()
ctx = self.ctx
ctx.new_path()
affine = affine + Affine2D().scale(1.0, -1.0).translate(0.0, self.renderer.height)
tpath = affine.transform_path(tpath)
RendererCairo.convert_path(ctx, tpath)
ctx.clip()
def set_dashes(self, offset, dashes):
self._dashes = offset, dashes
        if dashes is None:
self.ctx.set_dash([], 0) # switch dashes off
else:
self.ctx.set_dash (
self.renderer.points_to_pixels (npy.asarray(dashes)), offset)
def set_foreground(self, fg, isRGB=None):
GraphicsContextBase.set_foreground(self, fg, isRGB)
if len(self._rgb) == 3:
self.ctx.set_source_rgb(*self._rgb)
else:
self.ctx.set_source_rgba(*self._rgb)
def set_graylevel(self, frac):
GraphicsContextBase.set_graylevel(self, frac)
if len(self._rgb) == 3:
self.ctx.set_source_rgb(*self._rgb)
else:
self.ctx.set_source_rgba(*self._rgb)
def set_joinstyle(self, js):
if js in ('miter', 'round', 'bevel'):
self._joinstyle = js
self.ctx.set_line_join(self._joind[js])
else:
raise ValueError('Unrecognized join style. Found %s' % js)
def set_linewidth(self, w):
self._linewidth = w
self.ctx.set_line_width (self.renderer.points_to_pixels(w))
def new_figure_manager(num, *args, **kwargs): # called by backends/__init__.py
"""
Create a new figure manager instance
"""
    if _debug: print 'backend_cairo.%s()' % _fn_name()
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasCairo(thisFig)
manager = FigureManagerBase(canvas, num)
return manager
class FigureCanvasCairo (FigureCanvasBase):
def print_png(self, fobj, *args, **kwargs):
width, height = self.get_width_height()
renderer = RendererCairo (self.figure.dpi)
renderer.set_width_height (width, height)
surface = cairo.ImageSurface (cairo.FORMAT_ARGB32, width, height)
renderer.set_ctx_from_surface (surface)
self.figure.draw (renderer)
surface.write_to_png (fobj)
def print_pdf(self, fobj, *args, **kwargs):
return self._save(fobj, 'pdf', *args, **kwargs)
def print_ps(self, fobj, *args, **kwargs):
return self._save(fobj, 'ps', *args, **kwargs)
def print_svg(self, fobj, *args, **kwargs):
return self._save(fobj, 'svg', *args, **kwargs)
def print_svgz(self, fobj, *args, **kwargs):
return self._save(fobj, 'svgz', *args, **kwargs)
def get_default_filetype(self):
return rcParams['cairo.format']
def _save (self, fo, format, **kwargs):
# save PDF/PS/SVG
orientation = kwargs.get('orientation', 'portrait')
dpi = 72
self.figure.dpi = dpi
w_in, h_in = self.figure.get_size_inches()
width_in_points, height_in_points = w_in * dpi, h_in * dpi
if orientation == 'landscape':
width_in_points, height_in_points = (height_in_points,
width_in_points)
if format == 'ps':
if not cairo.HAS_PS_SURFACE:
raise RuntimeError ('cairo has not been compiled with PS '
'support enabled')
surface = cairo.PSSurface (fo, width_in_points, height_in_points)
elif format == 'pdf':
if not cairo.HAS_PDF_SURFACE:
raise RuntimeError ('cairo has not been compiled with PDF '
'support enabled')
surface = cairo.PDFSurface (fo, width_in_points, height_in_points)
elif format in ('svg', 'svgz'):
if not cairo.HAS_SVG_SURFACE:
raise RuntimeError ('cairo has not been compiled with SVG '
'support enabled')
if format == 'svgz':
filename = fo
if is_string_like(fo):
fo = open(fo, 'wb')
fo = gzip.GzipFile(None, 'wb', fileobj=fo)
surface = cairo.SVGSurface (fo, width_in_points, height_in_points)
else:
warnings.warn ("unknown format: %s" % format)
return
# surface.set_dpi() can be used
renderer = RendererCairo (self.figure.dpi)
renderer.set_width_height (width_in_points, height_in_points)
renderer.set_ctx_from_surface (surface)
ctx = renderer.ctx
if orientation == 'landscape':
ctx.rotate (npy.pi/2)
ctx.translate (0, -height_in_points)
# cairo/src/cairo_ps_surface.c
# '%%Orientation: Portrait' is always written to the file header
# '%%Orientation: Landscape' would possibly cause problems
# since some printers would rotate again ?
# TODO:
# add portrait/landscape checkbox to FileChooser
self.figure.draw (renderer)
show_fig_border = False # for testing figure orientation and scaling
if show_fig_border:
ctx.new_path()
ctx.rectangle(0, 0, width_in_points, height_in_points)
ctx.set_line_width(4.0)
ctx.set_source_rgb(1,0,0)
ctx.stroke()
ctx.move_to(30,30)
ctx.select_font_face ('sans-serif')
ctx.set_font_size(20)
ctx.show_text('Origin corner')
ctx.show_page()
surface.finish()
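# A minimal usage sketch of this backend (figure contents and filename are
# illustrative):
#     from matplotlib.figure import Figure
#     fig = Figure(figsize=(4, 3))
#     ax = fig.add_subplot(111)
#     ax.plot([0, 1, 2], [0, 1, 4])
#     FigureCanvasCairo(fig).print_png('out.png')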
| agpl-3.0 |
ankurankan/scikit-learn | benchmarks/bench_isotonic.py | 268 | 3046 | """
Benchmarks of isotonic regression performance.
We generate a synthetic dataset of size 10^n, for n in [min, max], and
examine the time taken to run isotonic regression over the dataset.
The timings are then output to stdout, or visualized on a log-log scale
with matplotlib.
This allows the scaling of the algorithm with the problem size to be
visualized and understood.
"""
from __future__ import print_function
import numpy as np
import gc
from datetime import datetime
from sklearn.isotonic import isotonic_regression
from sklearn.utils.bench import total_seconds
import matplotlib.pyplot as plt
import argparse
def generate_perturbed_logarithm_dataset(size):
    return np.random.randint(-50, 50, size=size) \
        + 50. * np.log(1 + np.arange(size))
def generate_logistic_dataset(size):
X = np.sort(np.random.normal(size=size))
return np.random.random(size=size) < 1.0 / (1.0 + np.exp(-X))
DATASET_GENERATORS = {
'perturbed_logarithm': generate_perturbed_logarithm_dataset,
'logistic': generate_logistic_dataset
}
def bench_isotonic_regression(Y):
"""
Runs a single iteration of isotonic regression on the input data,
and reports the total time taken (in seconds).
"""
gc.collect()
tstart = datetime.now()
isotonic_regression(Y)
delta = datetime.now() - tstart
return total_seconds(delta)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Isotonic Regression benchmark tool")
parser.add_argument('--iterations', type=int, required=True,
help="Number of iterations to average timings over "
"for each problem size")
parser.add_argument('--log_min_problem_size', type=int, required=True,
help="Base 10 logarithm of the minimum problem size")
parser.add_argument('--log_max_problem_size', type=int, required=True,
help="Base 10 logarithm of the maximum problem size")
parser.add_argument('--show_plot', action='store_true',
help="Plot timing output with matplotlib")
parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(),
required=True)
args = parser.parse_args()
timings = []
for exponent in range(args.log_min_problem_size,
args.log_max_problem_size):
n = 10 ** exponent
Y = DATASET_GENERATORS[args.dataset](n)
time_per_iteration = \
[bench_isotonic_regression(Y) for i in range(args.iterations)]
timing = (n, np.mean(time_per_iteration))
timings.append(timing)
# If we're not plotting, dump the timing to stdout
if not args.show_plot:
print(n, np.mean(time_per_iteration))
if args.show_plot:
plt.plot(*zip(*timings))
plt.title("Average time taken running isotonic regression")
plt.xlabel('Number of observations')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.loglog()
plt.show()
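# Example invocation (arguments are illustrative):
#     python bench_isotonic.py --iterations 10 --log_min_problem_size 2 \
#         --log_max_problem_size 5 --dataset logistic --show_plot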
| bsd-3-clause |
noisebridge/PythonClass | instructors/need-rework/6_socrata_matplotlib_workshop/date-demo.py | 3 | 1667 | #!/usr/bin/env python
"""
Show how to make date plots in matplotlib using date tick locators and
formatters. See major_minor_demo1.py for more information on
controlling major and minor ticks
All matplotlib date plotting is done by converting date instances into
days since 0001-01-01 UTC. The conversion, tick locating and
formatting is done behind the scenes so this is mostly transparent to
you. The dates module provides several converter functions date2num
and num2date
"""
import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.cbook as cbook
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
yearsFmt = mdates.DateFormatter('%Y')
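# date2num/num2date convert between datetimes and the float day count used
# internally, e.g. (illustrative):
#     d = mdates.date2num(datetime.datetime(2004, 8, 19))
#     mdates.num2date(d)  # -> a timezone-aware datetime for 2004-08-19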
# load a numpy record array from yahoo csv data with fields date,
# open, close, volume, adj_close from the mpl-data/example directory.
# The record array stores python datetime.date as an object array in
# the date column
datafile = cbook.get_sample_data('goog.npy')
r = np.load(datafile).view(np.recarray)
fig, ax = plt.subplots()
ax.plot(r.date, r.adj_close)
# format the ticks
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt)
ax.xaxis.set_minor_locator(months)
datemin = datetime.date(r.date.min().year, 1, 1)
datemax = datetime.date(r.date.max().year+1, 1, 1)
ax.set_xlim(datemin, datemax)
# format the coords message box
def price(x): return '$%1.2f'%x
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.format_ydata = price
ax.grid(True)
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
fig.autofmt_xdate()
plt.show()
| mit |
breznak/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/axes.py | 69 | 259904 | from __future__ import division, generators
import math, sys, warnings, datetime, new
import numpy as np
from numpy import ma
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.artist as martist
import matplotlib.axis as maxis
import matplotlib.cbook as cbook
import matplotlib.collections as mcoll
import matplotlib.colors as mcolors
import matplotlib.contour as mcontour
import matplotlib.dates as mdates
import matplotlib.font_manager as font_manager
import matplotlib.image as mimage
import matplotlib.legend as mlegend
import matplotlib.lines as mlines
import matplotlib.mlab as mlab
import matplotlib.patches as mpatches
import matplotlib.quiver as mquiver
import matplotlib.scale as mscale
import matplotlib.table as mtable
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
iterable = cbook.iterable
is_string_like = cbook.is_string_like
def _process_plot_format(fmt):
"""
    Process a matlab(TM) style color/line style format string. Return a
    (*linestyle*, *marker*, *color*) tuple as a result of the processing;
    components not specified in the format string are returned as *None* or
    fall back to the rc defaults. Example format strings include:
* 'ko': black circles
* '.b': blue dots
* 'r--': red dashed lines
.. seealso::
:func:`~matplotlib.Line2D.lineStyles` and
:func:`~matplotlib.pyplot.colors`:
for all possible styles and color format string.
"""
linestyle = None
marker = None
color = None
# Is fmt just a colorspec?
try:
color = mcolors.colorConverter.to_rgb(fmt)
return linestyle, marker, color # Yes.
except ValueError:
pass # No, not just a color.
# handle the multi char special cases and strip them from the
# string
if fmt.find('--')>=0:
linestyle = '--'
fmt = fmt.replace('--', '')
if fmt.find('-.')>=0:
linestyle = '-.'
fmt = fmt.replace('-.', '')
if fmt.find(' ')>=0:
linestyle = 'None'
fmt = fmt.replace(' ', '')
chars = [c for c in fmt]
for c in chars:
if c in mlines.lineStyles:
if linestyle is not None:
raise ValueError(
'Illegal format string "%s"; two linestyle symbols' % fmt)
linestyle = c
elif c in mlines.lineMarkers:
if marker is not None:
raise ValueError(
'Illegal format string "%s"; two marker symbols' % fmt)
marker = c
elif c in mcolors.colorConverter.colors:
if color is not None:
raise ValueError(
'Illegal format string "%s"; two color symbols' % fmt)
color = c
else:
raise ValueError(
'Unrecognized character %c in format string' % c)
if linestyle is None and marker is None:
linestyle = rcParams['lines.linestyle']
if linestyle is None:
linestyle = 'None'
if marker is None:
marker = 'None'
return linestyle, marker, color
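# For example (illustrative): _process_plot_format('ro--') returns
# ('--', 'o', 'r'), and _process_plot_format('k') returns
# (None, None, (0.0, 0.0, 0.0)).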
def set_default_color_cycle(clist):
"""
Change the default cycle of colors that will be used by the plot
command. This must be called before creating the
:class:`Axes` to which it will apply; it will
apply to all future axes.
*clist* is a sequence of mpl color specifiers
"""
_process_plot_var_args.defaultColors = clist[:]
rcParams['lines.color'] = clist[0]
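# For example (illustrative):
#     set_default_color_cycle(['r', 'g', 'b'])  # future axes cycle red, green, blue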
class _process_plot_var_args:
"""
Process variable length arguments to the plot command, so that
plot commands like the following are supported::
plot(t, s)
plot(t1, s1, t2, s2)
plot(t1, s1, 'ko', t2, s2)
plot(t1, s1, 'ko', t2, s2, 'r--', t3, e3)
an arbitrary number of *x*, *y*, *fmt* are allowed
"""
defaultColors = ['b','g','r','c','m','y','k']
def __init__(self, axes, command='plot'):
self.axes = axes
self.command = command
self._clear_color_cycle()
def _clear_color_cycle(self):
self.colors = _process_plot_var_args.defaultColors[:]
# if the default line color is a color format string, move it up
        # in the queue
try: ind = self.colors.index(rcParams['lines.color'])
except ValueError:
self.firstColor = rcParams['lines.color']
else:
self.colors[0], self.colors[ind] = self.colors[ind], self.colors[0]
self.firstColor = self.colors[0]
self.Ncolors = len(self.colors)
self.count = 0
def set_color_cycle(self, clist):
self.colors = clist[:]
self.firstColor = self.colors[0]
self.Ncolors = len(self.colors)
self.count = 0
def _get_next_cycle_color(self):
if self.count==0:
color = self.firstColor
else:
color = self.colors[int(self.count % self.Ncolors)]
self.count += 1
return color
def __call__(self, *args, **kwargs):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
xunits = kwargs.pop( 'xunits', self.axes.xaxis.units)
yunits = kwargs.pop( 'yunits', self.axes.yaxis.units)
if xunits!=self.axes.xaxis.units:
self.axes.xaxis.set_units(xunits)
if yunits!=self.axes.yaxis.units:
self.axes.yaxis.set_units(yunits)
ret = self._grab_next_args(*args, **kwargs)
return ret
def set_lineprops(self, line, **kwargs):
assert self.command == 'plot', 'set_lineprops only works with "plot"'
for key, val in kwargs.items():
funcName = "set_%s"%key
if not hasattr(line,funcName):
raise TypeError, 'There is no line property "%s"'%key
func = getattr(line,funcName)
func(val)
def set_patchprops(self, fill_poly, **kwargs):
assert self.command == 'fill', 'set_patchprops only works with "fill"'
for key, val in kwargs.items():
funcName = "set_%s"%key
if not hasattr(fill_poly,funcName):
raise TypeError, 'There is no patch property "%s"'%key
func = getattr(fill_poly,funcName)
func(val)
def _xy_from_y(self, y):
if self.axes.yaxis is not None:
b = self.axes.yaxis.update_units(y)
if b: return np.arange(len(y)), y, False
if not ma.isMaskedArray(y):
y = np.asarray(y)
if len(y.shape) == 1:
y = y[:,np.newaxis]
nr, nc = y.shape
x = np.arange(nr)
if len(x.shape) == 1:
x = x[:,np.newaxis]
return x,y, True
def _xy_from_xy(self, x, y):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
bx = self.axes.xaxis.update_units(x)
by = self.axes.yaxis.update_units(y)
# right now multicol is not supported if either x or y are
# unit enabled but this can be fixed..
if bx or by: return x, y, False
x = ma.asarray(x)
y = ma.asarray(y)
if len(x.shape) == 1:
x = x[:,np.newaxis]
if len(y.shape) == 1:
y = y[:,np.newaxis]
nrx, ncx = x.shape
nry, ncy = y.shape
assert nrx == nry, 'Dimensions of x and y are incompatible'
if ncx == ncy:
return x, y, True
if ncx == 1:
x = np.repeat(x, ncy, axis=1)
if ncy == 1:
y = np.repeat(y, ncx, axis=1)
assert x.shape == y.shape, 'Dimensions of x and y are incompatible'
return x, y, True
def _plot_1_arg(self, y, **kwargs):
assert self.command == 'plot', 'fill needs at least 2 arguments'
ret = []
x, y, multicol = self._xy_from_y(y)
if multicol:
for j in xrange(y.shape[1]):
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y[:,j],
color = color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
else:
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color = color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
return ret
def _plot_2_args(self, tup2, **kwargs):
ret = []
if is_string_like(tup2[1]):
assert self.command == 'plot', ('fill needs at least 2 non-string '
'arguments')
y, fmt = tup2
x, y, multicol = self._xy_from_y(y)
linestyle, marker, color = _process_plot_format(fmt)
def makeline(x, y):
_color = color
if _color is None:
_color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=_color,
linestyle=linestyle, marker=marker,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
if multicol:
for j in xrange(y.shape[1]):
makeline(x[:,j], y[:,j])
else:
makeline(x, y)
return ret
else:
x, y = tup2
x, y, multicol = self._xy_from_xy(x, y)
def makeline(x, y):
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
def makefill(x, y):
x = self.axes.convert_xunits(x)
y = self.axes.convert_yunits(y)
facecolor = self._get_next_cycle_color()
seg = mpatches.Polygon(np.hstack(
(x[:,np.newaxis],y[:,np.newaxis])),
facecolor = facecolor,
fill=True,
closed=closed
)
self.set_patchprops(seg, **kwargs)
ret.append(seg)
if self.command == 'plot':
func = makeline
else:
closed = kwargs.get('closed', True)
func = makefill
if multicol:
for j in xrange(y.shape[1]):
func(x[:,j], y[:,j])
else:
func(x, y)
return ret
def _plot_3_args(self, tup3, **kwargs):
ret = []
x, y, fmt = tup3
x, y, multicol = self._xy_from_xy(x, y)
linestyle, marker, color = _process_plot_format(fmt)
def makeline(x, y):
_color = color
if _color is None:
_color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=_color,
linestyle=linestyle, marker=marker,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
def makefill(x, y):
facecolor = color
x = self.axes.convert_xunits(x)
y = self.axes.convert_yunits(y)
seg = mpatches.Polygon(np.hstack(
(x[:,np.newaxis],y[:,np.newaxis])),
facecolor = facecolor,
fill=True,
closed=closed
)
self.set_patchprops(seg, **kwargs)
ret.append(seg)
if self.command == 'plot':
func = makeline
else:
closed = kwargs.get('closed', True)
func = makefill
if multicol:
for j in xrange(y.shape[1]):
func(x[:,j], y[:,j])
else:
func(x, y)
return ret
def _grab_next_args(self, *args, **kwargs):
remaining = args
while 1:
if len(remaining)==0: return
if len(remaining)==1:
for seg in self._plot_1_arg(remaining[0], **kwargs):
yield seg
remaining = []
continue
if len(remaining)==2:
for seg in self._plot_2_args(remaining, **kwargs):
yield seg
remaining = []
continue
if len(remaining)==3:
if not is_string_like(remaining[2]):
raise ValueError, 'third arg must be a format string'
for seg in self._plot_3_args(remaining, **kwargs):
yield seg
remaining=[]
continue
if is_string_like(remaining[2]):
for seg in self._plot_3_args(remaining[:3], **kwargs):
yield seg
remaining=remaining[3:]
else:
for seg in self._plot_2_args(remaining[:2], **kwargs):
yield seg
remaining=remaining[2:]
class Axes(martist.Artist):
"""
The :class:`Axes` contains most of the figure elements:
:class:`~matplotlib.axis.Axis`, :class:`~matplotlib.axis.Tick`,
:class:`~matplotlib.lines.Line2D`, :class:`~matplotlib.text.Text`,
:class:`~matplotlib.patches.Polygon`, etc., and sets the
coordinate system.
The :class:`Axes` instance supports callbacks through a callbacks
attribute which is a :class:`~matplotlib.cbook.CallbackRegistry`
instance. The events you can connect to are 'xlim_changed' and
'ylim_changed' and the callback will be called with func(*ax*)
where *ax* is the :class:`Axes` instance.
"""
name = "rectilinear"
_shared_x_axes = cbook.Grouper()
_shared_y_axes = cbook.Grouper()
def __str__(self):
return "Axes(%g,%g;%gx%g)" % tuple(self._position.bounds)
def __init__(self, fig, rect,
axisbg = None, # defaults to rc axes.facecolor
frameon = True,
sharex=None, # use Axes instance's xaxis info
sharey=None, # use Axes instance's yaxis info
label='',
**kwargs
):
"""
Build an :class:`Axes` instance in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* in
:class:`~matplotlib.figure.Figure` coordinates
Optional keyword arguments:
================ =========================================
Keyword Description
================ =========================================
*adjustable* [ 'box' | 'datalim' ]
*alpha* float: the alpha transparency
*anchor* [ 'C', 'SW', 'S', 'SE', 'E', 'NE', 'N',
'NW', 'W' ]
*aspect* [ 'auto' | 'equal' | aspect_ratio ]
*autoscale_on* [ *True* | *False* ] whether or not to
autoscale the *viewlim*
*axis_bgcolor* any matplotlib color, see
:func:`~matplotlib.pyplot.colors`
*axisbelow* draw the grids and ticks below the other
artists
*cursor_props* a (*float*, *color*) tuple
*figure* a :class:`~matplotlib.figure.Figure`
instance
*frame_on* a boolean - draw the axes frame
*label* the axes label
*navigate* [ *True* | *False* ]
*navigate_mode* [ 'PAN' | 'ZOOM' | None ] the navigation
toolbar button status
*position* [left, bottom, width, height] in
class:`~matplotlib.figure.Figure` coords
*sharex* an class:`~matplotlib.axes.Axes` instance
to share the x-axis with
*sharey* an class:`~matplotlib.axes.Axes` instance
to share the y-axis with
*title* the title string
*visible* [ *True* | *False* ] whether the axes is
visible
*xlabel* the xlabel
*xlim* (*xmin*, *xmax*) view limits
*xscale* [%(scale)s]
*xticklabels* sequence of strings
*xticks* sequence of floats
*ylabel* the ylabel strings
*ylim* (*ymin*, *ymax*) view limits
*yscale* [%(scale)s]
*yticklabels* sequence of strings
*yticks* sequence of floats
================ =========================================
""" % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])}
martist.Artist.__init__(self)
if isinstance(rect, mtransforms.Bbox):
self._position = rect
else:
self._position = mtransforms.Bbox.from_bounds(*rect)
self._originalPosition = self._position.frozen()
self.set_axes(self)
self.set_aspect('auto')
self._adjustable = 'box'
self.set_anchor('C')
self._sharex = sharex
self._sharey = sharey
if sharex is not None:
self._shared_x_axes.join(self, sharex)
if sharex._adjustable == 'box':
sharex._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
if sharey is not None:
self._shared_y_axes.join(self, sharey)
if sharey._adjustable == 'box':
sharey._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
self.set_label(label)
self.set_figure(fig)
# this call may differ for non-sep axes, eg polar
self._init_axis()
if axisbg is None: axisbg = rcParams['axes.facecolor']
self._axisbg = axisbg
self._frameon = frameon
self._axisbelow = rcParams['axes.axisbelow']
self._hold = rcParams['axes.hold']
self._connected = {} # a dict from events to (id, func)
self.cla()
# funcs used to format x and y - fall back on major formatters
self.fmt_xdata = None
self.fmt_ydata = None
self.set_cursor_props((1,'k')) # set the cursor properties for axes
self._cachedRenderer = None
self.set_navigate(True)
self.set_navigate_mode(None)
if len(kwargs): martist.setp(self, **kwargs)
if self.xaxis is not None:
self._xcid = self.xaxis.callbacks.connect('units finalize',
self.relim)
if self.yaxis is not None:
self._ycid = self.yaxis.callbacks.connect('units finalize',
self.relim)
def get_window_extent(self, *args, **kwargs):
'''
get the axes bounding box in display space; *args* and
*kwargs* are empty
'''
return self.bbox
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
self._update_transScale()
def set_figure(self, fig):
"""
Set the class:`~matplotlib.axes.Axes` figure
accepts a class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
self.bbox = mtransforms.TransformedBbox(self._position, fig.transFigure)
#these will be updated later as data is added
self.dataLim = mtransforms.Bbox.unit()
self.viewLim = mtransforms.Bbox.unit()
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
self._set_lim_and_transforms()
def _set_lim_and_transforms(self):
"""
set the *dataLim* and *viewLim*
:class:`~matplotlib.transforms.Bbox` attributes and the
*transScale*, *transData*, *transLimits* and *transAxes*
transformations.
"""
self.transAxes = mtransforms.BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor
# It is assumed that this part will have non-linear components
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
# An affine transformation on the data, generally to limit the
# range of the axes
self.transLimits = mtransforms.BboxTransformFrom(
mtransforms.TransformedBbox(self.viewLim, self.transScale))
# The parentheses are important for efficiency here -- they
# group the last two (which are usually affines) separately
# from the first (which, with log-scaling can be non-affine).
self.transData = self.transScale + (self.transLimits + self.transAxes)
self._xaxis_transform = mtransforms.blended_transform_factory(
self.axes.transData, self.axes.transAxes)
self._yaxis_transform = mtransforms.blended_transform_factory(
self.axes.transAxes, self.axes.transData)
def get_xaxis_transform(self):
"""
Get the transformation used for drawing x-axis labels, ticks
and gridlines. The x-direction is in data coordinates and the
y-direction is in axis coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing x-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in data coordinates
and the y-direction is in axis coordinates. Returns a
3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._xaxis_transform +
mtransforms.ScaledTranslation(0, -1 * pad_points / 72.0,
self.figure.dpi_scale_trans),
"top", "center")
def get_xaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary x-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in data
coordinates and the y-direction is in axis coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._xaxis_transform +
mtransforms.ScaledTranslation(0, pad_points / 72.0,
self.figure.dpi_scale_trans),
"bottom", "center")
def get_yaxis_transform(self):
"""
Get the transformation used for drawing y-axis labels, ticks
and gridlines. The x-direction is in axis coordinates and the
y-direction is in data coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing y-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in axis coordinates
and the y-direction is in data coordinates. Returns a 3-tuple
of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._yaxis_transform +
mtransforms.ScaledTranslation(-1 * pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "right")
def get_yaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary y-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in axis
coordinates and the y-direction is in data coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._yaxis_transform +
mtransforms.ScaledTranslation(pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "left")
def _update_transScale(self):
self.transScale.set(
mtransforms.blended_transform_factory(
self.xaxis.get_transform(), self.yaxis.get_transform()))
if hasattr(self, "lines"):
for line in self.lines:
line._transformed_path.invalidate()
def get_position(self, original=False):
'Return the a copy of the axes rectangle as a Bbox'
if original:
return self._originalPosition.frozen()
else:
return self._position.frozen()
def set_position(self, pos, which='both'):
"""
Set the axes position with::
pos = [left, bottom, width, height]
in relative 0,1 coords, or *pos* can be a
:class:`~matplotlib.transforms.Bbox`
There are two position variables: one which is ultimately
used, but which may be modified by :meth:`apply_aspect`, and a
second which is the starting point for :meth:`apply_aspect`.
Optional keyword arguments:
*which*
========== ====================
value description
========== ====================
'active' to change the first
'original' to change the second
'both' to change both
========== ====================
"""
if not isinstance(pos, mtransforms.BboxBase):
pos = mtransforms.Bbox.from_bounds(*pos)
if which in ('both', 'active'):
self._position.set(pos)
if which in ('both', 'original'):
self._originalPosition.set(pos)
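    # For example (illustrative): ax.set_position([0.1, 0.1, 0.8, 0.8]) places
    # the axes with a 10% margin on every side, in figure-relative coordinates.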
def reset_position(self):
'Make the original position the active position'
pos = self.get_position(original=True)
self.set_position(pos, which='active')
def _set_artist_props(self, a):
'set the boilerplate props for artists added to axes'
a.set_figure(self.figure)
if not a.is_transform_set():
a.set_transform(self.transData)
a.set_axes(self)
def _gen_axes_patch(self):
"""
Returns the patch used to draw the background of the axes. It
is also used as the clipping path for any data elements on the
axes.
In the standard axes, this is a rectangle, but in other
projections it may not be.
.. note::
Intended to be overridden by new projection types.
"""
return mpatches.Rectangle((0.0, 0.0), 1.0, 1.0)
def cla(self):
'Clear the current axes'
# Note: this is called by Axes.__init__()
self.xaxis.cla()
self.yaxis.cla()
self.ignore_existing_data_limits = True
self.callbacks = cbook.CallbackRegistry(('xlim_changed',
'ylim_changed'))
if self._sharex is not None:
# major and minor are class instances with
# locator and formatter attributes
self.xaxis.major = self._sharex.xaxis.major
self.xaxis.minor = self._sharex.xaxis.minor
x0, x1 = self._sharex.get_xlim()
self.set_xlim(x0, x1, emit=False)
self.xaxis.set_scale(self._sharex.xaxis.get_scale())
else:
self.xaxis.set_scale('linear')
if self._sharey is not None:
self.yaxis.major = self._sharey.yaxis.major
self.yaxis.minor = self._sharey.yaxis.minor
y0, y1 = self._sharey.get_ylim()
self.set_ylim(y0, y1, emit=False)
self.yaxis.set_scale(self._sharey.yaxis.get_scale())
else:
self.yaxis.set_scale('linear')
self._autoscaleon = True
self._update_transScale() # needed?
self._get_lines = _process_plot_var_args(self)
self._get_patches_for_fill = _process_plot_var_args(self, 'fill')
self._gridOn = rcParams['axes.grid']
self.lines = []
self.patches = []
self.texts = []
self.tables = []
self.artists = []
self.images = []
self.legend_ = None
self.collections = [] # collection.Collection instances
self.grid(self._gridOn)
props = font_manager.FontProperties(size=rcParams['axes.titlesize'])
self.titleOffsetTrans = mtransforms.ScaledTranslation(
0.0, 5.0 / 72.0, self.figure.dpi_scale_trans)
self.title = mtext.Text(
x=0.5, y=1.0, text='',
fontproperties=props,
verticalalignment='bottom',
horizontalalignment='center',
)
self.title.set_transform(self.transAxes + self.titleOffsetTrans)
self.title.set_clip_box(None)
self._set_artist_props(self.title)
# the patch draws the background of the axes. we want this to
# be below the other artists; the axesPatch name is
# deprecated. We use the frame to draw the edges so we are
# setting the edgecolor to None
self.patch = self.axesPatch = self._gen_axes_patch()
self.patch.set_figure(self.figure)
self.patch.set_facecolor(self._axisbg)
self.patch.set_edgecolor('None')
self.patch.set_linewidth(0)
self.patch.set_transform(self.transAxes)
# the frame draws the border around the axes and we want this
# above. this is a place holder for a more sophisticated
# artist that might just draw a left, bottom frame, or a
# centered frame, etc the axesFrame name is deprecated
self.frame = self.axesFrame = self._gen_axes_patch()
self.frame.set_figure(self.figure)
self.frame.set_facecolor('none')
self.frame.set_edgecolor(rcParams['axes.edgecolor'])
self.frame.set_linewidth(rcParams['axes.linewidth'])
self.frame.set_transform(self.transAxes)
self.frame.set_zorder(2.5)
self.axison = True
self.xaxis.set_clip_path(self.patch)
self.yaxis.set_clip_path(self.patch)
self._shared_x_axes.clean()
self._shared_y_axes.clean()
def clear(self):
'clear the axes'
self.cla()
def set_color_cycle(self, clist):
"""
Set the color cycle for any future plot commands on this Axes.
clist is a list of mpl color specifiers.
"""
self._get_lines.set_color_cycle(clist)
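    # For example (illustrative): ax.set_color_cycle(['c', 'm', 'y', 'k'])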
def ishold(self):
'return the HOLD status of the axes'
return self._hold
def hold(self, b=None):
"""
call signature::
hold(b=None)
Set the hold state. If *hold* is *None* (default), toggle the
*hold* state. Else set the *hold* state to boolean value *b*.
Examples:
* toggle hold:
>>> hold()
* turn hold on:
>>> hold(True)
* turn hold off
>>> hold(False)
When hold is True, subsequent plot commands will be added to
the current axes. When hold is False, the current axes and
figure will be cleared on the next plot command
"""
if b is None:
self._hold = not self._hold
else:
self._hold = b
def get_aspect(self):
return self._aspect
def set_aspect(self, aspect, adjustable=None, anchor=None):
"""
*aspect*
======== ================================================
value description
======== ================================================
'auto' automatic; fill position rectangle with data
'normal' same as 'auto'; deprecated
'equal' same scaling from data to plot units for x and y
num a circle will be stretched such that the height
is num times the width. aspect=1 is the same as
aspect='equal'.
======== ================================================
*adjustable*
========= ============================
value description
========= ============================
'box' change physical size of axes
'datalim' change xlim or ylim
========= ============================
*anchor*
===== =====================
value description
===== =====================
'C' centered
'SW' lower left corner
'S' middle of bottom edge
'SE' lower right corner
etc.
===== =====================
"""
if aspect in ('normal', 'auto'):
self._aspect = 'auto'
elif aspect == 'equal':
self._aspect = 'equal'
else:
self._aspect = float(aspect) # raise ValueError if necessary
if adjustable is not None:
self.set_adjustable(adjustable)
if anchor is not None:
self.set_anchor(anchor)
def get_adjustable(self):
return self._adjustable
def set_adjustable(self, adjustable):
"""
ACCEPTS: [ 'box' | 'datalim' ]
"""
if adjustable in ('box', 'datalim'):
if self in self._shared_x_axes or self in self._shared_y_axes:
if adjustable == 'box':
raise ValueError(
'adjustable must be "datalim" for shared axes')
self._adjustable = adjustable
else:
raise ValueError('argument must be "box", or "datalim"')
def get_anchor(self):
return self._anchor
def set_anchor(self, anchor):
"""
*anchor*
===== ============
value description
===== ============
'C' Center
'SW' bottom left
'S' bottom
'SE' bottom right
'E' right
'NE' top right
'N' top
'NW' top left
'W' left
===== ============
"""
if anchor in mtransforms.Bbox.coefs.keys() or len(anchor) == 2:
self._anchor = anchor
else:
raise ValueError('argument must be among %s' %
                             ', '.join(mtransforms.Bbox.coefs.keys()))
def get_data_ratio(self):
"""
Returns the aspect ratio of the raw data.
This method is intended to be overridden by new projection
types.
"""
xmin,xmax = self.get_xbound()
xsize = max(math.fabs(xmax-xmin), 1e-30)
ymin,ymax = self.get_ybound()
ysize = max(math.fabs(ymax-ymin), 1e-30)
return ysize/xsize
def apply_aspect(self, position=None):
'''
Use :meth:`_aspect` and :meth:`_adjustable` to modify the
axes box or the view limits.
'''
if position is None:
position = self.get_position(original=True)
aspect = self.get_aspect()
if aspect == 'auto':
self.set_position( position , which='active')
return
if aspect == 'equal':
A = 1
else:
A = aspect
#Ensure at drawing time that any Axes involved in axis-sharing
# does not have its position changed.
if self in self._shared_x_axes or self in self._shared_y_axes:
if self._adjustable == 'box':
self._adjustable = 'datalim'
warnings.warn(
'shared axes: "adjustable" is being changed to "datalim"')
figW,figH = self.get_figure().get_size_inches()
fig_aspect = figH/figW
if self._adjustable == 'box':
box_aspect = A * self.get_data_ratio()
pb = position.frozen()
pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect)
self.set_position(pb1.anchored(self.get_anchor(), pb), 'active')
return
# reset active to original in case it had been changed
# by prior use of 'box'
self.set_position(position, which='active')
xmin,xmax = self.get_xbound()
xsize = max(math.fabs(xmax-xmin), 1e-30)
ymin,ymax = self.get_ybound()
ysize = max(math.fabs(ymax-ymin), 1e-30)
l,b,w,h = position.bounds
box_aspect = fig_aspect * (h/w)
data_ratio = box_aspect / A
y_expander = (data_ratio*xsize/ysize - 1.0)
#print 'y_expander', y_expander
# If y_expander > 0, the dy/dx viewLim ratio needs to increase
if abs(y_expander) < 0.005:
#print 'good enough already'
return
dL = self.dataLim
xr = 1.05 * dL.width
yr = 1.05 * dL.height
xmarg = xsize - xr
ymarg = ysize - yr
Ysize = data_ratio * xsize
Xsize = ysize / data_ratio
Xmarg = Xsize - xr
Ymarg = Ysize - yr
xm = 0 # Setting these targets to, e.g., 0.05*xr does not seem to help.
ym = 0
#print 'xmin, xmax, ymin, ymax', xmin, xmax, ymin, ymax
#print 'xsize, Xsize, ysize, Ysize', xsize, Xsize, ysize, Ysize
changex = (self in self._shared_y_axes
and self not in self._shared_x_axes)
changey = (self in self._shared_x_axes
and self not in self._shared_y_axes)
if changex and changey:
warnings.warn("adjustable='datalim' cannot work with shared "
"x and y axes")
return
if changex:
adjust_y = False
else:
#print 'xmarg, ymarg, Xmarg, Ymarg', xmarg, ymarg, Xmarg, Ymarg
if xmarg > xm and ymarg > ym:
adjy = ((Ymarg > 0 and y_expander < 0)
or (Xmarg < 0 and y_expander > 0))
else:
adjy = y_expander > 0
#print 'y_expander, adjy', y_expander, adjy
adjust_y = changey or adjy #(Ymarg > xmarg)
if adjust_y:
yc = 0.5*(ymin+ymax)
y0 = yc - Ysize/2.0
y1 = yc + Ysize/2.0
self.set_ybound((y0, y1))
#print 'New y0, y1:', y0, y1
#print 'New ysize, ysize/xsize', y1-y0, (y1-y0)/xsize
else:
xc = 0.5*(xmin+xmax)
x0 = xc - Xsize/2.0
x1 = xc + Xsize/2.0
self.set_xbound((x0, x1))
#print 'New x0, x1:', x0, x1
#print 'New xsize, ysize/xsize', x1-x0, ysize/(x1-x0)
def axis(self, *v, **kwargs):
'''
Convenience method for manipulating the x and y view limits
and the aspect ratio of the plot.
*kwargs* are passed on to :meth:`set_xlim` and
:meth:`set_ylim`
'''
if len(v)==1 and is_string_like(v[0]):
s = v[0].lower()
if s=='on': self.set_axis_on()
elif s=='off': self.set_axis_off()
elif s in ('equal', 'tight', 'scaled', 'normal', 'auto', 'image'):
self.set_autoscale_on(True)
self.set_aspect('auto')
self.autoscale_view()
# self.apply_aspect()
if s=='equal':
self.set_aspect('equal', adjustable='datalim')
elif s == 'scaled':
self.set_aspect('equal', adjustable='box', anchor='C')
self.set_autoscale_on(False) # Req. by Mark Bakker
elif s=='tight':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
elif s == 'image':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
self.set_aspect('equal', adjustable='box', anchor='C')
else:
raise ValueError('Unrecognized string %s to axis; '
'try on or off' % s)
xmin, xmax = self.get_xlim()
ymin, ymax = self.get_ylim()
return xmin, xmax, ymin, ymax
try: v[0]
except IndexError:
emit = kwargs.get('emit', True)
xmin = kwargs.get('xmin', None)
xmax = kwargs.get('xmax', None)
xmin, xmax = self.set_xlim(xmin, xmax, emit)
ymin = kwargs.get('ymin', None)
ymax = kwargs.get('ymax', None)
ymin, ymax = self.set_ylim(ymin, ymax, emit)
return xmin, xmax, ymin, ymax
v = v[0]
if len(v) != 4:
raise ValueError('v must contain [xmin xmax ymin ymax]')
self.set_xlim([v[0], v[1]])
self.set_ylim([v[2], v[3]])
return v
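    # For example (illustrative): ax.axis('equal') forces equal scaling of the
    # x and y data units, and ax.axis([0, 10, -1, 1]) sets the view limits to
    # xmin=0, xmax=10, ymin=-1, ymax=1.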
def get_child_artists(self):
"""
Return a list of artists the axes contains.
.. deprecated:: 0.98
"""
raise DeprecationWarning('Use get_children instead')
def get_frame(self):
'Return the axes Rectangle frame'
warnings.warn('use ax.patch instead', DeprecationWarning)
return self.patch
def get_legend(self):
'Return the legend.Legend instance, or None if no legend is defined'
return self.legend_
def get_images(self):
'return a list of Axes images contained by the Axes'
return cbook.silent_list('AxesImage', self.images)
def get_lines(self):
'Return a list of lines contained by the Axes'
return cbook.silent_list('Line2D', self.lines)
def get_xaxis(self):
'Return the XAxis instance'
return self.xaxis
def get_xgridlines(self):
'Get the x grid lines as a list of Line2D instances'
return cbook.silent_list('Line2D xgridline', self.xaxis.get_gridlines())
def get_xticklines(self):
'Get the xtick lines as a list of Line2D instances'
return cbook.silent_list('Text xtickline', self.xaxis.get_ticklines())
def get_yaxis(self):
'Return the YAxis instance'
return self.yaxis
def get_ygridlines(self):
'Get the y grid lines as a list of Line2D instances'
return cbook.silent_list('Line2D ygridline', self.yaxis.get_gridlines())
def get_yticklines(self):
'Get the ytick lines as a list of Line2D instances'
return cbook.silent_list('Line2D ytickline', self.yaxis.get_ticklines())
#### Adding and tracking artists
def has_data(self):
'''Return *True* if any artists have been added to axes.
This should not be used to determine whether the *dataLim*
need to be updated, and may not actually be useful for
anything.
'''
return (
len(self.collections) +
len(self.images) +
len(self.lines) +
len(self.patches))>0
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the axes'
a.set_axes(self)
self.artists.append(a)
self._set_artist_props(a)
a.set_clip_path(self.patch)
a._remove_method = lambda h: self.artists.remove(h)
def add_collection(self, collection, autolim=True):
'''
add a :class:`~matplotlib.collections.Collection` instance
to the axes
'''
label = collection.get_label()
if not label:
collection.set_label('collection%d'%len(self.collections))
self.collections.append(collection)
self._set_artist_props(collection)
collection.set_clip_path(self.patch)
if autolim:
if collection._paths and len(collection._paths):
self.update_datalim(collection.get_datalim(self.transData))
collection._remove_method = lambda h: self.collections.remove(h)
def add_line(self, line):
'''
Add a :class:`~matplotlib.lines.Line2D` to the list of plot
lines
'''
self._set_artist_props(line)
line.set_clip_path(self.patch)
self._update_line_limits(line)
if not line.get_label():
line.set_label('_line%d'%len(self.lines))
self.lines.append(line)
line._remove_method = lambda h: self.lines.remove(h)
def _update_line_limits(self, line):
p = line.get_path()
if p.vertices.size > 0:
self.dataLim.update_from_path(p, self.ignore_existing_data_limits,
updatex=line.x_isdata,
updatey=line.y_isdata)
self.ignore_existing_data_limits = False
def add_patch(self, p):
"""
Add a :class:`~matplotlib.patches.Patch` *p* to the list of
axes patches; the clipbox will be set to the Axes clipping
box. If the transform is not set, it will be set to
:attr:`transData`.
"""
self._set_artist_props(p)
p.set_clip_path(self.patch)
self._update_patch_limits(p)
self.patches.append(p)
p._remove_method = lambda h: self.patches.remove(h)
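# --- Illustrative usage sketch (standalone; not part of the Axes class above):
# adding a Rectangle through add_patch(); the patch transform defaults to
# transData as described in the docstring. Names and data are hypothetical.
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
rect = mpatches.Rectangle((0.2, 0.3), 0.4, 0.2, facecolor='orange')
ax.add_patch(rect)       # updates the data limits from the patch vertices
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)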
def _update_patch_limits(self, patch):
'update the data limits for patch *p*'
# hist can add zero height Rectangles, which is useful to keep
# the bins, counts and patches lined up, but it throws off log
# scaling. We'll ignore rects with zero height or width in
# the auto-scaling
if (isinstance(patch, mpatches.Rectangle) and
(patch.get_width()==0 or patch.get_height()==0)):
return
vertices = patch.get_path().vertices
if vertices.size > 0:
xys = patch.get_patch_transform().transform(vertices)
if patch.get_data_transform() != self.transData:
transform = (patch.get_data_transform() +
self.transData.inverted())
xys = transform.transform(xys)
self.update_datalim(xys, updatex=patch.x_isdata,
updatey=patch.y_isdata)
def add_table(self, tab):
'''
Add a :class:`~matplotlib.tables.Table` instance to the
list of axes tables
'''
self._set_artist_props(tab)
self.tables.append(tab)
tab.set_clip_path(self.patch)
tab._remove_method = lambda h: self.tables.remove(h)
def relim(self):
'recompute the data limits based on current artists'
# Collections are deliberately not supported (yet); see
# the TODO note in artists.py.
self.dataLim.ignore(True)
self.ignore_existing_data_limits = True
for line in self.lines:
self._update_line_limits(line)
for p in self.patches:
self._update_patch_limits(p)
def update_datalim(self, xys, updatex=True, updatey=True):
'Update the data lim bbox with seq of xy tups or equiv. 2-D array'
# if no data is set currently, the bbox will ignore its
# limits and set the bound to be the bounds of the xydata.
# Otherwise, it will compute the bounds of its current data
# and the data in xydata
if iterable(xys) and not len(xys): return
if not ma.isMaskedArray(xys):
xys = np.asarray(xys)
self.dataLim.update_from_data_xy(xys, self.ignore_existing_data_limits,
updatex=updatex, updatey=updatey)
self.ignore_existing_data_limits = False
def update_datalim_numerix(self, x, y):
'Update the data lim bbox with seq of xy tups'
# if no data is set currently, the bbox will ignore its
# limits and set the bound to be the bounds of the xydata.
# Otherwise, it will compute the bounds of its current data
# and the data in xydata
if iterable(x) and not len(x): return
self.dataLim.update_from_data(x, y, self.ignore_existing_data_limits)
self.ignore_existing_data_limits = False
def update_datalim_bounds(self, bounds):
'''
Update the datalim to include the given
:class:`~matplotlib.transforms.Bbox` *bounds*
'''
self.dataLim.set(mtransforms.Bbox.union([self.dataLim, bounds]))
def _process_unit_info(self, xdata=None, ydata=None, kwargs=None):
'look for unit *kwargs* and update the axis instances as necessary'
if self.xaxis is None or self.yaxis is None: return
#print 'processing', self.get_geometry()
if xdata is not None:
# we only need to update if there is nothing set yet.
if not self.xaxis.have_units():
self.xaxis.update_units(xdata)
#print '\tset from xdata', self.xaxis.units
if ydata is not None:
# we only need to update if there is nothing set yet.
if not self.yaxis.have_units():
self.yaxis.update_units(ydata)
#print '\tset from ydata', self.yaxis.units
# process kwargs 2nd since these will override default units
if kwargs is not None:
xunits = kwargs.pop( 'xunits', self.xaxis.units)
if xunits!=self.xaxis.units:
#print '\tkw setting xunits', xunits
self.xaxis.set_units(xunits)
# If the units being set imply a different converter,
# we need to update.
if xdata is not None:
self.xaxis.update_units(xdata)
yunits = kwargs.pop('yunits', self.yaxis.units)
if yunits!=self.yaxis.units:
#print '\tkw setting yunits', yunits
self.yaxis.set_units(yunits)
# If the units being set imply a different converter,
# we need to update.
if ydata is not None:
self.yaxis.update_units(ydata)
def in_axes(self, mouseevent):
'''
return *True* if the given *mouseevent* (in display coords)
is in the Axes
'''
return self.patch.contains(mouseevent)[0]
def get_autoscale_on(self):
"""
Get whether autoscaling is applied on plot commands
"""
return self._autoscaleon
def set_autoscale_on(self, b):
"""
Set whether autoscaling is applied on plot commands
accepts: [ *True* | *False* ]
"""
self._autoscaleon = b
def autoscale_view(self, tight=False, scalex=True, scaley=True):
"""
autoscale the view limits using the data limits. You can
selectively autoscale only a single axis, eg, the xaxis by
setting *scaley* to *False*. The autoscaling preserves any
axis direction reversal that has already been done.
"""
# if image data only just use the datalim
if not self._autoscaleon: return
if scalex:
xshared = self._shared_x_axes.get_siblings(self)
dl = [ax.dataLim for ax in xshared]
bb = mtransforms.BboxBase.union(dl)
x0, x1 = bb.intervalx
if scaley:
yshared = self._shared_y_axes.get_siblings(self)
dl = [ax.dataLim for ax in yshared]
bb = mtransforms.BboxBase.union(dl)
y0, y1 = bb.intervaly
if (tight or (len(self.images)>0 and
len(self.lines)==0 and
len(self.patches)==0)):
if scalex:
self.set_xbound(x0, x1)
if scaley:
self.set_ybound(y0, y1)
return
if scalex:
XL = self.xaxis.get_major_locator().view_limits(x0, x1)
self.set_xbound(XL)
if scaley:
YL = self.yaxis.get_major_locator().view_limits(y0, y1)
self.set_ybound(YL)
#### Drawing
def draw(self, renderer=None, inframe=False):
"Draw everything (plot lines, axes, labels)"
if renderer is None:
renderer = self._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
if not self.get_visible(): return
renderer.open_group('axes')
self.apply_aspect()
# the patch draws the background rectangle -- the frame below
# will draw the edges
if self.axison and self._frameon:
self.patch.draw(renderer)
artists = []
if len(self.images)<=1 or renderer.option_image_nocomposite():
for im in self.images:
im.draw(renderer)
else:
# make a composite image blending alpha
# list of (mimage.Image, ox, oy)
mag = renderer.get_image_magnification()
ims = [(im.make_image(mag),0,0)
for im in self.images if im.get_visible()]
l, b, r, t = self.bbox.extents
width = mag*((round(r) + 0.5) - (round(l) - 0.5))
height = mag*((round(t) + 0.5) - (round(b) - 0.5))
im = mimage.from_images(height,
width,
ims)
im.is_grayscale = False
l, b, w, h = self.bbox.bounds
# composite images need special args so they will not
# respect z-order for now
renderer.draw_image(
round(l), round(b), im, self.bbox,
self.patch.get_path(),
self.patch.get_transform())
artists.extend(self.collections)
artists.extend(self.patches)
artists.extend(self.lines)
artists.extend(self.texts)
artists.extend(self.artists)
if self.axison and not inframe:
if self._axisbelow:
self.xaxis.set_zorder(0.5)
self.yaxis.set_zorder(0.5)
else:
self.xaxis.set_zorder(2.5)
self.yaxis.set_zorder(2.5)
artists.extend([self.xaxis, self.yaxis])
if not inframe: artists.append(self.title)
artists.extend(self.tables)
if self.legend_ is not None:
artists.append(self.legend_)
# the frame draws the edges around the axes patch -- we
# decouple these so the patch can be in the background and the
# frame in the foreground.
if self.axison and self._frameon:
artists.append(self.frame)
dsu = [ (a.zorder, i, a) for i, a in enumerate(artists)
if not a.get_animated() ]
dsu.sort()
for zorder, i, a in dsu:
a.draw(renderer)
renderer.close_group('axes')
self._cachedRenderer = renderer
def draw_artist(self, a):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
a.draw(self._cachedRenderer)
def redraw_in_frame(self):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
self.draw(self._cachedRenderer, inframe=True)
def get_renderer_cache(self):
return self._cachedRenderer
def __draw_animate(self):
# ignore for now; broken
if self._lastRenderer is None:
raise RuntimeError('You must first call ax.draw()')
dsu = [(a.zorder, a) for a in self.animated.keys()]
dsu.sort()
renderer = self._lastRenderer
renderer.blit()
for tmp, a in dsu:
a.draw(renderer)
#### Axes rectangle characteristics
def get_frame_on(self):
"""
Get whether the axes rectangle patch is drawn
"""
return self._frameon
def set_frame_on(self, b):
"""
Set whether the axes rectangle patch is drawn
ACCEPTS: [ *True* | *False* ]
"""
self._frameon = b
def get_axisbelow(self):
"""
Get whether axis below is true or not
"""
return self._axisbelow
def set_axisbelow(self, b):
"""
Set whether the axis ticks and gridlines are above or below most artists
ACCEPTS: [ *True* | *False* ]
"""
self._axisbelow = b
def grid(self, b=None, **kwargs):
"""
call signature::
grid(self, b=None, **kwargs)
Set the axes grids on or off; *b* is a boolean
If *b* is *None* and ``len(kwargs)==0``, toggle the grid state. If
*kwargs* are supplied, it is assumed that you want a grid and *b*
is thus set to *True*
*kwargs* are used to set the grid line properties, eg::
ax.grid(color='r', linestyle='-', linewidth=2)
Valid :class:`~matplotlib.lines.Line2D` kwargs are
%(Line2D)s
"""
if len(kwargs): b = True
self.xaxis.grid(b, **kwargs)
self.yaxis.grid(b, **kwargs)
grid.__doc__ = cbook.dedent(grid.__doc__) % martist.kwdocd
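# --- Illustrative usage sketch (standalone; not part of the Axes class above):
# grid() with kwargs implies b=True; calling it again with no arguments
# toggles the grid state. Names and data are hypothetical.
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([0, 1, 2], [0, 1, 4])
ax.grid(color='0.8', linestyle='-', linewidth=0.5)   # kwargs -> grid on
ax.grid()                                            # no args -> toggle off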
def ticklabel_format(self, **kwargs):
"""
Convenience method for manipulating the ScalarFormatter
used by default for linear axes.
Optional keyword arguments:
============ =====================================
Keyword Description
============ =====================================
*style* [ 'sci' (or 'scientific') | 'plain' ]
plain turns off scientific notation
*scilimits* (m, n), pair of integers; if *style*
is 'sci', scientific notation will
be used for numbers outside the range
10`-m`:sup: to 10`n`:sup:.
Use (0,0) to include all numbers.
*axis* [ 'x' | 'y' | 'both' ]
============ =====================================
Only the major ticks are affected.
If the method is called when the
:class:`~matplotlib.ticker.ScalarFormatter` is not the
:class:`~matplotlib.ticker.Formatter` being used, an
:exc:`AttributeError` will be raised.
"""
style = kwargs.pop('style', '').lower()
scilimits = kwargs.pop('scilimits', None)
if scilimits is not None:
try:
m, n = scilimits
m+n+1 # check that both are numbers
except (ValueError, TypeError):
raise ValueError("scilimits must be a sequence of 2 integers")
axis = kwargs.pop('axis', 'both').lower()
if style[:3] == 'sci':
sb = True
elif style in ['plain', 'comma']:
sb = False
if style == 'plain':
cb = False
else:
cb = True
raise NotImplementedError("comma style remains to be added")
elif style == '':
sb = None
else:
raise ValueError, "%s is not a valid style value"
try:
if sb is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_scientific(sb)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_scientific(sb)
if scilimits is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_powerlimits(scilimits)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_powerlimits(scilimits)
except AttributeError:
raise AttributeError(
"This method only works with the ScalarFormatter.")
def set_axis_off(self):
"""turn off the axis"""
self.axison = False
def set_axis_on(self):
"""turn on the axis"""
self.axison = True
def get_axis_bgcolor(self):
'Return the axis background color'
return self._axisbg
def set_axis_bgcolor(self, color):
"""
set the axes background color
ACCEPTS: any matplotlib color - see
:func:`~matplotlib.pyplot.colors`
"""
self._axisbg = color
self.patch.set_facecolor(color)
### data limits, ticks, tick labels, and formatting
def invert_xaxis(self):
"Invert the x-axis."
left, right = self.get_xlim()
self.set_xlim(right, left)
def xaxis_inverted(self):
'Returns True if the x-axis is inverted.'
left, right = self.get_xlim()
return right < left
def get_xbound(self):
"""
Returns the x-axis numerical bounds where::
lowerBound < upperBound
"""
left, right = self.get_xlim()
if left < right:
return left, right
else:
return right, left
def set_xbound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the x-axis.
This method will honor axes inversion regardless of parameter order.
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_xbound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.xaxis_inverted():
if lower < upper:
self.set_xlim(upper, lower)
else:
self.set_xlim(lower, upper)
else:
if lower < upper:
self.set_xlim(lower, upper)
else:
self.set_xlim(upper, lower)
def get_xlim(self):
"""
Get the x-axis range [*xmin*, *xmax*]
"""
return tuple(self.viewLim.intervalx)
def set_xlim(self, xmin=None, xmax=None, emit=True, **kwargs):
"""
call signature::
set_xlim(self, *args, **kwargs)
Set the limits for the xaxis
Returns the current xlimits as a length 2 tuple: [*xmin*, *xmax*]
Examples::
set_xlim((valmin, valmax))
set_xlim(valmin, valmax)
set_xlim(xmin=1) # xmax unchanged
set_xlim(xmax=1) # xmin unchanged
Keyword arguments:
*xmin*: scalar
the min of the xlim
*xmax*: scalar
the max of the xlim
*emit*: [ True | False ]
notify observers of lim change
ACCEPTS: len(2) sequence of floats
"""
if xmax is None and iterable(xmin):
xmin,xmax = xmin
self._process_unit_info(xdata=(xmin, xmax))
if xmin is not None:
xmin = self.convert_xunits(xmin)
if xmax is not None:
xmax = self.convert_xunits(xmax)
old_xmin,old_xmax = self.get_xlim()
if xmin is None: xmin = old_xmin
if xmax is None: xmax = old_xmax
xmin, xmax = mtransforms.nonsingular(xmin, xmax, increasing=False)
xmin, xmax = self.xaxis.limit_range_for_scale(xmin, xmax)
self.viewLim.intervalx = (xmin, xmax)
if emit:
self.callbacks.process('xlim_changed', self)
# Call all of the other x-axes that are shared with this one
for other in self._shared_x_axes.get_siblings(self):
if other is not self:
other.set_xlim(self.viewLim.intervalx, emit=False)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return xmin, xmax
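# --- Illustrative usage sketch (standalone; not part of the Axes class above):
# the calling forms accepted by set_xlim, per the docstring above (note that
# later Matplotlib versions rename the xmin/xmax keywords to left/right).
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(range(10))
ax.set_xlim(2, 8)        # both ends at once
ax.set_xlim((0, 10))     # a (min, max) sequence also works
ax.set_xlim(xmin=1)      # only the lower end; xmax is left unchanged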
def get_xscale(self):
'return the xaxis scale string: %s' % (
", ".join(mscale.get_scale_names()))
return self.xaxis.get_scale()
def set_xscale(self, value, **kwargs):
"""
call signature::
set_xscale(value)
Set the scaling of the x-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
"""
self.xaxis.set_scale(value, **kwargs)
self.autoscale_view()
self._update_transScale()
set_xscale.__doc__ = cbook.dedent(set_xscale.__doc__) % {
'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]),
'scale_docs': mscale.get_scale_docs().strip()}
def get_xticks(self, minor=False):
'Return the x ticks as a list of locations'
return self.xaxis.get_ticklocs(minor=minor)
def set_xticks(self, ticks, minor=False):
"""
Set the x ticks with list of *ticks*
ACCEPTS: sequence of floats
"""
return self.xaxis.set_ticks(ticks, minor=minor)
def get_xmajorticklabels(self):
'Get the xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_majorticklabels())
def get_xminorticklabels(self):
'Get the xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_minorticklabels())
def get_xticklabels(self, minor=False):
'Get the xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_ticklabels(minor=minor))
def set_xticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
call signature::
set_xticklabels(labels, fontdict=None, minor=False, **kwargs)
Set the xtick labels with list of strings *labels*. Return a
list of axis text instances.
*kwargs* set the :class:`~matplotlib.text.Text` properties.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
"""
return self.xaxis.set_ticklabels(labels, fontdict,
minor=minor, **kwargs)
set_xticklabels.__doc__ = cbook.dedent(
set_xticklabels.__doc__) % martist.kwdocd
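# --- Illustrative usage sketch (standalone; not part of the Axes class above):
# fixing the tick locations and then relabelling them; the extra kwargs are
# Text properties. Names and data are hypothetical.
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([0, 1, 2, 3], [10, 20, 15, 30])
ax.set_xticks([0, 1, 2, 3])
ax.set_xticklabels(['Q1', 'Q2', 'Q3', 'Q4'], rotation=30, fontsize=9)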
def invert_yaxis(self):
"Invert the y-axis."
left, right = self.get_ylim()
self.set_ylim(right, left)
def yaxis_inverted(self):
'Returns True if the y-axis is inverted.'
left, right = self.get_ylim()
return right < left
def get_ybound(self):
"Return y-axis numerical bounds in the form of lowerBound < upperBound"
left, right = self.get_ylim()
if left < right:
return left, right
else:
return right, left
def set_ybound(self, lower=None, upper=None):
"""Set the lower and upper numerical bounds of the y-axis.
This method will honor axes inversion regardless of parameter order.
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_ybound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.yaxis_inverted():
if lower < upper:
self.set_ylim(upper, lower)
else:
self.set_ylim(lower, upper)
else:
if lower < upper:
self.set_ylim(lower, upper)
else:
self.set_ylim(upper, lower)
def get_ylim(self):
"""
Get the y-axis range [*ymin*, *ymax*]
"""
return tuple(self.viewLim.intervaly)
def set_ylim(self, ymin=None, ymax=None, emit=True, **kwargs):
"""
call signature::
set_ylim(self, *args, **kwargs):
Set the limits for the yaxis; v = [ymin, ymax]::
set_ylim((valmin, valmax))
set_ylim(valmin, valmax)
set_ylim(ymin=1) # ymax unchanged
set_ylim(ymax=1) # ymin unchanged
Keyword arguments:
*ymin*: scalar
the min of the ylim
*ymax*: scalar
the max of the ylim
*emit*: [ True | False ]
notify observers of lim change
Returns the current ylimits as a length 2 tuple
ACCEPTS: len(2) sequence of floats
"""
if ymax is None and iterable(ymin):
ymin,ymax = ymin
if ymin is not None:
ymin = self.convert_yunits(ymin)
if ymax is not None:
ymax = self.convert_yunits(ymax)
old_ymin,old_ymax = self.get_ylim()
if ymin is None: ymin = old_ymin
if ymax is None: ymax = old_ymax
ymin, ymax = mtransforms.nonsingular(ymin, ymax, increasing=False)
ymin, ymax = self.yaxis.limit_range_for_scale(ymin, ymax)
self.viewLim.intervaly = (ymin, ymax)
if emit:
self.callbacks.process('ylim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_y_axes.get_siblings(self):
if other is not self:
other.set_ylim(self.viewLim.intervaly, emit=False)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return ymin, ymax
def get_yscale(self):
'return the yaxis scale string: %s' % (
", ".join(mscale.get_scale_names()))
return self.yaxis.get_scale()
def set_yscale(self, value, **kwargs):
"""
call signature::
set_yscale(value)
Set the scaling of the y-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
"""
self.yaxis.set_scale(value, **kwargs)
self.autoscale_view()
self._update_transScale()
set_yscale.__doc__ = cbook.dedent(set_yscale.__doc__) % {
'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]),
'scale_docs': mscale.get_scale_docs().strip()}
def get_yticks(self, minor=False):
'Return the y ticks as a list of locations'
return self.yaxis.get_ticklocs(minor=minor)
def set_yticks(self, ticks, minor=False):
"""
Set the y ticks with list of *ticks*
ACCEPTS: sequence of floats
Keyword arguments:
*minor*: [ False | True ]
Sets the minor ticks if True
"""
return self.yaxis.set_ticks(ticks, minor=minor)
def get_ymajorticklabels(self):
'Get the ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_majorticklabels())
def get_yminorticklabels(self):
'Get the ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_minorticklabels())
def get_yticklabels(self, minor=False):
'Get the ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_ticklabels(minor=minor))
def set_yticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
call signature::
set_yticklabels(labels, fontdict=None, minor=False, **kwargs)
Set the ytick labels with list of strings *labels*. Return a list of
:class:`~matplotlib.text.Text` instances.
*kwargs* set :class:`~matplotlib.text.Text` properties for the labels.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
"""
return self.yaxis.set_ticklabels(labels, fontdict,
minor=minor, **kwargs)
set_yticklabels.__doc__ = cbook.dedent(
set_yticklabels.__doc__) % martist.kwdocd
def xaxis_date(self, tz=None):
"""Sets up x-axis ticks and labels that treat the x data as dates.
*tz* is the time zone to use in labeling dates. Defaults to rc value.
"""
xmin, xmax = self.dataLim.intervalx
if xmin==0.:
# no data has been added - let's set the default datalim.
# We should probably use a better proxy for whether the datalim
# has been updated than the ignore setting
dmax = today = datetime.date.today()
dmin = today-datetime.timedelta(days=10)
self._process_unit_info(xdata=(dmin, dmax))
dmin, dmax = self.convert_xunits([dmin, dmax])
self.viewLim.intervalx = dmin, dmax
self.dataLim.intervalx = dmin, dmax
locator = self.xaxis.get_major_locator()
if not isinstance(locator, mdates.DateLocator):
locator = mdates.AutoDateLocator(tz)
self.xaxis.set_major_locator(locator)
# the autolocator uses the viewlim to pick the right date
# locator, but it may not have correct viewlim before an
# autoscale. If the viewlim is still zero..1, set it to the
# datalim and the autoscaler will update it on request
if self.viewLim.intervalx[0]==0.:
self.viewLim.intervalx = tuple(self.dataLim.intervalx)
locator.refresh()
formatter = self.xaxis.get_major_formatter()
if not isinstance(formatter, mdates.DateFormatter):
formatter = mdates.AutoDateFormatter(locator, tz)
self.xaxis.set_major_formatter(formatter)
def yaxis_date(self, tz=None):
"""Sets up y-axis ticks and labels that treat the y data as dates.
*tz* is the time zone to use in labeling dates. Defaults to rc value.
"""
ymin, ymax = self.dataLim.intervaly
if ymin==0.:
# no data has been added - let's set the default datalim.
# We should probably use a better proxy for whether the datalim
# has been updated than the ignore setting
dmax = today = datetime.date.today()
dmin = today-datetime.timedelta(days=10)
self._process_unit_info(ydata=(dmin, dmax))
dmin, dmax = self.convert_yunits([dmin, dmax])
self.viewLim.intervaly = dmin, dmax
self.dataLim.intervaly = dmin, dmax
locator = self.yaxis.get_major_locator()
if not isinstance(locator, mdates.DateLocator):
locator = mdates.AutoDateLocator(tz)
self.yaxis.set_major_locator(locator)
# the autolocator uses the viewlim to pick the right date
# locator, but it may not have correct viewlim before an
# autoscale. If the viewlim is still zero..1, set it to the
# datalim and the autoscaler will update it on request
if self.viewLim.intervaly[0]==0.:
self.viewLim.intervaly = tuple(self.dataLim.intervaly)
locator.refresh()
formatter = self.yaxis.get_major_formatter()
if not isinstance(formatter, mdates.DateFormatter):
formatter = mdates.AutoDateFormatter(locator, tz)
self.yaxis.set_major_formatter(formatter)
def format_xdata(self, x):
"""
Return *x* formatted as a string. This function will use the
:attr:`fmt_xdata` attribute if it is callable, else will fall
back on the xaxis major formatter.
"""
try: return self.fmt_xdata(x)
except TypeError:
func = self.xaxis.get_major_formatter().format_data_short
val = func(x)
return val
def format_ydata(self, y):
"""
Return *y* formatted as a string. This function will use the
:attr:`fmt_ydata` attribute if it is callable, else will fall
back on the yaxis major formatter.
"""
try: return self.fmt_ydata(y)
except TypeError:
func = self.yaxis.get_major_formatter().format_data_short
val = func(y)
return val
def format_coord(self, x, y):
'return a format string formatting the *x*, *y* coord'
if x is None:
x = '???'
if y is None:
y = '???'
xs = self.format_xdata(x)
ys = self.format_ydata(y)
return 'x=%s, y=%s'%(xs,ys)
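# --- Illustrative usage sketch (standalone; not part of the Axes class above):
# customizing the toolbar coordinate readout, either by assigning a callable
# to fmt_xdata/fmt_ydata or by overriding format_coord on an instance. The
# formatter below is hypothetical.
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(range(10))
ax.fmt_xdata = lambda x: '%.2f' % x          # consulted by format_xdata above

def verbose_coord(x, y):
    # report both coordinates with fixed precision
    return 'x=%.3f  y=%.3f' % (x, y)

ax.format_coord = verbose_coord              # instance-level override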
#### Interactive manipulation
def can_zoom(self):
"""
Return *True* if this axes support the zoom box
"""
return True
def get_navigate(self):
"""
Get whether the axes responds to navigation commands
"""
return self._navigate
def set_navigate(self, b):
"""
Set whether the axes responds to navigation toolbar commands
ACCEPTS: [ True | False ]
"""
self._navigate = b
def get_navigate_mode(self):
"""
Get the navigation toolbar button status: 'PAN', 'ZOOM', or None
"""
return self._navigate_mode
def set_navigate_mode(self, b):
"""
Set the navigation toolbar button status;
.. warning::
this is not a user-API function.
"""
self._navigate_mode = b
def start_pan(self, x, y, button):
"""
Called when a pan operation has started.
*x*, *y* are the mouse coordinates in display coords.
button is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
.. note::
Intended to be overridden by new projection types.
"""
self._pan_start = cbook.Bunch(
lim = self.viewLim.frozen(),
trans = self.transData.frozen(),
trans_inverse = self.transData.inverted().frozen(),
bbox = self.bbox.frozen(),
x = x,
y = y
)
def end_pan(self):
"""
Called when a pan operation completes (when the mouse button
is up).
.. note::
Intended to be overridden by new projection types.
"""
del self._pan_start
def drag_pan(self, button, key, x, y):
"""
Called when the mouse moves during a pan operation.
*button* is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
*key* is the modifier key pressed during the pan, if any (e.g. 'shift', 'control', 'x' or 'y')
*x*, *y* are the mouse coordinates in display coords.
.. note::
Intended to be overridden by new projection types.
"""
def format_deltas(key, dx, dy):
if key=='control':
if(abs(dx)>abs(dy)):
dy = dx
else:
dx = dy
elif key=='x':
dy = 0
elif key=='y':
dx = 0
elif key=='shift':
if 2*abs(dx) < abs(dy):
dx=0
elif 2*abs(dy) < abs(dx):
dy=0
elif(abs(dx)>abs(dy)):
dy=dy/abs(dy)*abs(dx)
else:
dx=dx/abs(dx)*abs(dy)
return (dx,dy)
p = self._pan_start
dx = x - p.x
dy = y - p.y
if dx == 0 and dy == 0:
return
if button == 1:
dx, dy = format_deltas(key, dx, dy)
result = p.bbox.translated(-dx, -dy) \
.transformed(p.trans_inverse)
elif button == 3:
try:
dx = -dx / float(self.bbox.width)
dy = -dy / float(self.bbox.height)
dx, dy = format_deltas(key, dx, dy)
if self.get_aspect() != 'auto':
dx = 0.5 * (dx + dy)
dy = dx
alpha = np.power(10.0, (dx, dy))
start = p.trans_inverse.transform_point((p.x, p.y))
lim_points = p.lim.get_points()
result = start + alpha * (lim_points - start)
result = mtransforms.Bbox(result)
except OverflowError:
warnings.warn('Overflow while panning')
return
self.set_xlim(*result.intervalx)
self.set_ylim(*result.intervaly)
def get_cursor_props(self):
"""
return the cursor properties as a (*linewidth*, *color*)
tuple, where *linewidth* is a float and *color* is an RGBA
tuple
"""
return self._cursorProps
def set_cursor_props(self, *args):
"""
Set the cursor property as::
ax.set_cursor_props(linewidth, color)
or::
ax.set_cursor_props((linewidth, color))
ACCEPTS: a (*float*, *color*) tuple
"""
if len(args)==1:
lw, c = args[0]
elif len(args)==2:
lw, c = args
else:
raise ValueError('args must be a (linewidth, color) tuple')
c = mcolors.colorConverter.to_rgba(c)
self._cursorProps = lw, c
def connect(self, s, func):
"""
Register observers to be notified when certain events occur. The
callback function has the following signature::
func(ax)  # where ax is the instance making the callback.
The following events can be connected to:
'xlim_changed', 'ylim_changed'
The connection id is returned - you can use it with
disconnect to disconnect from the axes event
"""
raise DeprecationWarning('use the callbacks CallbackRegistry instance '
'instead')
def disconnect(self, cid):
'disconnect from the Axes event.'
raise DeprecationWarning('use the callbacks CallbackRegistry instance '
'instead')
def get_children(self):
'return a list of child artists'
children = []
children.append(self.xaxis)
children.append(self.yaxis)
children.extend(self.lines)
children.extend(self.patches)
children.extend(self.texts)
children.extend(self.tables)
children.extend(self.artists)
children.extend(self.images)
if self.legend_ is not None:
children.append(self.legend_)
children.extend(self.collections)
children.append(self.title)
children.append(self.patch)
children.append(self.frame)
return children
def contains(self,mouseevent):
"""Test whether the mouse event occured in the axes.
Returns T/F, {}
"""
if callable(self._contains): return self._contains(self,mouseevent)
return self.patch.contains(mouseevent)
def pick(self, *args):
"""
call signature::
pick(mouseevent)
each child artist will fire a pick event if mouseevent is over
the artist and the artist has picker set
"""
if len(args)>1:
raise DeprecationWarning('New pick API implemented -- '
'see API_CHANGES in the src distribution')
martist.Artist.pick(self,args[0])
def __pick(self, x, y, trans=None, among=None):
"""
Return the artist under point that is closest to the *x*, *y*.
If *trans* is *None*, *x*, and *y* are in window coords,
(0,0 = lower left). Otherwise, *trans* is a
:class:`~matplotlib.transforms.Transform` that specifies the
coordinate system of *x*, *y*.
The selection of artists from amongst which the pick function
finds an artist can be narrowed using the optional keyword
argument *among*. If provided, this should be either a sequence
of permitted artists or a function taking an artist as its
argument and returning a true value if and only if that artist
can be selected.
Note this algorithm calculates distance to the vertices of the
polygon, so if you want to pick a patch, click on the edge!
"""
# MGDTODO: Needs updating
if trans is not None:
xywin = trans.transform_point((x,y))
else:
xywin = x,y
def dist_points(p1, p2):
'return the distance between two points'
x1, y1 = p1
x2, y2 = p2
return math.sqrt((x1-x2)**2+(y1-y2)**2)
def dist_x_y(p1, x, y):
'*x* and *y* are arrays; return the distance to the closest point'
x1, y1 = p1
return min(np.sqrt((x-x1)**2+(y-y1)**2))
def dist(a):
if isinstance(a, Text):
bbox = a.get_window_extent()
l,b,w,h = bbox.bounds
verts = (l,b), (l,b+h), (l+w,b+h), (l+w, b)
xt, yt = zip(*verts)
elif isinstance(a, Patch):
path = a.get_path()
tverts = a.get_transform().transform_path(path)
xt, yt = zip(*tverts)
elif isinstance(a, mlines.Line2D):
xdata = a.get_xdata(orig=False)
ydata = a.get_ydata(orig=False)
xt, yt = a.get_transform().numerix_x_y(xdata, ydata)
return dist_x_y(xywin, np.asarray(xt), np.asarray(yt))
artists = self.lines + self.patches + self.texts
if callable(among):
artists = filter(among, artists)
elif iterable(among):
amongd = dict([(k,1) for k in among])
artists = [a for a in artists if a in amongd]
elif among is None:
pass
else:
raise ValueError('among must be callable or iterable')
if not len(artists): return None
ds = [ (dist(a),a) for a in artists]
ds.sort()
return ds[0][1]
#### Labelling
def get_title(self):
"""
Get the title text string.
"""
return self.title.get_text()
def set_title(self, label, fontdict=None, **kwargs):
"""
call signature::
set_title(label, fontdict=None, **kwargs):
Set the title for the axes.
kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
for information on how the *fontdict* override and the optional kwargs work
"""
default = {
'fontsize':rcParams['axes.titlesize'],
'verticalalignment' : 'bottom',
'horizontalalignment' : 'center'
}
self.title.set_text(label)
self.title.update(default)
if fontdict is not None: self.title.update(fontdict)
self.title.update(kwargs)
return self.title
set_title.__doc__ = cbook.dedent(set_title.__doc__) % martist.kwdocd
def get_xlabel(self):
"""
Get the xlabel text string.
"""
label = self.xaxis.get_label()
return label.get_text()
def set_xlabel(self, xlabel, fontdict=None, **kwargs):
"""
call signature::
set_xlabel(xlabel, fontdict=None, **kwargs)
Set the label for the xaxis.
Valid kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
for information on how the *fontdict* override and the optional kwargs work
"""
label = self.xaxis.get_label()
label.set_text(xlabel)
if fontdict is not None: label.update(fontdict)
label.update(kwargs)
return label
set_xlabel.__doc__ = cbook.dedent(set_xlabel.__doc__) % martist.kwdocd
def get_ylabel(self):
"""
Get the ylabel text string.
"""
label = self.yaxis.get_label()
return label.get_text()
def set_ylabel(self, ylabel, fontdict=None, **kwargs):
"""
call signature::
set_ylabel(ylabel, fontdict=None, **kwargs)
Set the label for the yaxis
Valid kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
for information on how the *fontdict* override and the optional kwargs work
"""
label = self.yaxis.get_label()
label.set_text(ylabel)
if fontdict is not None: label.update(fontdict)
label.update(kwargs)
return label
set_ylabel.__doc__ = cbook.dedent(set_ylabel.__doc__) % martist.kwdocd
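# --- Illustrative usage sketch (standalone; not part of the Axes class above):
# titling and labelling an Axes; the extra kwargs and fontdict entries are
# Text properties. Names and data are hypothetical.
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3], [1, 4, 9])
ax.set_title('Quadratic growth', fontsize=14)
ax.set_xlabel('n')
ax.set_ylabel('n squared', fontdict={'fontsize': 12})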
def text(self, x, y, s, fontdict=None,
withdash=False, **kwargs):
"""
call signature::
text(x, y, s, fontdict=None, **kwargs)
Add text in string *s* to axis at location *x*, *y*, data
coordinates.
Keyword arguments:
*fontdict*:
A dictionary to override the default text properties.
If *fontdict* is *None*, the defaults are determined by your rc
parameters.
*withdash*: [ False | True ]
Creates a :class:`~matplotlib.text.TextWithDash` instance
instead of a :class:`~matplotlib.text.Text` instance.
Individual keyword arguments can be used to override any given
parameter::
text(x, y, s, fontsize=12)
The default transform specifies that text is in data coords,
alternatively, you can specify text in axis coords (0,0 is
lower-left and 1,1 is upper-right). The example below places
text in the center of the axes::
text(0.5, 0.5,'matplotlib',
horizontalalignment='center',
verticalalignment='center',
transform = ax.transAxes)
You can put a rectangular box around the text instance (eg. to
set a background color) by using the keyword *bbox*. *bbox* is
a dictionary of :class:`matplotlib.patches.Rectangle`
properties. For example::
text(x, y, s, bbox=dict(facecolor='red', alpha=0.5))
Valid kwargs are :class:`matplotlib.text.Text` properties:
%(Text)s
"""
default = {
'verticalalignment' : 'bottom',
'horizontalalignment' : 'left',
#'verticalalignment' : 'top',
'transform' : self.transData,
}
# At some point if we feel confident that TextWithDash
# is robust as a drop-in replacement for Text and that
# the performance impact of the heavier-weight class
# isn't too significant, it may make sense to eliminate
# the withdash kwarg and simply delegate whether there's
# a dash to TextWithDash and dashlength.
if withdash:
t = mtext.TextWithDash(
x=x, y=y, text=s,
)
else:
t = mtext.Text(
x=x, y=y, text=s,
)
self._set_artist_props(t)
t.update(default)
if fontdict is not None: t.update(fontdict)
t.update(kwargs)
self.texts.append(t)
t._remove_method = lambda h: self.texts.remove(h)
#if t.get_clip_on(): t.set_clip_box(self.bbox)
if 'clip_on' in kwargs: t.set_clip_box(self.bbox)
return t
text.__doc__ = cbook.dedent(text.__doc__) % martist.kwdocd
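# --- Illustrative usage sketch (standalone; not part of the Axes class above):
# placing text in axes coordinates and drawing a background box via the bbox
# kwarg, as described in the docstring above. Names are hypothetical.
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
ax.text(0.5, 0.5, 'centered in axes coords',
        horizontalalignment='center', verticalalignment='center',
        transform=ax.transAxes)
ax.text(0.1, 0.9, 'boxed', transform=ax.transAxes,
        bbox=dict(facecolor='red', alpha=0.5))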
def annotate(self, *args, **kwargs):
"""
call signature::
annotate(s, xy, xytext=None, xycoords='data',
textcoords='data', arrowprops=None, **kwargs)
Keyword arguments:
%(Annotation)s
.. plot:: mpl_examples/pylab_examples/annotation_demo2.py
"""
a = mtext.Annotation(*args, **kwargs)
a.set_transform(mtransforms.IdentityTransform())
self._set_artist_props(a)
if 'clip_on' in kwargs: a.set_clip_path(self.patch)
self.texts.append(a)
return a
annotate.__doc__ = cbook.dedent(annotate.__doc__) % martist.kwdocd
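# --- Illustrative usage sketch (standalone; not part of the Axes class above):
# annotating a data point with an arrow from offset text. Names and data are
# hypothetical.
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
t = np.linspace(0.0, 2.0 * np.pi, 100)
ax.plot(t, np.sin(t))
ax.annotate('local max', xy=(np.pi / 2.0, 1.0), xytext=(3.0, 1.3),
            arrowprops=dict(facecolor='black', shrink=0.05))
ax.set_ylim(-1.5, 1.5)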
#### Lines and spans
def axhline(self, y=0, xmin=0, xmax=1, **kwargs):
"""
call signature::
axhline(y=0, xmin=0, xmax=1, **kwargs)
Axis Horizontal Line
Draw a horizontal line at *y* from *xmin* to *xmax*. With the
default values of *xmin* = 0 and *xmax* = 1, this line will
always span the horizontal extent of the axes, regardless of
the xlim settings, even if you change them, eg. with the
:meth:`set_xlim` command. That is, the horizontal extent is
in axes coords: 0=left, 0.5=middle, 1.0=right but the *y*
location is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red hline at *y* = 0 that spans the xrange
>>> axhline(linewidth=4, color='r')
* draw a default hline at *y* = 1 that spans the xrange
>>> axhline(y=1)
* draw a default hline at *y* = .5 that spans the middle half of
the xrange
>>> axhline(y=.5, xmin=0.25, xmax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
ymin, ymax = self.get_ybound()
# We need to strip away the units for comparison with
# non-unitized bounds
yy = self.convert_yunits( y )
scaley = (yy<ymin) or (yy>ymax)
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
l = mlines.Line2D([xmin,xmax], [y,y], transform=trans, **kwargs)
l.x_isdata = False
self.add_line(l)
self.autoscale_view(scalex=False, scaley=scaley)
return l
axhline.__doc__ = cbook.dedent(axhline.__doc__) % martist.kwdocd
def axvline(self, x=0, ymin=0, ymax=1, **kwargs):
"""
call signature::
axvline(x=0, ymin=0, ymax=1, **kwargs)
Axis Vertical Line
Draw a vertical line at *x* from *ymin* to *ymax*. With the
default values of *ymin* = 0 and *ymax* = 1, this line will
always span the vertical extent of the axes, regardless of the
xlim settings, even if you change them, eg. with the
:meth:`set_xlim` command. That is, the vertical extent is in
axes coords: 0=bottom, 0.5=middle, 1.0=top but the *x* location
is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red vline at *x* = 0 that spans the yrange
>>> axvline(linewidth=4, color='r')
* draw a default vline at *x* = 1 that spans the yrange
>>> axvline(x=1)
* draw a default vline at *x* = .5 that spans the middle half of
the yrange
>>> axvline(x=.5, ymin=0.25, ymax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
xmin, xmax = self.get_xbound()
# We need to strip away the units for comparison with
# non-unitized bounds
xx = self.convert_xunits( x )
scalex = (xx<xmin) or (xx>xmax)
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
l = mlines.Line2D([x,x], [ymin,ymax] , transform=trans, **kwargs)
l.y_isdata = False
self.add_line(l)
self.autoscale_view(scalex=scalex, scaley=False)
return l
axvline.__doc__ = cbook.dedent(axvline.__doc__) % martist.kwdocd
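# --- Illustrative usage sketch (standalone; not part of the Axes class above):
# axhline/axvline span the full axes extent in one direction while the
# crossing location stays in data coordinates. Names and data are
# hypothetical.
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([0, 1, 2, 3], [0, 1, 4, 9])
ax.axhline(y=4, linewidth=2, color='r')             # spans the full x range
ax.axvline(x=1.5, ymin=0.25, ymax=0.75, color='g')  # middle half of y range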
def axhspan(self, ymin, ymax, xmin=0, xmax=1, **kwargs):
"""
call signature::
axhspan(ymin, ymax, xmin=0, xmax=1, **kwargs)
Axis Horizontal Span.
*y* coords are in data units and *x* coords are in axes (relative
0-1) units.
Draw a horizontal span (rectangle) from *ymin* to *ymax*.
With the default values of *xmin* = 0 and *xmax* = 1, this
always spans the xrange, regardless of the xlim settings, even
if you change them, eg. with the :meth:`set_xlim` command.
That is, the horizontal extent is in axes coords: 0=left,
0.5=middle, 1.0=right but the *y* location is in data
coordinates.
Return value is a :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a gray rectangle from *y* = 0.25-0.75 that spans the
horizontal extent of the axes
>>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon` properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/axhspan_demo.py
"""
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
p.x_isdata = False
self.add_patch(p)
return p
axhspan.__doc__ = cbook.dedent(axhspan.__doc__) % martist.kwdocd
def axvspan(self, xmin, xmax, ymin=0, ymax=1, **kwargs):
"""
call signature::
axvspan(xmin, xmax, ymin=0, ymax=1, **kwargs)
Axis Vertical Span.
*x* coords are in data units and *y* coords are in axes (relative
0-1) units.
Draw a vertical span (rectangle) from *xmin* to *xmax*. With
the default values of *ymin* = 0 and *ymax* = 1, this always
spans the yrange, regardless of the ylim settings, even if you
change them, eg. with the :meth:`set_ylim` command. That is,
the vertical extent is in axes coords: 0=bottom, 0.5=middle,
1.0=top but the *x* location is in data coordinates.
Return value is the :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a vertical green translucent rectangle from x=1.25 to 1.55 that
spans the yrange of the axes
>>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon`
properties:
%(Polygon)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
p.y_isdata = False
self.add_patch(p)
return p
axvspan.__doc__ = cbook.dedent(axvspan.__doc__) % martist.kwdocd
def hlines(self, y, xmin, xmax, colors='k', linestyles='solid',
label='', **kwargs):
"""
call signature::
hlines(y, xmin, xmax, colors='k', linestyles='solid', **kwargs)
Plot horizontal lines at each *y* from *xmin* to *xmax*.
Returns the :class:`~matplotlib.collections.LineCollection`
that was added.
Required arguments:
*y*:
a 1-D numpy array or iterable.
*xmin* and *xmax*:
can be scalars or ``len(x)`` numpy arrays. If they are
scalars, then the respective values are constant, else the
widths of the lines are determined by *xmin* and *xmax*.
Optional keyword arguments:
*colors*:
a line collections color argument, either a single color
or a ``len(y)`` list of colors
*linestyles*:
[ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
**Example:**
.. plot:: mpl_examples/pylab_examples/hline_demo.py
"""
if kwargs.get('fmt') is not None:
raise DeprecationWarning('hlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
# We do the conversion first since not all unitized data is uniform
y = self.convert_yunits( y )
xmin = self.convert_xunits( xmin )
xmax = self.convert_xunits( xmax )
if not iterable(y): y = [y]
if not iterable(xmin): xmin = [xmin]
if not iterable(xmax): xmax = [xmax]
y = np.asarray(y)
xmin = np.asarray(xmin)
xmax = np.asarray(xmax)
if len(xmin)==1:
xmin = np.resize( xmin, y.shape )
if len(xmax)==1:
xmax = np.resize( xmax, y.shape )
if len(xmin)!=len(y):
raise ValueError('xmin and y are unequal sized sequences')
if len(xmax)!=len(y):
raise ValueError('xmax and y are unequal sized sequences')
verts = [ ((thisxmin, thisy), (thisxmax, thisy))
for thisxmin, thisxmax, thisy in zip(xmin, xmax, y)]
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
minx = min(xmin.min(), xmax.min())
maxx = max(xmin.max(), xmax.max())
miny = y.min()
maxy = y.max()
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
hlines.__doc__ = cbook.dedent(hlines.__doc__)
def vlines(self, x, ymin, ymax, colors='k', linestyles='solid',
label='', **kwargs):
"""
call signature::
vlines(x, ymin, ymax, colors='k', linestyles='solid')
Plot vertical lines at each *x* from *ymin* to *ymax*. *ymin*
or *ymax* can be scalars or len(*x*) numpy arrays. If they are
scalars, then the respective values are constant, else the
heights of the lines are determined by *ymin* and *ymax*.
*colors*
a line collections color args, either a single color
or a len(*x*) list of colors
*linestyles*
one of [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
Returns the :class:`matplotlib.collections.LineCollection`
that was added.
kwargs are :class:`~matplotlib.collections.LineCollection` properties:
%(LineCollection)s
"""
if kwargs.get('fmt') is not None:
raise DeprecationWarning('vlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
self._process_unit_info(xdata=x, ydata=ymin, kwargs=kwargs)
# We do the conversion first since not all unitized data is uniform
x = self.convert_xunits( x )
ymin = self.convert_yunits( ymin )
ymax = self.convert_yunits( ymax )
if not iterable(x): x = [x]
if not iterable(ymin): ymin = [ymin]
if not iterable(ymax): ymax = [ymax]
x = np.asarray(x)
ymin = np.asarray(ymin)
ymax = np.asarray(ymax)
if len(ymin)==1:
ymin = np.resize( ymin, x.shape )
if len(ymax)==1:
ymax = np.resize( ymax, x.shape )
if len(ymin)!=len(x):
raise ValueError('ymin and x are unequal sized sequences')
if len(ymax)!=len(x):
raise ValueError('ymax and x are unequal sized sequences')
Y = np.array([ymin, ymax]).T
verts = [ ((thisx, thisymin), (thisx, thisymax))
for thisx, (thisymin, thisymax) in zip(x,Y)]
#print 'creating line collection'
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
minx = min( x )
maxx = max( x )
miny = min( min(ymin), min(ymax) )
maxy = max( max(ymin), max(ymax) )
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
vlines.__doc__ = cbook.dedent(vlines.__doc__) % martist.kwdocd
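# --- Illustrative usage sketch (standalone; not part of the Axes class above):
# hlines/vlines draw a LineCollection in a single call; scalar bounds are
# broadcast against the sequence argument. Names and data are hypothetical.
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
x = np.arange(5)
tops = np.array([1.0, 3.0, 2.0, 5.0, 4.0])
ax.vlines(x, 0, tops, colors='b', linestyles='solid')        # stems
ax.hlines([1.0, 3.0], 0, 4, colors='r', linestyles='dashed') # two levels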
#### Basic plotting
def plot(self, *args, **kwargs):
"""
Plot lines and/or markers to the
:class:`~matplotlib.axes.Axes`. *args* is a variable length
argument, allowing for multiple *x*, *y* pairs with an
optional format string. For example, each of the following is
legal::
plot(x, y) # plot x and y using default line style and color
plot(x, y, 'bo') # plot x and y using blue circle markers
plot(y) # plot y using x as index array 0..N-1
plot(y, 'r+') # ditto, but with red plusses
If *x* and/or *y* is 2-dimensional, then the corresponding columns
will be plotted.
An arbitrary number of *x*, *y*, *fmt* groups can be
specified, as in::
a.plot(x1, y1, 'g^', x2, y2, 'g-')
Return value is a list of lines that were added.
The following format string characters are accepted to control
the line style or marker:
================ ===============================
character description
================ ===============================
'-' solid line style
'--' dashed line style
'-.' dash-dot line style
':' dotted line style
'.' point marker
',' pixel marker
'o' circle marker
'v' triangle_down marker
'^' triangle_up marker
'<' triangle_left marker
'>' triangle_right marker
'1' tri_down marker
'2' tri_up marker
'3' tri_left marker
'4' tri_right marker
's' square marker
'p' pentagon marker
'*' star marker
'h' hexagon1 marker
'H' hexagon2 marker
'+' plus marker
'x' x marker
'D' diamond marker
'd' thin_diamond marker
'|' vline marker
'_' hline marker
================ ===============================
The following color abbreviations are supported:
========== ========
character color
========== ========
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
========== ========
In addition, you can specify colors in many weird and
wonderful ways, including full names (``'green'``), hex
strings (``'#008000'``), RGB or RGBA tuples (``(0,1,0,1)``) or
grayscale intensities as a string (``'0.8'``). Of these, the
string specifications can be used in place of a ``fmt`` group,
but the tuple forms can be used only as ``kwargs``.
Line styles and colors are combined in a single format string, as in
``'bo'`` for blue circles.
The *kwargs* can be used to set line properties (any property that has
a ``set_*`` method). You can use this to set a line label (for auto
legends), linewidth, antialiasing, marker face color, etc. Here is an
example::
plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)
plot([1,2,3], [1,4,9], 'rs', label='line 2')
axis([0, 4, 0, 10])
legend()
If you make multiple lines with one plot command, the kwargs
apply to all those lines, e.g.::
plot(x1, y1, x2, y2, antialiased=False)
Neither line will be antialiased.
You do not need to use format strings, which are just
abbreviations. All of the line properties can be controlled
by keyword arguments. For example, you can set the color,
marker, linestyle, and markercolor with::
plot(x, y, color='green', linestyle='dashed', marker='o',
markerfacecolor='blue', markersize=12)
See :class:`~matplotlib.lines.Line2D` for details.
The kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
kwargs *scalex* and *scaley*, if defined, are passed on to
:meth:`~matplotlib.axes.Axes.autoscale_view` to determine
whether the *x* and *y* axes are autoscaled; the default is
*True*.
"""
scalex = kwargs.pop( 'scalex', True)
scaley = kwargs.pop( 'scaley', True)
if not self._hold: self.cla()
lines = []
for line in self._get_lines(*args, **kwargs):
self.add_line(line)
lines.append(line)
self.autoscale_view(scalex=scalex, scaley=scaley)
return lines
plot.__doc__ = cbook.dedent(plot.__doc__) % martist.kwdocd
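# --- Illustrative usage sketch (standalone; not part of the Axes class above):
# format strings versus explicit Line2D keyword arguments, plus a label that
# legend() picks up later. Names and data are hypothetical.
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
t = np.linspace(0.0, 2.0, 100)
ax.plot(t, np.sin(2 * np.pi * t), 'g^-', label='sine')
ax.plot(t, np.cos(2 * np.pi * t), color='red', linestyle='dashed',
        marker='o', markersize=4, label='cosine')
ax.legend(loc='upper right')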
def plot_date(self, x, y, fmt='bo', tz=None, xdate=True, ydate=False,
**kwargs):
"""
call signature::
plot_date(x, y, fmt='bo', tz=None, xdate=True, ydate=False, **kwargs)
Similar to the :func:`~matplotlib.pyplot.plot` command, except
the *x* or *y* (or both) data is considered to be dates, and the
axis is labeled accordingly.
*x* and/or *y* can be a sequence of dates represented as float
days since 0001-01-01 UTC.
Keyword arguments:
*fmt*: string
The plot format string.
*tz*: [ None | timezone string ]
The time zone to use in labeling dates. If *None*, defaults to rc
value.
*xdate*: [ True | False ]
If *True*, the *x*-axis will be labeled with dates.
*ydate*: [ False | True ]
If *True*, the *y*-axis will be labeled with dates.
Note if you are using custom date tickers and formatters, it
may be necessary to set the formatters/locators after the call
to :meth:`plot_date` since :meth:`plot_date` will set the
default tick locator to
:class:`matplotlib.ticker.AutoDateLocator` (if the tick
locator is not already set to a
:class:`matplotlib.ticker.DateLocator` instance) and the
default tick formatter to
:class:`matplotlib.ticker.AutoDateFormatter` (if the tick
formatter is not already set to a
:class:`matplotlib.ticker.DateFormatter` instance).
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:mod:`~matplotlib.dates`:
for helper functions
:func:`~matplotlib.dates.date2num`,
:func:`~matplotlib.dates.num2date` and
:func:`~matplotlib.dates.drange`:
for help on creating the required floating point
dates.
"""
if not self._hold: self.cla()
ret = self.plot(x, y, fmt, **kwargs)
if xdate:
self.xaxis_date(tz)
if ydate:
self.yaxis_date(tz)
self.autoscale_view()
return ret
plot_date.__doc__ = cbook.dedent(plot_date.__doc__) % martist.kwdocd
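# --- Illustrative usage sketch (standalone; not part of the Axes class above):
# plotting float date numbers with plot_date, which installs the date
# locator/formatter on the x axis. Names and data are hypothetical.
import datetime
import matplotlib.pyplot as plt
from matplotlib.dates import date2num

days = [datetime.date(2009, 1, d) for d in range(1, 11)]
values = range(10)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot_date(date2num(days), values, 'bo-')
fig.autofmt_xdate()      # rotate the date tick labels for readability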
def loglog(self, *args, **kwargs):
"""
call signature::
loglog(*args, **kwargs)
Make a plot with log scaling on the *x* and *y* axis.
:func:`~matplotlib.pyplot.loglog` supports all the keyword
arguments of :func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basex*/*basey*: scalar > 1
base of the *x*/*y* logarithm
*subsx*/*subsy*: [ None | sequence ]
the location of the minor *x*/*y* ticks; *None* defaults
to autosubs, which depend on the number of decades in the
plot; see :meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale` for details
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/log_demo.py
"""
if not self._hold: self.cla()
dx = {'basex': kwargs.pop('basex', 10),
'subsx': kwargs.pop('subsx', None),
}
dy = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
}
self.set_xscale('log', **dx)
self.set_yscale('log', **dy)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
loglog.__doc__ = cbook.dedent(loglog.__doc__) % martist.kwdocd
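# --- Illustrative usage sketch (standalone; not part of the Axes class above):
# a power law becomes a straight line on log-log axes. The basex/basey
# keywords follow the docstring above (newer Matplotlib versions use base
# instead). Names and data are hypothetical.
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
x = np.logspace(0, 3, 50)
ax.loglog(x, x ** 2.0, basex=10, basey=10)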
def semilogx(self, *args, **kwargs):
"""
call signature::
semilogx(*args, **kwargs)
Make a plot with log scaling on the *x* axis.
:func:`semilogx` supports all the keyword arguments of
:func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale`.
Notable keyword arguments:
*basex*: scalar > 1
base of the *x* logarithm
*subsx*: [ None | sequence ]
The location of the minor xticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_xscale` for
details.
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`:
For example code and figure
"""
if not self._hold: self.cla()
d = {'basex': kwargs.pop( 'basex', 10),
'subsx': kwargs.pop( 'subsx', None),
}
self.set_xscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
semilogx.__doc__ = cbook.dedent(semilogx.__doc__) % martist.kwdocd
def semilogy(self, *args, **kwargs):
"""
call signature::
semilogy(*args, **kwargs)
Make a plot with log scaling on the *y* axis.
:func:`semilogy` supports all the keyword arguments of
:func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basey*: scalar > 1
Base of the *y* logarithm
*subsy*: [ None | sequence ]
The location of the minor yticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_yscale` for
details.
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`:
For example code and figure
"""
if not self._hold: self.cla()
d = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
}
self.set_yscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
semilogy.__doc__ = cbook.dedent(semilogy.__doc__) % martist.kwdocd
def acorr(self, x, **kwargs):
"""
call signature::
acorr(x, normed=False, detrend=mlab.detrend_none, usevlines=False,
maxlags=None, **kwargs)
Plot the autocorrelation of *x*. If *normed* = *True*,
normalize the data by the autocorrelation at 0-th lag. *x* is
detrended by the *detrend* callable (default: no detrending).
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length 2*maxlags+1 lag vector
- *c* is the 2*maxlags+1 auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :meth:`plot`
The default *linestyle* is None and the default *marker* is
``'o'``, though these can be overridden with keyword args.
The cross correlation is performed with
:func:`numpy.correlate` with *mode* = 2.
If *usevlines* is *True*, :meth:`~matplotlib.axes.Axes.vlines`
rather than :meth:`~matplotlib.axes.Axes.plot` is used to draw
vertical lines from the origin to the acorr. Otherwise, the
plot style is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
*maxlags* is a positive integer detailing the number of lags
to show. The default value of *None* will return all
:math:`2 \mathrm{len}(x) - 1` lags.
If *usevlines* is *True*, the return value is instead a tuple
(*lags*, *c*, *linecol*, *b*) where
- *linecol* is the
:class:`~matplotlib.collections.LineCollection`
- *b* is the *x*-axis.
.. seealso::
:meth:`~matplotlib.axes.Axes.plot` or
:meth:`~matplotlib.axes.Axes.vlines`: For documentation on
valid kwargs.
**Example:**
:func:`~matplotlib.pyplot.xcorr` above, and
:func:`~matplotlib.pyplot.acorr` below.
**Example:**
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
return self.xcorr(x, x, **kwargs)
acorr.__doc__ = cbook.dedent(acorr.__doc__) % martist.kwdocd
def xcorr(self, x, y, normed=False, detrend=mlab.detrend_none,
usevlines=False, maxlags=None, **kwargs):
"""
call signature::
xcorr(x, y, normed=False, detrend=mlab.detrend_none,
usevlines=False, **kwargs):
Plot the cross correlation between *x* and *y*. If *normed* =
*True*, normalize the data by the cross correlation at 0-th
lag. *x* and *y* are detrended by the *detrend* callable
(default: no detrending). *x* and *y* must be equal length.
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length ``2*maxlags+1`` lag vector
- *c* is the ``2*maxlags+1`` auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :func:`~matplotlib.pyplot.plot`.
The default *linestyle* is *None* and the default *marker* is
'o', though these can be overridden with keyword args. The
cross correlation is performed with :func:`numpy.correlate`
with *mode* = 2.
If *usevlines* is *True*:
:func:`~matplotlib.pyplot.vlines`
rather than :func:`~matplotlib.pyplot.plot` is used to draw
vertical lines from the origin to the xcorr. Otherwise the
plotstyle is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
If *usevlines* is *True*, the return value is a tuple (*lags*, *c*, *linecol*, *b*)
where *linecol* is the
:class:`matplotlib.collections.LineCollection` instance and
*b* is the *x*-axis.
*maxlags* is a positive integer detailing the number of lags to show.
The default value of *None* will return all ``(2*len(x)-1)`` lags.
**Example:**
:func:`~matplotlib.pyplot.xcorr` above, and
:func:`~matplotlib.pyplot.acorr` below.
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
Nx = len(x)
if Nx!=len(y):
raise ValueError('x and y must be equal length')
x = detrend(np.asarray(x))
y = detrend(np.asarray(y))
c = np.correlate(x, y, mode=2)
if normed: c/= np.sqrt(np.dot(x,x) * np.dot(y,y))
if maxlags is None: maxlags = Nx - 1
if maxlags >= Nx or maxlags < 1:
raise ValueError('maxlags must be None or strictly '
'positive < %d' % Nx)
lags = np.arange(-maxlags,maxlags+1)
c = c[Nx-1-maxlags:Nx+maxlags]
if usevlines:
a = self.vlines(lags, [0], c, **kwargs)
b = self.axhline(**kwargs)
else:
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a, = self.plot(lags, c, **kwargs)
b = None
return lags, c, a, b
xcorr.__doc__ = cbook.dedent(xcorr.__doc__) % martist.kwdocd
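# A minimal sketch of driving xcorr()/acorr() through pyplot on two noisy
# signals (illustrative only; assumes numpy and the pyplot interface).
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(0)
x1, y1 = np.random.randn(2, 100)
plt.subplot(211)
plt.xcorr(x1, y1, usevlines=True, maxlags=50, normed=True)
plt.subplot(212)
plt.acorr(x1, usevlines=True, maxlags=50, normed=True)
plt.show()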
def legend(self, *args, **kwargs):
"""
call signature::
legend(*args, **kwargs)
Place a legend on the current axes at location *loc*. Labels are a
sequence of strings and *loc* can be a string or an integer specifying
the legend location.
To make a legend with existing lines::
legend()
:meth:`legend` by itself will try and build a legend using the label
property of the lines/patches/collections. You can set the label of
a line by doing::
plot(x, y, label='my data')
or::
line.set_label('my data').
If label is set to '_nolegend_', the item will not be shown in
legend.
To automatically generate the legend from labels::
legend( ('label1', 'label2', 'label3') )
To make a legend for a list of lines and labels::
legend( (line1, line2, line3), ('label1', 'label2', 'label3') )
To make a legend at a given location, using a location argument::
legend( ('label1', 'label2', 'label3'), loc='upper left')
or::
legend( (line1, line2, line3), ('label1', 'label2', 'label3'), loc=2)
The location codes are
=============== =============
Location String Location Code
=============== =============
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== =============
If none of these locations are suitable, *loc* can be a 2-tuple
giving *x*, *y* in axes coordinates, i.e.::
loc = 0, 1 # left top
loc = 0.5, 0.5 # center
Keyword arguments:
*isaxes*: [ True | False ]
Indicates that this is an axes legend
*numpoints*: integer
The number of points in the legend line, default is 4
*prop*: [ None | FontProperties ]
A :class:`matplotlib.font_manager.FontProperties`
instance, or *None* to use rc settings.
*pad*: [ None | scalar ]
The fractional whitespace inside the legend border, between 0 and 1.
If *None*, use rc settings.
*markerscale*: [ None | scalar ]
The relative size of legend markers vs. original. If *None*, use rc
settings.
*shadow*: [ None | False | True ]
If *True*, draw a shadow behind legend. If *None*, use rc settings.
*labelsep*: [ None | scalar ]
The vertical space between the legend entries. If *None*, use rc
settings.
*handlelen*: [ None | scalar ]
The length of the legend lines. If *None*, use rc settings.
*handletextsep*: [ None | scalar ]
The space between the legend line and legend text. If *None*, use rc
settings.
*axespad*: [ None | scalar ]
The border between the axes and legend edge. If *None*, use rc
settings.
**Example:**
.. plot:: mpl_examples/api/legend_demo.py
"""
def get_handles():
handles = self.lines[:]
handles.extend(self.patches)
handles.extend([c for c in self.collections
if isinstance(c, mcoll.LineCollection)])
handles.extend([c for c in self.collections
if isinstance(c, mcoll.RegularPolyCollection)])
return handles
if len(args)==0:
handles = []
labels = []
for handle in get_handles():
label = handle.get_label()
if (label is not None and
label != '' and not label.startswith('_')):
handles.append(handle)
labels.append(label)
if len(handles) == 0:
warnings.warn("No labeled objects found. "
"Use label='...' kwarg on individual plots.")
return None
elif len(args)==1:
# LABELS
labels = args[0]
handles = [h for h, label in zip(get_handles(), labels)]
elif len(args)==2:
if is_string_like(args[1]) or isinstance(args[1], int):
# LABELS, LOC
labels, loc = args
handles = [h for h, label in zip(get_handles(), labels)]
kwargs['loc'] = loc
else:
# LINES, LABELS
handles, labels = args
elif len(args)==3:
# LINES, LABELS, LOC
handles, labels, loc = args
kwargs['loc'] = loc
else:
raise TypeError('Invalid arguments to legend')
handles = cbook.flatten(handles)
self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)
return self.legend_
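# A short sketch of the two legend() call paths documented above: labels
# picked up from the artists, then explicit handles and labels. Assumes the
# standard pyplot interface; illustrative only.
import numpy as np
import matplotlib.pyplot as plt
t = np.linspace(0.0, 2.0, 50)
line_sin, = plt.plot(t, np.sin(t), label='sin(t)')
line_cos, = plt.plot(t, np.cos(t), label='cos(t)')
plt.legend(loc='upper right')  # built from the artists' label properties
plt.legend((line_sin, line_cos), ('sine', 'cosine'), loc=2)  # explicit handles
plt.show()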
#### Specialized plotting
def step(self, x, y, *args, **kwargs):
'''
call signature::
step(x, y, *args, **kwargs)
Make a step plot. Additional keyword args to :func:`step` are the same
as those for :func:`~matplotlib.pyplot.plot`.
*x* and *y* must be 1-D sequences, and it is assumed, but not checked,
that *x* is uniformly increasing.
Keyword arguments:
*where*: [ 'pre' | 'post' | 'mid' ]
If 'pre', the interval from x[i] to x[i+1] has level y[i]
If 'post', that interval has level y[i+1]
If 'mid', the jumps in *y* occur half-way between the
*x*-values.
'''
where = kwargs.pop('where', 'pre')
if where not in ('pre', 'post', 'mid'):
raise ValueError("'where' argument to step must be "
"'pre', 'post' or 'mid'")
kwargs['linestyle'] = 'steps-' + where
return self.plot(x, y, *args, **kwargs)
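# A minimal step() sketch showing the three 'where' modes side by side
# (illustrative only; assumes numpy and pyplot).
import numpy as np
import matplotlib.pyplot as plt
xs = np.arange(10)
ys = np.sin(xs)
plt.step(xs, ys, where='pre', label='pre')
plt.step(xs, ys + 1, where='mid', label='mid')
plt.step(xs, ys + 2, where='post', label='post')
plt.legend()
plt.show()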
def bar(self, left, height, width=0.8, bottom=None,
color=None, edgecolor=None, linewidth=None,
yerr=None, xerr=None, ecolor=None, capsize=3,
align='edge', orientation='vertical', log=False,
**kwargs
):
"""
call signature::
bar(left, height, width=0.8, bottom=0,
color=None, edgecolor=None, linewidth=None,
yerr=None, xerr=None, ecolor=None, capsize=3,
align='edge', orientation='vertical', log=False)
Make a bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*left*, *height*, *width*, and *bottom* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ===============================================
Argument Description
======== ===============================================
*left* the x coordinates of the left sides of the bars
*height* the heights of the bars
======== ===============================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*width* the widths of the bars
*bottom* the y coordinates of the bottom edges of
the bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*align* 'edge' (default) | 'center'
*orientation* 'vertical' | 'horizontal'
*log* [False|True] False (default) leaves the
orientation axis as-is; True sets it to
log scale
=============== ==========================================
For vertical bars, *align* = 'edge' aligns bars by their left
edges in left, while *align* = 'center' interprets these
values as the *x* coordinates of the bar centers. For
horizontal bars, *align* = 'edge' aligns bars by their bottom
edges in bottom, while *align* = 'center' interprets these
values as the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
bar as the basis for stacked bar charts, or candlestick plots.
Other optional kwargs:
%(Rectangle)s
**Example:** A stacked bar chart.
.. plot:: mpl_examples/pylab_examples/bar_stacked.py
"""
if not self._hold: self.cla()
label = kwargs.pop('label', '')
def make_iterable(x):
if not iterable(x):
return [x]
else:
return x
# make them safe to take len() of
_left = left
left = make_iterable(left)
height = make_iterable(height)
width = make_iterable(width)
_bottom = bottom
bottom = make_iterable(bottom)
linewidth = make_iterable(linewidth)
adjust_ylim = False
adjust_xlim = False
if orientation == 'vertical':
self._process_unit_info(xdata=left, ydata=height, kwargs=kwargs)
if log:
self.set_yscale('log')
# size width and bottom according to length of left
if _bottom is None:
if self.get_yscale() == 'log':
bottom = [1e-100]
adjust_ylim = True
else:
bottom = [0]
nbars = len(left)
if len(width) == 1:
width *= nbars
if len(bottom) == 1:
bottom *= nbars
elif orientation == 'horizontal':
self._process_unit_info(xdata=width, ydata=bottom, kwargs=kwargs)
if log:
self.set_xscale('log')
# size left and height according to length of bottom
if _left is None:
if self.get_xscale() == 'log':
left = [1e-100]
adjust_xlim = True
else:
left = [0]
nbars = len(bottom)
if len(left) == 1:
left *= nbars
if len(height) == 1:
height *= nbars
else:
raise ValueError('invalid orientation: %s' % orientation)
# do not convert to array here as unit info is lost
#left = np.asarray(left)
#height = np.asarray(height)
#width = np.asarray(width)
#bottom = np.asarray(bottom)
if len(linewidth) < nbars:
linewidth *= nbars
if color is None:
color = [None] * nbars
else:
color = list(mcolors.colorConverter.to_rgba_array(color))
if len(color) < nbars:
color *= nbars
if edgecolor is None:
edgecolor = [None] * nbars
else:
edgecolor = list(mcolors.colorConverter.to_rgba_array(edgecolor))
if len(edgecolor) < nbars:
edgecolor *= nbars
if yerr is not None:
if not iterable(yerr):
yerr = [yerr]*nbars
if xerr is not None:
if not iterable(xerr):
xerr = [xerr]*nbars
# proper input validation: raise ValueError rather than using assert,
# so the checks survive ``python -O`` and give a consistent error type
if len(left) != nbars:
raise ValueError("argument 'left' must be %d or scalar" % nbars)
if len(height) != nbars:
raise ValueError("argument 'height' must be %d or scalar" % nbars)
if len(width) != nbars:
raise ValueError("argument 'width' must be %d or scalar" % nbars)
if len(bottom) != nbars:
raise ValueError("argument 'bottom' must be %d or scalar" % nbars)
if yerr is not None and len(yerr)!=nbars:
raise ValueError(
"bar() argument 'yerr' must be len(%s) or scalar" % nbars)
if xerr is not None and len(xerr)!=nbars:
raise ValueError(
"bar() argument 'xerr' must be len(%s) or scalar" % nbars)
patches = []
# lets do some conversions now since some types cannot be
# subtracted uniformly
if self.xaxis is not None:
xconv = self.xaxis.converter
if xconv is not None:
units = self.xaxis.get_units()
left = xconv.convert( left, units )
width = xconv.convert( width, units )
if self.yaxis is not None:
yconv = self.yaxis.converter
if yconv is not None :
units = self.yaxis.get_units()
bottom = yconv.convert( bottom, units )
height = yconv.convert( height, units )
if align == 'edge':
pass
elif align == 'center':
if orientation == 'vertical':
left = [left[i] - width[i]/2. for i in xrange(len(left))]
elif orientation == 'horizontal':
bottom = [bottom[i] - height[i]/2. for i in xrange(len(bottom))]
else:
raise ValueError('invalid alignment: %s' % align)
args = zip(left, bottom, width, height, color, edgecolor, linewidth)
for l, b, w, h, c, e, lw in args:
if h<0:
b += h
h = abs(h)
if w<0:
l += w
w = abs(w)
r = mpatches.Rectangle(
xy=(l, b), width=w, height=h,
facecolor=c,
edgecolor=e,
linewidth=lw,
label=label
)
label = '_nolegend_'
r.update(kwargs)
#print r.get_label(), label, 'label' in kwargs
self.add_patch(r)
patches.append(r)
holdstate = self._hold
self.hold(True) # ensure hold is on before plotting errorbars
if xerr is not None or yerr is not None:
if orientation == 'vertical':
# using list comps rather than arrays to preserve unit info
x = [l+0.5*w for l, w in zip(left, width)]
y = [b+h for b,h in zip(bottom, height)]
elif orientation == 'horizontal':
# using list comps rather than arrays to preserve unit info
x = [l+w for l,w in zip(left, width)]
y = [b+0.5*h for b,h in zip(bottom, height)]
self.errorbar(
x, y,
yerr=yerr, xerr=xerr,
fmt=None, ecolor=ecolor, capsize=capsize)
self.hold(holdstate) # restore previous hold state
if adjust_xlim:
xmin, xmax = self.dataLim.intervalx
xmin = np.amin(width[width!=0]) # filter out the 0 width rects
if xerr is not None:
xmin = xmin - np.amax(xerr)
xmin = max(xmin*0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
if adjust_ylim:
ymin, ymax = self.dataLim.intervaly
ymin = np.amin(height[height!=0]) # filter out the 0 height rects
if yerr is not None:
ymin = ymin - np.amax(yerr)
ymin = max(ymin*0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
return patches
bar.__doc__ = cbook.dedent(bar.__doc__) % martist.kwdocd
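# A hedged sketch of a stacked bar chart built from two bar() calls with
# error bars, in the spirit of the bar_stacked example referenced above.
import numpy as np
import matplotlib.pyplot as plt
ind = np.arange(5)  # bar positions
men = np.array([20, 35, 30, 35, 27])
women = np.array([25, 32, 34, 20, 25])
plt.bar(ind, men, width=0.35, color='r', yerr=2)
plt.bar(ind, women, width=0.35, color='y', bottom=men, yerr=3)
plt.show()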
def barh(self, bottom, width, height=0.8, left=None, **kwargs):
"""
call signature::
barh(bottom, width, height=0.8, left=0, **kwargs)
Make a horizontal bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*bottom*, *width*, *height*, and *left* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ======================================================
Argument Description
======== ======================================================
*bottom* the vertical positions of the bottom edges of the bars
*width* the lengths of the bars
======== ======================================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*height* the heights (thicknesses) of the bars
*left* the x coordinates of the left edges of the
bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*align* 'edge' (default) | 'center'
*log* [False|True] False (default) leaves the
horizontal axis as-is; True sets it to log
scale
=============== ==========================================
Setting *align* = 'edge' aligns bars by their bottom edges in
bottom, while *align* = 'center' interprets these values as
the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
barh as the basis for stacked bar charts, or candlestick
plots.
other optional kwargs:
%(Rectangle)s
"""
patches = self.bar(left=left, height=height, width=width, bottom=bottom,
orientation='horizontal', **kwargs)
return patches
barh.__doc__ = cbook.dedent(barh.__doc__) % martist.kwdocd
def broken_barh(self, xranges, yrange, **kwargs):
"""
call signature::
broken_barh(self, xranges, yrange, **kwargs)
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
Required arguments:
========= ==============================
Argument Description
========= ==============================
*xranges* sequence of (*xmin*, *xwidth*)
*yrange* sequence of (*ymin*, *ywidth*)
========= ==============================
kwargs are
:class:`matplotlib.collections.BrokenBarHCollection`
properties:
%(BrokenBarHCollection)s
these can either be a single argument, ie::
facecolors = 'black'
or a sequence of arguments for the various bars, ie::
facecolors = ('black', 'red', 'green')
**Example:**
.. plot:: mpl_examples/pylab_examples/broken_barh.py
"""
col = mcoll.BrokenBarHCollection(xranges, yrange, **kwargs)
self.add_collection(col, autolim=True)
self.autoscale_view()
return col
broken_barh.__doc__ = cbook.dedent(broken_barh.__doc__) % martist.kwdocd
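# A minimal broken_barh() sketch: two horizontal bands of disjoint spans,
# one with a single facecolor and one with a color per span (illustrative only).
import matplotlib.pyplot as plt
plt.broken_barh([(110, 30), (150, 10)], (10, 9), facecolors='blue')
plt.broken_barh([(10, 50), (100, 20), (130, 10)], (20, 9),
facecolors=('red', 'yellow', 'green'))
plt.xlim(0, 200)
plt.show()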
def stem(self, x, y, linefmt='b-', markerfmt='bo', basefmt='r-'):
"""
call signature::
stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-')
A stem plot plots vertical lines (using *linefmt*) at each *x*
location from the baseline to *y*, and places a marker there
using *markerfmt*. A horizontal line at 0 is plotted using
*basefmt*.
Return value is a tuple (*markerline*, *stemlines*,
*baseline*).
.. seealso::
`this document`__ for details
:file:`examples/pylab_examples/stem_plot.py`:
for a demo
__ http://www.mathworks.com/access/helpdesk/help/techdoc/ref/stem.html
"""
remember_hold=self._hold
if not self._hold: self.cla()
self.hold(True)
markerline, = self.plot(x, y, markerfmt)
stemlines = []
for thisx, thisy in zip(x, y):
l, = self.plot([thisx,thisx], [0, thisy], linefmt)
stemlines.append(l)
baseline, = self.plot([np.amin(x), np.amax(x)], [0,0], basefmt)
self.hold(remember_hold)
return markerline, stemlines, baseline
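# A short stem() sketch using the default line/marker/base formats; the
# three return values match the tuple documented above (illustrative only).
import numpy as np
import matplotlib.pyplot as plt
xs = np.linspace(0.1, 2 * np.pi, 10)
markerline, stemlines, baseline = plt.stem(xs, np.cos(xs),
linefmt='b-', markerfmt='bo', basefmt='r-')
plt.show()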
def pie(self, x, explode=None, labels=None, colors=None,
autopct=None, pctdistance=0.6, shadow=False,
labeldistance=1.1):
r"""
call signature::
pie(x, explode=None, labels=None,
colors=('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'),
autopct=None, pctdistance=0.6, labeldistance=1.1, shadow=False)
Make a pie chart of array *x*. The fractional area of each
wedge is given by x/sum(x). If sum(x) <= 1, then the values
of x give the fractional area directly and the array will not
be normalized.
Keyword arguments:
*explode*: [ None | len(x) sequence ]
If not *None*, is a len(*x*) array which specifies the
fraction of the radius with which to offset each wedge.
*colors*: [ None | color sequence ]
A sequence of matplotlib color args through which the pie chart
will cycle.
*labels*: [ None | len(x) sequence of strings ]
A sequence of strings providing the labels for each wedge
*autopct*: [ None | format string | format function ]
If not *None*, is a string or function used to label the
wedges with their numeric value. The label will be placed inside
the wedge. If it is a format string, the label will be ``fmt%pct``.
If it is a function, it will be called.
*pctdistance*: scalar
The ratio between the center of each pie slice and the
start of the text generated by *autopct*. Ignored if
*autopct* is *None*; default is 0.6.
*labeldistance*: scalar
The radial distance at which the pie labels are drawn
*shadow*: [ False | True ]
Draw a shadow beneath the pie.
The pie chart will probably look best if the figure and axes are
square, e.g.::
figure(figsize=(8,8))
ax = axes([0.1, 0.1, 0.8, 0.8])
Return value:
If *autopct* is None, return the tuple (*patches*, *texts*):
- *patches* is a sequence of
:class:`matplotlib.patches.Wedge` instances
- *texts* is a list of the label
:class:`matplotlib.text.Text` instances.
If *autopct* is not *None*, return the tuple (*patches*,
*texts*, *autotexts*), where *patches* and *texts* are as
above, and *autotexts* is a list of
:class:`~matplotlib.text.Text` instances for the numeric
labels.
"""
self.set_frame_on(False)
x = np.asarray(x).astype(np.float32)
sx = float(x.sum())
if sx>1: x = np.divide(x,sx)
if labels is None: labels = ['']*len(x)
if explode is None: explode = [0]*len(x)
assert(len(x)==len(labels))
assert(len(x)==len(explode))
if colors is None: colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w')
center = 0,0
radius = 1
theta1 = 0
i = 0
texts = []
slices = []
autotexts = []
for frac, label, expl in cbook.safezip(x,labels, explode):
x, y = center
theta2 = theta1 + frac
thetam = 2*math.pi*0.5*(theta1+theta2)
x += expl*math.cos(thetam)
y += expl*math.sin(thetam)
w = mpatches.Wedge((x,y), radius, 360.*theta1, 360.*theta2,
facecolor=colors[i%len(colors)])
slices.append(w)
self.add_patch(w)
w.set_label(label)
if shadow:
# make sure to add a shadow after the call to
# add_patch so the figure and transform props will be
# set
shad = mpatches.Shadow(w, -0.02, -0.02,
#props={'facecolor':w.get_facecolor()}
)
shad.set_zorder(0.9*w.get_zorder())
self.add_patch(shad)
xt = x + labeldistance*radius*math.cos(thetam)
yt = y + labeldistance*radius*math.sin(thetam)
label_alignment = 'left' if xt > 0 else 'right'
t = self.text(xt, yt, label,
size=rcParams['xtick.labelsize'],
horizontalalignment=label_alignment,
verticalalignment='center')
texts.append(t)
if autopct is not None:
xt = x + pctdistance*radius*math.cos(thetam)
yt = y + pctdistance*radius*math.sin(thetam)
if is_string_like(autopct):
s = autopct%(100.*frac)
elif callable(autopct):
s = autopct(100.*frac)
else:
raise TypeError(
'autopct must be callable or a format string')
t = self.text(xt, yt, s,
horizontalalignment='center',
verticalalignment='center')
autotexts.append(t)
theta1 = theta2
i += 1
self.set_xlim((-1.25, 1.25))
self.set_ylim((-1.25, 1.25))
self.set_xticks([])
self.set_yticks([])
if autopct is None: return slices, texts
else: return slices, texts, autotexts
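# A hedged pie() sketch: fractional areas with one exploded wedge, percentage
# labels via autopct, and a shadow, on a square figure (illustrative only).
import matplotlib.pyplot as plt
plt.figure(figsize=(8, 8))
fracs = [15, 30, 45, 10]
plt.pie(fracs, explode=(0, 0.05, 0, 0),
labels=('Frogs', 'Hogs', 'Dogs', 'Logs'),
autopct='%1.1f%%', shadow=True)
plt.show()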
def errorbar(self, x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, **kwargs):
"""
call signature::
errorbar(x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False)
Plot *x* versus *y* with error deltas in *yerr* and *xerr*.
Vertical errorbars are plotted if *yerr* is not *None*.
Horizontal errorbars are plotted if *xerr* is not *None*.
*x*, *y*, *xerr*, and *yerr* can all be scalars, which plots a
single error bar at *x*, *y*.
Optional keyword arguments:
*xerr*/*yerr*: [ scalar | N, Nx1, 2xN array-like ]
If a scalar number, a length-N array-like object, or an Nx1
array-like object, symmetric errorbars of +/- that value are drawn.
If a 2xN array-like, the first row gives the lower (-) errors and
the second row gives the upper (+) errors.
*fmt*: '-'
The plot format symbol for *y*. If *fmt* is *None*, just plot the
errorbars with no line symbols. This can be useful for creating a
bar plot with errorbars.
*ecolor*: [ None | mpl color ]
a matplotlib color arg which gives the color the errorbar lines; if
*None*, use the marker color.
*elinewidth*: scalar
the linewidth of the errorbar lines. If *None*, use the linewidth.
*capsize*: scalar
the size of the error bar caps in points
*barsabove*: [ True | False ]
if *True*, will plot the errorbars above the plot
symbols. Default is below.
*lolims*/*uplims*/*xlolims*/*xuplims*: [ False | True ]
These arguments can be used to indicate that a value gives
only upper/lower limits. In that case a caret symbol is
used to indicate this. lims-arguments may be of the same
type as *xerr* and *yerr*.
All other keyword arguments are passed on to the plot command for the
markers, so you can add additional key=value pairs to control the
errorbar markers. For example, this code makes big red squares with
thick green edges::
x,y,yerr = rand(3,10)
errorbar(x, y, yerr, marker='s',
mfc='red', mec='green', ms=20, mew=4)
where *mfc*, *mec*, *ms* and *mew* are aliases for the longer
property names, *markerfacecolor*, *markeredgecolor*, *markersize*
and *markeredgewidth*.
valid kwargs for the marker properties are
%(Line2D)s
Return value is a length 3 tuple. The first element is the
:class:`~matplotlib.lines.Line2D` instance for the *y* symbol
lines. The second element is a list of error bar cap lines,
the third element is a list of
:class:`~matplotlib.collections.LineCollection` instances for
the horizontal and vertical error ranges.
**Example:**
.. plot:: mpl_examples/pylab_examples/errorbar_demo.py
"""
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
if not self._hold: self.cla()
# make sure all the args are iterable; use lists not arrays to
# preserve units
if not iterable(x):
x = [x]
if not iterable(y):
y = [y]
if xerr is not None:
if not iterable(xerr):
xerr = [xerr]*len(x)
if yerr is not None:
if not iterable(yerr):
yerr = [yerr]*len(y)
l0 = None
if barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,**kwargs)
barcols = []
caplines = []
lines_kw = {'label':'_nolegend_'}
if elinewidth:
lines_kw['linewidth'] = elinewidth
else:
if 'linewidth' in kwargs:
lines_kw['linewidth']=kwargs['linewidth']
if 'lw' in kwargs:
lines_kw['lw']=kwargs['lw']
if 'transform' in kwargs:
lines_kw['transform'] = kwargs['transform']
# arrays fine here, they are booleans and hence not units
if not iterable(lolims):
lolims = np.asarray([lolims]*len(x), bool)
else: lolims = np.asarray(lolims, bool)
if not iterable(uplims): uplims = np.array([uplims]*len(x), bool)
else: uplims = np.asarray(uplims, bool)
if not iterable(xlolims): xlolims = np.array([xlolims]*len(x), bool)
else: xlolims = np.asarray(xlolims, bool)
if not iterable(xuplims): xuplims = np.array([xuplims]*len(x), bool)
else: xuplims = np.asarray(xuplims, bool)
def xywhere(xs, ys, mask):
"""
return xs[mask], ys[mask] where mask is True but xs and
ys are not arrays
"""
assert len(xs)==len(ys)
assert len(xs)==len(mask)
xs = [thisx for thisx, b in zip(xs, mask) if b]
ys = [thisy for thisy, b in zip(ys, mask) if b]
return xs, ys
if capsize > 0:
plot_kw = {
'ms':2*capsize,
'label':'_nolegend_'}
if 'markeredgewidth' in kwargs:
plot_kw['markeredgewidth']=kwargs['markeredgewidth']
if 'mew' in kwargs:
plot_kw['mew']=kwargs['mew']
if 'transform' in kwargs:
plot_kw['transform'] = kwargs['transform']
if xerr is not None:
if (iterable(xerr) and len(xerr)==2 and
iterable(xerr[0]) and iterable(xerr[1])):
# using list comps rather than arrays to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[0])]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[1])]
else:
# using list comps rather than arrays to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
barcols.append( self.hlines(y, left, right, **lines_kw ) )
if capsize > 0:
if xlolims.any():
# can't use numpy logical indexing since left and
# y are lists
leftlo, ylo = xywhere(left, y, xlolims)
caplines.extend(
self.plot(leftlo, ylo, ls='None',
marker=mlines.CARETLEFT, **plot_kw) )
xlolims = ~xlolims
leftlo, ylo = xywhere(left, y, xlolims)
caplines.extend( self.plot(leftlo, ylo, 'k|', **plot_kw) )
else:
caplines.extend( self.plot(left, y, 'k|', **plot_kw) )
if xuplims.any():
rightup, yup = xywhere(right, y, xuplims)
caplines.extend(
self.plot(rightup, yup, ls='None',
marker=mlines.CARETRIGHT, **plot_kw) )
xuplims = ~xuplims
rightup, yup = xywhere(right, y, xuplims)
caplines.extend( self.plot(rightup, yup, 'k|', **plot_kw) )
else:
caplines.extend( self.plot(right, y, 'k|', **plot_kw) )
if yerr is not None:
if (iterable(yerr) and len(yerr)==2 and
iterable(yerr[0]) and iterable(yerr[1])):
# using list comps rather than arrays to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[0])]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[1])]
else:
# using list comps rather than arrays to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
barcols.append( self.vlines(x, lower, upper, **lines_kw) )
if capsize > 0:
if lolims.any():
xlo, lowerlo = xywhere(x, lower, lolims)
caplines.extend(
self.plot(xlo, lowerlo, ls='None',
marker=mlines.CARETDOWN, **plot_kw) )
lolims = ~lolims
xlo, lowerlo = xywhere(x, lower, lolims)
caplines.extend( self.plot(xlo, lowerlo, 'k_', **plot_kw) )
else:
caplines.extend( self.plot(x, lower, 'k_', **plot_kw) )
if uplims.any():
xup, upperup = xywhere(x, upper, uplims)
caplines.extend(
self.plot(xup, upperup, ls='None',
marker=mlines.CARETUP, **plot_kw) )
uplims = ~uplims
xup, upperup = xywhere(x, upper, uplims)
caplines.extend( self.plot(xup, upperup, 'k_', **plot_kw) )
else:
caplines.extend( self.plot(x, upper, 'k_', **plot_kw) )
if not barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,**kwargs)
if ecolor is None:
if l0 is None:
ecolor = self._get_lines._get_next_cycle_color()
else:
ecolor = l0.get_color()
for l in barcols:
l.set_color(ecolor)
for l in caplines:
l.set_color(ecolor)
self.autoscale_view()
return (l0, caplines, barcols)
errorbar.__doc__ = cbook.dedent(errorbar.__doc__) % martist.kwdocd
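# A minimal errorbar() sketch with asymmetric y errors (2xN: lower then upper
# row) and a scalar x error, as documented above (illustrative only).
import numpy as np
import matplotlib.pyplot as plt
xs = np.arange(0.1, 4.0, 0.5)
ys = np.exp(-xs)
yerr = np.vstack([0.1 * ys, 0.2 * ys])  # first row: lower, second row: upper
plt.errorbar(xs, ys, yerr=yerr, xerr=0.05, fmt='o', ecolor='g', capsize=3)
plt.show()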
def boxplot(self, x, notch=0, sym='b+', vert=1, whis=1.5,
positions=None, widths=None):
"""
call signature::
boxplot(x, notch=0, sym='+', vert=1, whis=1.5,
positions=None, widths=None)
Make a box and whisker plot for each column of *x* or each
vector in sequence *x*. The box extends from the lower to
upper quartile values of the data, with a line at the median.
The whiskers extend from the box to show the range of the
data. Flier points are those past the end of the whiskers.
- *notch* = 0 (default) produces a rectangular box plot.
- *notch* = 1 will produce a notched box plot
*sym* (default 'b+') is the default symbol for flier points.
Enter an empty string ('') if you don't want to show fliers.
- *vert* = 1 (default) makes the boxes vertical.
- *vert* = 0 makes horizontal boxes. This seems goofy, but
that's how Matlab did it.
*whis* (default 1.5) defines the length of the whiskers as
a function of the inner quartile range. They extend to the
most extreme data point within ( ``whis*(75%-25%)`` ) data range.
*positions* (default 1,2,...,n) sets the horizontal positions of
the boxes. The ticks and limits are automatically set to match
the positions.
*widths* is either a scalar or a vector and sets the width of
each box. The default is 0.5, or ``0.15*(distance between extreme
positions)`` if that is smaller.
*x* is an array or a sequence of vectors.
Returns a dictionary mapping each component of the boxplot
to a list of the :class:`matplotlib.lines.Line2D`
instances created.
**Example:**
.. plot:: pyplots/boxplot_demo.py
"""
if not self._hold: self.cla()
holdStatus = self._hold
whiskers, caps, boxes, medians, fliers = [], [], [], [], []
# convert x to a list of vectors
if hasattr(x, 'shape'):
if len(x.shape) == 1:
if hasattr(x[0], 'shape'):
x = list(x)
else:
x = [x,]
elif len(x.shape) == 2:
nr, nc = x.shape
if nr == 1:
x = [x]
elif nc == 1:
x = [x.ravel()]
else:
x = [x[:,i] for i in xrange(nc)]
else:
raise ValueError, "input x can have no more than 2 dimensions"
if not hasattr(x[0], '__len__'):
x = [x]
col = len(x)
# get some plot info
if positions is None:
positions = range(1, col + 1)
if widths is None:
distance = max(positions) - min(positions)
widths = min(0.15*max(distance,1.0), 0.5)
if isinstance(widths, (float, int)):
widths = np.ones((col,), float) * widths
# loop through columns, adding each to plot
self.hold(True)
for i,pos in enumerate(positions):
d = np.ravel(x[i])
row = len(d)
# get median and quartiles
q1, med, q3 = mlab.prctile(d,[25,50,75])
# get high extreme
iq = q3 - q1
hi_val = q3 + whis*iq
wisk_hi = np.compress( d <= hi_val , d )
if len(wisk_hi) == 0:
wisk_hi = q3
else:
wisk_hi = max(wisk_hi)
# get low extreme
lo_val = q1 - whis*iq
wisk_lo = np.compress( d >= lo_val, d )
if len(wisk_lo) == 0:
wisk_lo = q1
else:
wisk_lo = min(wisk_lo)
# get fliers - if we are showing them
flier_hi = []
flier_lo = []
flier_hi_x = []
flier_lo_x = []
if len(sym) != 0:
flier_hi = np.compress( d > wisk_hi, d )
flier_lo = np.compress( d < wisk_lo, d )
flier_hi_x = np.ones(flier_hi.shape[0]) * pos
flier_lo_x = np.ones(flier_lo.shape[0]) * pos
# get x locations for fliers, whisker, whisker cap and box sides
box_x_min = pos - widths[i] * 0.5
box_x_max = pos + widths[i] * 0.5
wisk_x = np.ones(2) * pos
cap_x_min = pos - widths[i] * 0.25
cap_x_max = pos + widths[i] * 0.25
cap_x = [cap_x_min, cap_x_max]
# get y location for median
med_y = [med, med]
# calculate 'regular' plot
if notch == 0:
# make our box vectors
box_x = [box_x_min, box_x_max, box_x_max, box_x_min, box_x_min ]
box_y = [q1, q1, q3, q3, q1 ]
# make our median line vectors
med_x = [box_x_min, box_x_max]
# calculate 'notch' plot
else:
notch_max = med + 1.57*iq/np.sqrt(row)
notch_min = med - 1.57*iq/np.sqrt(row)
if notch_max > q3:
notch_max = q3
if notch_min < q1:
notch_min = q1
# make our notched box vectors
box_x = [box_x_min, box_x_max, box_x_max, cap_x_max, box_x_max,
box_x_max, box_x_min, box_x_min, cap_x_min, box_x_min,
box_x_min ]
box_y = [q1, q1, notch_min, med, notch_max, q3, q3, notch_max,
med, notch_min, q1]
# make our median line vectors
med_x = [cap_x_min, cap_x_max]
med_y = [med, med]
# vertical or horizontal plot?
if vert:
def doplot(*args):
return self.plot(*args)
else:
def doplot(*args):
shuffled = []
for i in xrange(0, len(args), 3):
shuffled.extend([args[i+1], args[i], args[i+2]])
return self.plot(*shuffled)
whiskers.extend(doplot(wisk_x, [q1, wisk_lo], 'b--',
wisk_x, [q3, wisk_hi], 'b--'))
caps.extend(doplot(cap_x, [wisk_hi, wisk_hi], 'k-',
cap_x, [wisk_lo, wisk_lo], 'k-'))
boxes.extend(doplot(box_x, box_y, 'b-'))
medians.extend(doplot(med_x, med_y, 'r-'))
fliers.extend(doplot(flier_hi_x, flier_hi, sym,
flier_lo_x, flier_lo, sym))
# fix our axes/ticks up a little
if 1 == vert:
setticks, setlim = self.set_xticks, self.set_xlim
else:
setticks, setlim = self.set_yticks, self.set_ylim
newlimits = min(positions)-0.5, max(positions)+0.5
setlim(newlimits)
setticks(positions)
# reset hold status
self.hold(holdStatus)
return dict(whiskers=whiskers, caps=caps, boxes=boxes,
medians=medians, fliers=fliers)
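# A short boxplot() sketch: one box per column of a 2-D array, with notches
# and green '+' flier symbols (illustrative only; assumes numpy and pyplot).
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(1)
data = np.random.lognormal(size=(50, 4))
plt.boxplot(data, notch=1, sym='g+', whis=1.5)
plt.show()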
def scatter(self, x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=1.0, linewidths=None,
faceted=True, verts=None,
**kwargs):
"""
call signatures::
scatter(x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=1.0, linewidths=None,
verts=None, **kwargs)
Make a scatter plot of *x* versus *y*, where *x*, *y* are 1-D
sequences of the same length, *N*.
Keyword arguments:
*s*:
size in points^2. It is a scalar or an array of the same
length as *x* and *y*.
*c*:
a color. *c* can be a single color format string, or a
sequence of color specifications of length *N*, or a
sequence of *N* numbers to be mapped to colors using the
*cmap* and *norm* specified via kwargs (see below). Note
that *c* should not be a single numeric RGB or RGBA
sequence because that is indistinguishable from an array
of values to be colormapped. *c* can be a 2-D array in
which the rows are RGB or RGBA, however.
*marker*:
can be one of:
===== ==============
Value Description
===== ==============
's' square
'o' circle
'^' triangle up
'>' triangle right
'v' triangle down
'<' triangle left
'd' diamond
'p' pentagram
'h' hexagon
'8' octagon
'+' plus
'x' cross
===== ==============
The marker can also be a tuple (*numsides*, *style*,
*angle*), which will create a custom, regular symbol.
*numsides*:
the number of sides
*style*:
the style of the regular symbol:
===== =============================================
Value Description
===== =============================================
0 a regular polygon
1 a star-like symbol
2 an asterisk
3 a circle (*numsides* and *angle* are ignored)
===== =============================================
*angle*:
the angle of rotation of the symbol
Finally, *marker* can be (*verts*, 0): *verts* is a
sequence of (*x*, *y*) vertices for a custom scatter
symbol. Alternatively, use the kwarg combination
*marker* = *None*, *verts* = *verts*.
Any or all of *x*, *y*, *s*, and *c* may be masked arrays, in
which case all masks will be combined and only unmasked points
will be plotted.
Other keyword arguments: the color mapping and normalization
arguments will be used only if *c* is an array of floats.
*cmap*: [ None | Colormap ]
A :class:`matplotlib.colors.Colormap` instance. If *None*,
defaults to rc ``image.cmap``. *cmap* is only used if *c*
is an array of floats.
*norm*: [ None | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0, 1. If *None*, use the default
:class:`~matplotlib.colors.Normalize`. *norm* is only used if *c* is an array
of floats.
*vmin*/*vmax*:
*vmin* and *vmax* are used in conjunction with norm to
normalize luminance data. If either is *None*, the min and
max of the color array *C* are used. Note if you pass a
*norm* instance, your settings for *vmin* and *vmax* will
be ignored.
*alpha*: 0 <= scalar <= 1
The alpha value for the patches
*linewidths*: [ None | scalar | sequence ]
If *None*, defaults to (lines.linewidth,). Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Optional kwargs control the
:class:`~matplotlib.collections.Collection` properties; in
particular:
*edgecolors*:
'none' to plot faces with no outlines
*facecolors*:
'none' to plot unfilled outlines
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
A :class:`~matplotlib.collections.Collection` instance is
returned.
"""
if not self._hold: self.cla()
syms = { # a dict from symbol to (numsides, angle)
's' : (4,math.pi/4.0,0), # square
'o' : (20,3,0), # circle
'^' : (3,0,0), # triangle up
'>' : (3,math.pi/2.0,0), # triangle right
'v' : (3,math.pi,0), # triangle down
'<' : (3,3*math.pi/2.0,0), # triangle left
'd' : (4,0,0), # diamond
'p' : (5,0,0), # pentagram
'h' : (6,0,0), # hexagon
'8' : (8,0,0), # octagon
'+' : (4,0,2), # plus
'x' : (4,math.pi/4.0,2) # cross
}
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, s, c = cbook.delete_masked_points(x, y, s, c)
if is_string_like(c) or cbook.is_sequence_of_strings(c):
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
else:
sh = np.shape(c)
# The inherent ambiguity is resolved in favor of color
# mapping, not interpretation as rgb or rgba:
if len(sh) == 1 and sh[0] == len(x):
colors = None # use cmap, norm after collection is created
else:
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
if not iterable(s):
scales = (s,)
else:
scales = s
if faceted:
edgecolors = None
else:
edgecolors = 'none'
warnings.warn(
'''replace "faceted=False" with "edgecolors='none'"''',
DeprecationWarning) #2008/04/18
sym = None
symstyle = 0
# to be API compatible
if marker is None and not (verts is None):
marker = (verts, 0)
verts = None
if is_string_like(marker):
# the standard way to define symbols using a string character
sym = syms.get(marker)
if sym is None and verts is None:
raise ValueError('Unknown marker symbol to scatter')
numsides, rotation, symstyle = syms[marker]
elif iterable(marker):
# accept marker to be:
# (numsides, style, [angle])
# or
# (verts[], style, [angle])
if len(marker)<2 or len(marker)>3:
raise ValueError('Cannot create markersymbol from marker')
if cbook.is_numlike(marker[0]):
# (numsides, style, [angle])
if len(marker)==2:
numsides, rotation = marker[0], 0.
elif len(marker)==3:
numsides, rotation = marker[0], marker[2]
sym = True
if marker[1] in (1,2):
symstyle = marker[1]
else:
verts = np.asarray(marker[0])
if sym is not None:
if symstyle==0:
collection = mcoll.RegularPolyCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==1:
collection = mcoll.StarPolygonCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==2:
collection = mcoll.AsteriskPolygonCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==3:
collection = mcoll.CircleCollection(
scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
else:
rescale = np.sqrt(max(verts[:,0]**2+verts[:,1]**2))
verts /= rescale
collection = mcoll.PolyCollection(
(verts,), scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
collection.set_transform(mtransforms.IdentityTransform())
collection.set_alpha(alpha)
collection.update(kwargs)
if colors is None:
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_array(np.asarray(c))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
temp_x = x
temp_y = y
minx = np.amin(temp_x)
maxx = np.amax(temp_x)
miny = np.amin(temp_y)
maxy = np.amax(temp_y)
w = maxx-minx
h = maxy-miny
# the pad is a little hack to deal with the fact that we don't
# want to transform all the symbols whose scales are in points
# to data coords to get the exact bounding box for efficiency
# reasons. It can be done right if this is deemed important
padx, pady = 0.05*w, 0.05*h
corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
self.update_datalim( corners)
self.autoscale_view()
# add the collection last
self.add_collection(collection)
return collection
scatter.__doc__ = cbook.dedent(scatter.__doc__) % martist.kwdocd
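# A minimal scatter() sketch: per-point sizes (points^2) and a float array
# mapped through the colormap, as described above (illustrative only).
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(2)
xs, ys = np.random.rand(2, 100)
sizes = 200.0 * np.random.rand(100)  # marker areas in points^2
vals = np.random.rand(100)  # values mapped through cmap/norm
plt.scatter(xs, ys, s=sizes, c=vals, marker='o', alpha=0.75)
plt.colorbar()
plt.show()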
def hexbin(self, x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear',
cmap=None, norm=None, vmin=None, vmax=None,
alpha=1.0, linewidths=None, edgecolors='none',
reduce_C_function = np.mean,
**kwargs):
"""
call signature::
hexbin(x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear',
cmap=None, norm=None, vmin=None, vmax=None,
alpha=1.0, linewidths=None, edgecolors='none'
reduce_C_function = np.mean,
**kwargs)
Make a hexagonal binning plot of *x* versus *y*, where *x*,
*y* are 1-D sequences of the same length, *N*. If *C* is None
(the default), this is a histogram of the number of occurrences
of the observations at (x[i],y[i]).
If *C* is specified, it specifies values at the coordinate
(x[i],y[i]). These values are accumulated for each hexagonal
bin and then reduced according to *reduce_C_function*, which
defaults to numpy's mean function (np.mean). (If *C* is
specified, it must also be a 1-D sequence of the same length
as *x* and *y*.)
*x*, *y* and/or *C* may be masked arrays, in which case only
unmasked points will be plotted.
Optional keyword arguments:
*gridsize*: [ 100 | integer ]
The number of hexagons in the *x*-direction, default is
100. The corresponding number of hexagons in the
*y*-direction is chosen such that the hexagons are
approximately regular. Alternatively, gridsize can be a
tuple with two elements specifying the number of hexagons
in the *x*-direction and the *y*-direction.
*bins*: [ None | 'log' | integer | sequence ]
If *None*, no binning is applied; the color of each hexagon
directly corresponds to its count value.
If 'log', use a logarithmic scale for the color
map. Internally, :math:`\log_{10}(i+1)` is used to
determine the hexagon color.
If an integer, divide the counts in the specified number
of bins, and color the hexagons accordingly.
If a sequence of values, the values of the lower bound of
the bins to be used.
*xscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the horizontal axis.
*yscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the vertical axis.
Other keyword arguments controlling color mapping and normalization
arguments:
*cmap*: [ None | Colormap ]
a :class:`matplotlib.cm.Colormap` instance. If *None*,
defaults to rc ``image.cmap``.
*norm*: [ None | Normalize ]
:class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1.
*vmin*/*vmax*: scalar
*vmin* and *vmax* are used in conjunction with *norm* to normalize
luminance data. If either is *None*, the min and max of the color
array *C* are used. Note if you pass a norm instance, your settings
for *vmin* and *vmax* will be ignored.
*alpha*: scalar
the alpha value for the patches
*linewidths*: [ None | scalar ]
If *None*, defaults to rc lines.linewidth. Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Other keyword arguments controlling the Collection properties:
*edgecolors*: [ None | mpl color | color sequence ]
If 'none', draws the edges in the same color as the fill color.
This is the default, as it avoids unsightly unpainted pixels
between the hexagons.
If *None*, draws the outlines in the default color.
If a matplotlib color arg or sequence of rgba tuples, draws the
outlines in the specified color.
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
The return value is a
:class:`~matplotlib.collections.PolyCollection` instance; use
:meth:`~matplotlib.collections.PolyCollection.get_array` on
this :class:`~matplotlib.collections.PolyCollection` to get
the counts in each hexagon.
**Example:**
.. plot:: mpl_examples/pylab_examples/hexbin_demo.py
"""
if not self._hold: self.cla()
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, C = cbook.delete_masked_points(x, y, C)
# Set the size of the hexagon grid
if iterable(gridsize):
nx, ny = gridsize
else:
nx = gridsize
ny = int(nx/math.sqrt(3))
# Count the number of data in each hexagon
x = np.array(x, float)
y = np.array(y, float)
if xscale=='log':
x = np.log10(x)
if yscale=='log':
y = np.log10(y)
xmin = np.amin(x)
xmax = np.amax(x)
ymin = np.amin(y)
ymax = np.amax(y)
# In the x-direction, the hexagons exactly cover the region from
# xmin to xmax. Need some padding to avoid roundoff errors.
padding = 1.e-9 * (xmax - xmin)
xmin -= padding
xmax += padding
sx = (xmax-xmin) / nx
sy = (ymax-ymin) / ny
x = (x-xmin)/sx
y = (y-ymin)/sy
ix1 = np.round(x).astype(int)
iy1 = np.round(y).astype(int)
ix2 = np.floor(x).astype(int)
iy2 = np.floor(y).astype(int)
nx1 = nx + 1
ny1 = ny + 1
nx2 = nx
ny2 = ny
n = nx1*ny1+nx2*ny2
d1 = (x-ix1)**2 + 3.0 * (y-iy1)**2
d2 = (x-ix2-0.5)**2 + 3.0 * (y-iy2-0.5)**2
bdist = (d1<d2)
if C is None:
accum = np.zeros(n)
# Create appropriate views into "accum" array.
lattice1 = accum[:nx1*ny1]
lattice2 = accum[nx1*ny1:]
lattice1.shape = (nx1,ny1)
lattice2.shape = (nx2,ny2)
for i in xrange(len(x)):
if bdist[i]:
lattice1[ix1[i], iy1[i]]+=1
else:
lattice2[ix2[i], iy2[i]]+=1
else:
# create accumulation arrays
lattice1 = np.empty((nx1,ny1),dtype=object)
for i in xrange(nx1):
for j in xrange(ny1):
lattice1[i,j] = []
lattice2 = np.empty((nx2,ny2),dtype=object)
for i in xrange(nx2):
for j in xrange(ny2):
lattice2[i,j] = []
for i in xrange(len(x)):
if bdist[i]:
lattice1[ix1[i], iy1[i]].append( C[i] )
else:
lattice2[ix2[i], iy2[i]].append( C[i] )
for i in xrange(nx1):
for j in xrange(ny1):
vals = lattice1[i,j]
if len(vals):
lattice1[i,j] = reduce_C_function( vals )
else:
lattice1[i,j] = np.nan
for i in xrange(nx2):
for j in xrange(ny2):
vals = lattice2[i,j]
if len(vals):
lattice2[i,j] = reduce_C_function( vals )
else:
lattice2[i,j] = np.nan
accum = np.hstack((
lattice1.astype(float).ravel(), lattice2.astype(float).ravel()))
good_idxs = ~np.isnan(accum)
px = xmin + sx * np.array([ 0.5, 0.5, 0.0, -0.5, -0.5, 0.0])
py = ymin + sy * np.array([-0.5, 0.5, 1.0, 0.5, -0.5, -1.0]) / 3.0
polygons = np.zeros((6, n, 2), float)
polygons[:,:nx1*ny1,0] = np.repeat(np.arange(nx1), ny1)
polygons[:,:nx1*ny1,1] = np.tile(np.arange(ny1), nx1)
polygons[:,nx1*ny1:,0] = np.repeat(np.arange(nx2) + 0.5, ny2)
polygons[:,nx1*ny1:,1] = np.tile(np.arange(ny2), nx2) + 0.5
if C is not None:
# remove accumulation bins with no data
polygons = polygons[:,good_idxs,:]
accum = accum[good_idxs]
polygons = np.transpose(polygons, axes=[1,0,2])
polygons[:,:,0] *= sx
polygons[:,:,1] *= sy
polygons[:,:,0] += px
polygons[:,:,1] += py
if xscale=='log':
polygons[:,:,0] = 10**(polygons[:,:,0])
xmin = 10**xmin
xmax = 10**xmax
self.set_xscale('log')
if yscale=='log':
polygons[:,:,1] = 10**(polygons[:,:,1])
ymin = 10**ymin
ymax = 10**ymax
self.set_yscale('log')
if edgecolors=='none':
edgecolors = 'face'
collection = mcoll.PolyCollection(
polygons,
edgecolors = edgecolors,
linewidths = linewidths,
transOffset = self.transData,
)
# Transform accum if needed
if bins=='log':
accum = np.log10(accum+1)
elif bins is not None:
if not iterable(bins):
minimum, maximum = min(accum), max(accum)
bins-=1 # one less edge than bins
bins = minimum + (maximum-minimum)*np.arange(bins)/bins
bins = np.sort(bins)
accum = bins.searchsorted(accum)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_array(accum)
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_alpha(alpha)
collection.update(kwargs)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
corners = ((xmin, ymin), (xmax, ymax))
self.update_datalim( corners)
self.autoscale_view()
# add the collection last
self.add_collection(collection)
return collection
hexbin.__doc__ = cbook.dedent(hexbin.__doc__) % martist.kwdocd
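# A hedged hexbin() sketch: 2-D density of correlated Gaussian samples on a
# log color scale; get_array() recovers the per-hexagon values, as noted above.
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(3)
xs = np.random.standard_normal(10000)
ys = 2.0 * xs + np.random.standard_normal(10000)
hb = plt.hexbin(xs, ys, gridsize=50, bins='log')
plt.colorbar(hb)
counts = hb.get_array()  # reduced value for each hexagon
plt.show()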
def arrow(self, x, y, dx, dy, **kwargs):
"""
call signature::
arrow(x, y, dx, dy, **kwargs)
Draws arrow on specified axis from (*x*, *y*) to (*x* + *dx*,
*y* + *dy*).
Optional kwargs control the arrow properties:
%(FancyArrow)s
**Example:**
.. plot:: mpl_examples/pylab_examples/arrow_demo.py
"""
a = mpatches.FancyArrow(x, y, dx, dy, **kwargs)
self.add_artist(a)
return a
arrow.__doc__ = cbook.dedent(arrow.__doc__) % martist.kwdocd
def quiverkey(self, *args, **kw):
qk = mquiver.QuiverKey(*args, **kw)
self.add_artist(qk)
return qk
quiverkey.__doc__ = mquiver.QuiverKey.quiverkey_doc
def quiver(self, *args, **kw):
if not self._hold: self.cla()
q = mquiver.Quiver(self, *args, **kw)
self.add_collection(q, False)
self.update_datalim(q.XY)
self.autoscale_view()
return q
quiver.__doc__ = mquiver.Quiver.quiver_doc
def barbs(self, *args, **kw):
"""
%(barbs_doc)s
**Example:**
.. plot:: mpl_examples/pylab_examples/barb_demo.py
"""
if not self._hold: self.cla()
b = mquiver.Barbs(self, *args, **kw)
self.add_collection(b)
self.update_datalim(b.get_offsets())
self.autoscale_view()
return b
barbs.__doc__ = cbook.dedent(barbs.__doc__) % {
'barbs_doc': mquiver.Barbs.barbs_doc}
def fill(self, *args, **kwargs):
"""
call signature::
fill(*args, **kwargs)
Plot filled polygons. *args* is a variable length argument,
allowing for multiple *x*, *y* pairs with an optional color
format string; see :func:`~matplotlib.pyplot.plot` for details
on the argument parsing. For example, to plot a polygon with
vertices at *x*, *y* in blue.::
ax.fill(x,y, 'b' )
An arbitrary number of *x*, *y*, *color* groups can be specified::
ax.fill(x1, y1, 'g', x2, y2, 'r')
Return value is a list of :class:`~matplotlib.patches.Patch`
instances that were added.
The same color strings that :func:`~matplotlib.pyplot.plot`
supports are supported by the fill format string.
If you would like to fill below a curve, eg. shade a region
between 0 and *y* along *x*, use :meth:`fill_between`
The *closed* kwarg will close the polygon when *True* (default).
kwargs control the Polygon properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/fill_demo.py
"""
if not self._hold: self.cla()
patches = []
for poly in self._get_patches_for_fill(*args, **kwargs):
self.add_patch( poly )
patches.append( poly )
self.autoscale_view()
return patches
fill.__doc__ = cbook.dedent(fill.__doc__) % martist.kwdocd
def fill_between(self, x, y1, y2=0, where=None, **kwargs):
"""
call signature::
fill_between(x, y1, y2=0, where=None, **kwargs)
Create a :class:`~matplotlib.collections.PolyCollection`
filling the regions between *y1* and *y2* where
``where==True``
*x*
an N length np array of the x data
*y1*
an N length scalar or np array of the x data
*y2*
an N length scalar or np array of the x data
*where*
if None, default to fill between everywhere. If not None,
it is a a N length numpy boolean array and the fill will
only happen over the regions where ``where==True``
*kwargs*
keyword args passed on to the :class:`PolyCollection`
kwargs control the Polygon properties:
%(PolyCollection)s
.. plot:: mpl_examples/pylab_examples/fill_between.py
"""
# Handle united data, such as dates
self._process_unit_info(xdata=x, ydata=y1, kwargs=kwargs)
self._process_unit_info(ydata=y2)
# Convert the arrays so we can work with them
x = np.asarray(self.convert_xunits(x))
y1 = np.asarray(self.convert_yunits(y1))
y2 = np.asarray(self.convert_yunits(y2))
if not cbook.iterable(y1):
y1 = np.ones_like(x)*y1
if not cbook.iterable(y2):
y2 = np.ones_like(x)*y2
if where is None:
where = np.ones(len(x), np.bool)
where = np.asarray(where)
assert( (len(x)==len(y1)) and (len(x)==len(y2)) and len(x)==len(where))
polys = []
for ind0, ind1 in mlab.contiguous_regions(where):
theseverts = []
xslice = x[ind0:ind1]
y1slice = y1[ind0:ind1]
y2slice = y2[ind0:ind1]
if not len(xslice):
continue
N = len(xslice)
X = np.zeros((2*N+2, 2), np.float)
# the purpose of the next two lines is for when y2 is a
# scalar like 0 and we want the fill to go all the way
# down to 0 even if none of the y1 sample points do
X[0] = xslice[0], y2slice[0]
X[N+1] = xslice[-1], y2slice[-1]
X[1:N+1,0] = xslice
X[1:N+1,1] = y1slice
X[N+2:,0] = xslice[::-1]
X[N+2:,1] = y2slice[::-1]
polys.append(X)
collection = mcoll.PolyCollection(polys, **kwargs)
# now update the datalim and autoscale
XY1 = np.array([x[where], y1[where]]).T
XY2 = np.array([x[where], y2[where]]).T
self.dataLim.update_from_data_xy(XY1, self.ignore_existing_data_limits,
updatex=True, updatey=True)
self.dataLim.update_from_data_xy(XY2, self.ignore_existing_data_limits,
updatex=False, updatey=True)
self.add_collection(collection)
self.autoscale_view()
return collection
fill_between.__doc__ = cbook.dedent(fill_between.__doc__) % martist.kwdocd
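# A minimal fill_between() sketch using 'where' to shade only the regions in
# which one curve lies above the other (illustrative only).
import numpy as np
import matplotlib.pyplot as plt
xs = np.linspace(0, 2 * np.pi, 200)
y1 = np.sin(xs)
y2 = 0.5 * np.sin(3 * xs)
plt.plot(xs, y1, 'k-', xs, y2, 'k--')
plt.fill_between(xs, y1, y2, where=y1 >= y2, facecolor='green')
plt.fill_between(xs, y1, y2, where=y1 < y2, facecolor='red')
plt.show()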
#### plotting z(x,y): imshow, pcolor and relatives, contour
def imshow(self, X, cmap=None, norm=None, aspect=None,
interpolation=None, alpha=1.0, vmin=None, vmax=None,
origin=None, extent=None, shape=None, filternorm=1,
filterrad=4.0, imlim=None, resample=None, url=None, **kwargs):
"""
call signature::
imshow(X, cmap=None, norm=None, aspect=None, interpolation=None,
alpha=1.0, vmin=None, vmax=None, origin=None, extent=None,
**kwargs)
Display the image in *X* to current axes. *X* may be a float
array, a uint8 array or a PIL image. If *X* is an array, *X*
can have the following shapes:
* MxN -- luminance (grayscale, float array only)
* MxNx3 -- RGB (float or uint8 array)
* MxNx4 -- RGBA (float or uint8 array)
The value for each component of MxNx3 and MxNx4 float arrays should be
in the range 0.0 to 1.0; MxN float arrays may be normalised.
An :class:`matplotlib.image.AxesImage` instance is returned.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance, eg. cm.jet.
If *None*, default to rc ``image.cmap`` value.
*cmap* is ignored when *X* has RGB(A) information
*aspect*: [ None | 'auto' | 'equal' | scalar ]
If 'auto', changes the image aspect ratio to match that of the axes
If 'equal', and *extent* is *None*, changes the axes
aspect ratio to match that of the image. If *extent* is
not *None*, the axes aspect ratio is changed to match that
of the extent.
If *None*, default to rc ``image.aspect`` value.
*interpolation*:
Acceptable values are *None*, 'nearest', 'bilinear',
'bicubic', 'spline16', 'spline36', 'hanning', 'hamming',
'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian',
'bessel', 'mitchell', 'sinc', 'lanczos',
If *interpolation* is *None*, default to rc
``image.interpolation``. See also the *filternorm* and
*filterrad* parameters
*norm*: [ None | Normalize ]
An :class:`matplotlib.colors.Normalize` instance; if
*None*, the default :class:`~matplotlib.colors.Normalize` is used. This scales
luminance -> 0-1
*norm* is only used for an MxN float array.
*vmin*/*vmax*: [ None | scalar ]
Used to scale a luminance image to 0-1. If either is
*None*, the min and max of the luminance values will be
used. Note if *norm* is not *None*, the settings for
*vmin* and *vmax* will be ignored.
*alpha*: scalar
The alpha blending value, between 0 (transparent) and 1 (opaque)
*origin*: [ None | 'upper' | 'lower' ]
Place the [0,0] index of the array in the upper left or lower left
corner of the axes. If *None*, default to rc ``image.origin``.
*extent*: [ None | scalars (left, right, bottom, top) ]
Data values of the axes. The default assigns zero-based row,
column indices to the *x*, *y* centers of the pixels.
*shape*: [ None | scalars (columns, rows) ]
For raw buffer images
*filternorm*:
A parameter for the antigrain image resize filter. From the
antigrain documentation, if *filternorm* = 1, the filter normalizes
integer values and corrects the rounding errors. It doesn't do
anything with the source floating point values, it corrects only
integers according to the rule of 1.0 which means that any sum of
pixel weights must be equal to 1.0. So, the filter function must
produce a graph of the proper shape.
*filterrad*:
The filter radius for filters that have a radius
parameter, i.e. when interpolation is one of: 'sinc',
'lanczos' or 'blackman'
Additional kwargs are :class:`~matplotlib.artist.Artist` properties:
%(Artist)s
**Example:**
.. plot:: mpl_examples/pylab_examples/image_demo.py
"""
if not self._hold: self.cla()
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
if aspect is None: aspect = rcParams['image.aspect']
self.set_aspect(aspect)
im = mimage.AxesImage(self, cmap, norm, interpolation, origin, extent,
filternorm=filternorm,
filterrad=filterrad, resample=resample, **kwargs)
im.set_data(X)
im.set_alpha(alpha)
self._set_artist_props(im)
im.set_clip_path(self.patch)
#if norm is None and shape is None:
# im.set_clim(vmin, vmax)
if vmin is not None or vmax is not None:
im.set_clim(vmin, vmax)
else:
im.autoscale_None()
im.set_url(url)
xmin, xmax, ymin, ymax = im.get_extent()
corners = (xmin, ymin), (xmax, ymax)
self.update_datalim(corners)
if self._autoscaleon:
self.set_xlim((xmin, xmax))
self.set_ylim((ymin, ymax))
self.images.append(im)
return im
imshow.__doc__ = cbook.dedent(imshow.__doc__) % martist.kwdocd
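# Usage sketch for imshow (illustrative only; assumes an Axes instance ``ax``
# and ``numpy`` imported as ``np``); the returned AxesImage can then be
# handed to a colorbar:
#
#     data = np.random.rand(20, 20)
#     im = ax.imshow(data, interpolation='nearest', origin='lower',
#                    extent=(0, 1, 0, 1), vmin=0.0, vmax=1.0)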
def _pcolorargs(self, funcname, *args):
if len(args)==1:
C = args[0]
numRows, numCols = C.shape
X, Y = np.meshgrid(np.arange(numCols+1), np.arange(numRows+1) )
elif len(args)==3:
X, Y, C = args
else:
raise TypeError(
'Illegal arguments to %s; see help(%s)' % (funcname, funcname))
Nx = X.shape[-1]
Ny = Y.shape[0]
if len(X.shape) != 2 or X.shape[0] == 1:
x = X.reshape(1,Nx)
X = x.repeat(Ny, axis=0)
if len(Y.shape) != 2 or Y.shape[1] == 1:
y = Y.reshape(Ny, 1)
Y = y.repeat(Nx, axis=1)
if X.shape != Y.shape:
raise TypeError(
'Incompatible X, Y inputs to %s; see help(%s)' % (
funcname, funcname))
return X, Y, C
def pcolor(self, *args, **kwargs):
"""
call signatures::
pcolor(C, **kwargs)
pcolor(X, Y, C, **kwargs)
Create a pseudocolor plot of a 2-D array.
*C* is the array of color values.
*X* and *Y*, if given, specify the (*x*, *y*) coordinates of
the colored quadrilaterals; the quadrilateral for C[i,j] has
corners at::
(X[i, j], Y[i, j]),
(X[i, j+1], Y[i, j+1]),
(X[i+1, j], Y[i+1, j]),
(X[i+1, j+1], Y[i+1, j+1]).
Ideally the dimensions of *X* and *Y* should be one greater
than those of *C*; if the dimensions are the same, then the
last row and column of *C* will be ignored.
Note that the column index corresponds to the
*x*-coordinate, and the row index corresponds to *y*; for
details, see the :ref:`Grid Orientation
<axes-pcolor-grid-orientation>` section below.
If either or both of *X* and *Y* are 1-D arrays or column vectors,
they will be expanded as needed into the appropriate 2-D arrays,
making a rectangular grid.
*X*, *Y* and *C* may be masked arrays. If either C[i, j], or one
of the vertices surrounding C[i,j] (*X* or *Y* at [i, j], [i+1, j],
[i, j+1],[i+1, j+1]) is masked, nothing is plotted.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance. If *None*, use
rc settings.
norm: [ None | Normalize ]
An :class:`matplotlib.colors.Normalize` instance is used
to scale luminance data to 0,1. If *None*, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either are *None*, the min
and max of the color array *C* is used. If you pass a
*norm* instance, *vmin* and *vmax* will be ignored.
*shading*: [ 'flat' | 'faceted' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
Matlab(TM).
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='None'
* shading='faceted' -- edgecolors='k'
*edgecolors*: [ None | 'None' | color | color sequence]
If *None*, the rc setting is used by default.
If 'None', edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is a :class:`matplotlib.collection.Collection`
instance.
.. _axes-pcolor-grid-orientation:
The grid orientation follows the Matlab(TM) convention: an
array *C* with shape (*nrows*, *ncolumns*) is plotted with
the column number as *X* and the row number as *Y*, increasing
up; hence it is plotted the way the array would be printed,
except that the *Y* axis is reversed. That is, *C* is taken
as *C*(*y*, *x*).
Similarly for :func:`~matplotlib.pyplot.meshgrid`::
x = np.arange(5)
y = np.arange(3)
X, Y = meshgrid(x,y)
is equivalent to:
X = array([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]])
Y = array([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2]])
so if you have::
C = rand( len(x), len(y))
then you need::
pcolor(X, Y, C.T)
or::
pcolor(C.T)
Matlab :func:`pcolor` always discards the last row and column
of *C*, but matplotlib displays the last row and column if *X* and
*Y* are not specified, or if *X* and *Y* have one more row and
column than *C*.
kwargs can be used to control the
:class:`~matplotlib.collection.PolyCollection` properties:
%(PolyCollection)s
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat')
X, Y, C = self._pcolorargs('pcolor', *args)
Ny, Nx = X.shape
# convert to MA, if necessary.
C = ma.asarray(C)
X = ma.asarray(X)
Y = ma.asarray(Y)
mask = ma.getmaskarray(X)+ma.getmaskarray(Y)
xymask = mask[0:-1,0:-1]+mask[1:,1:]+mask[0:-1,1:]+mask[1:,0:-1]
# don't plot if C or any of the surrounding vertices are masked.
mask = ma.getmaskarray(C)[0:Ny-1,0:Nx-1]+xymask
newaxis = np.newaxis
compress = np.compress
ravelmask = (mask==0).ravel()
X1 = compress(ravelmask, ma.filled(X[0:-1,0:-1]).ravel())
Y1 = compress(ravelmask, ma.filled(Y[0:-1,0:-1]).ravel())
X2 = compress(ravelmask, ma.filled(X[1:,0:-1]).ravel())
Y2 = compress(ravelmask, ma.filled(Y[1:,0:-1]).ravel())
X3 = compress(ravelmask, ma.filled(X[1:,1:]).ravel())
Y3 = compress(ravelmask, ma.filled(Y[1:,1:]).ravel())
X4 = compress(ravelmask, ma.filled(X[0:-1,1:]).ravel())
Y4 = compress(ravelmask, ma.filled(Y[0:-1,1:]).ravel())
npoly = len(X1)
xy = np.concatenate((X1[:,newaxis], Y1[:,newaxis],
X2[:,newaxis], Y2[:,newaxis],
X3[:,newaxis], Y3[:,newaxis],
X4[:,newaxis], Y4[:,newaxis],
X1[:,newaxis], Y1[:,newaxis]),
axis=1)
verts = xy.reshape((npoly, 5, 2))
#verts = zip(zip(X1,Y1),zip(X2,Y2),zip(X3,Y3),zip(X4,Y4))
C = compress(ravelmask, ma.filled(C[0:Ny-1,0:Nx-1]).ravel())
if shading == 'faceted':
edgecolors = (0,0,0,1),
linewidths = (0.25,)
else:
edgecolors = 'face'
linewidths = (1.0,)
kwargs.setdefault('edgecolors', edgecolors)
kwargs.setdefault('antialiaseds', (0,))
kwargs.setdefault('linewidths', linewidths)
collection = mcoll.PolyCollection(verts, **kwargs)
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
self.grid(False)
x = X.compressed()
y = Y.compressed()
minx = np.amin(x)
maxx = np.amax(x)
miny = np.amin(y)
maxy = np.amax(y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim( corners)
self.autoscale_view()
self.add_collection(collection)
return collection
pcolor.__doc__ = cbook.dedent(pcolor.__doc__) % martist.kwdocd
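# Usage sketch for pcolor, mirroring the grid-orientation note in the
# docstring above (illustrative only; assumes ``ax`` and ``np``):
#
#     x = np.arange(5)
#     y = np.arange(3)
#     X, Y = np.meshgrid(x, y)
#     C = np.random.rand(len(x), len(y))
#     coll = ax.pcolor(X, Y, C.T, edgecolors='k')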
def pcolormesh(self, *args, **kwargs):
"""
call signatures::
pcolormesh(C)
pcolormesh(X, Y, C)
pcolormesh(C, **kwargs)
*C* may be a masked array, but *X* and *Y* may not. Masked
array support is implemented via *cmap* and *norm*; in
contrast, :func:`~matplotlib.pyplot.pcolor` simply does not
draw quadrilaterals with masked colors or vertices.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance. If None, use
rc settings.
*norm*: [ None | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1. If None, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either are *None*, the min
and max of the color array *C* is used. If you pass a
*norm* instance, *vmin* and *vmax* will be ignored.
*shading*: [ 'flat' | 'faceted' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
Matlab(TM).
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='None'
* shading='faceted' -- edgecolors='k'
*edgecolors*: [ None | 'None' | color | color sequence]
If None, the rc setting is used by default.
If 'None', edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is a :class:`matplotlib.collection.QuadMesh`
object.
kwargs can be used to control the
:class:`matplotlib.collections.QuadMesh`
properties:
%(QuadMesh)s
.. seealso::
:func:`~matplotlib.pyplot.pcolor`:
For an explanation of the grid orientation and the
expansion of 1-D *X* and/or *Y* to 2-D arrays.
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat')
edgecolors = kwargs.pop('edgecolors', 'None')
antialiased = kwargs.pop('antialiased', False)
X, Y, C = self._pcolorargs('pcolormesh', *args)
Ny, Nx = X.shape
# convert to one dimensional arrays
C = ma.ravel(C[0:Ny-1, 0:Nx-1]) # data point in each cell is value at
# lower left corner
X = X.ravel()
Y = Y.ravel()
coords = np.zeros(((Nx * Ny), 2), dtype=float)
coords[:, 0] = X
coords[:, 1] = Y
if shading == 'faceted' or edgecolors != 'None':
showedges = 1
else:
showedges = 0
collection = mcoll.QuadMesh(
Nx - 1, Ny - 1, coords, showedges,
antialiased=antialiased) # kwargs are not used
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
self.grid(False)
minx = np.amin(X)
maxx = np.amax(X)
miny = np.amin(Y)
maxy = np.amax(Y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim( corners)
self.autoscale_view()
self.add_collection(collection)
return collection
pcolormesh.__doc__ = cbook.dedent(pcolormesh.__doc__) % martist.kwdocd
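# Usage sketch for pcolormesh (illustrative only; assumes ``ax`` and ``np``);
# as in pcolor, the last row and column of C are dropped when X, Y and C
# share the same shape:
#
#     X, Y = np.meshgrid(np.linspace(0, 1, 11), np.linspace(0, 2, 21))
#     C = np.sin(np.pi * X) * np.cos(np.pi * Y)
#     mesh = ax.pcolormesh(X, Y, C, vmin=-1, vmax=1)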
def pcolorfast(self, *args, **kwargs):
"""
pseudocolor plot of a 2-D array
Experimental; this is a version of pcolor that
does not draw lines, that provides the fastest
possible rendering with the Agg backend, and that
can handle any quadrilateral grid.
Call signatures::
pcolor(C, **kwargs)
pcolor(xr, yr, C, **kwargs)
pcolor(x, y, C, **kwargs)
pcolor(X, Y, C, **kwargs)
C is the 2D array of color values corresponding to quadrilateral
cells. Let (nr, nc) be its shape. C may be a masked array.
``pcolor(C, **kwargs)`` is equivalent to
``pcolor([0,nc], [0,nr], C, **kwargs)``
*xr*, *yr* specify the ranges of *x* and *y* corresponding to the
rectangular region bounding *C*. If::
xr = [x0, x1]
and::
yr = [y0,y1]
then *x* goes from *x0* to *x1* as the second index of *C* goes
from 0 to *nc*, etc. (*x0*, *y0*) is the outermost corner of
cell (0,0), and (*x1*, *y1*) is the outermost corner of cell
(*nr*-1, *nc*-1). All cells are rectangles of the same size.
This is the fastest version.
*x*, *y* are 1D arrays of length *nc* +1 and *nr* +1, respectively,
giving the x and y boundaries of the cells. Hence the cells are
rectangular but the grid may be nonuniform. The speed is
intermediate. (The grid is checked, and if found to be
uniform the fast version is used.)
*X* and *Y* are 2D arrays with shape (*nr* +1, *nc* +1) that specify
the (x,y) coordinates of the corners of the colored
quadrilaterals; the quadrilateral for C[i,j] has corners at
(X[i,j],Y[i,j]), (X[i,j+1],Y[i,j+1]), (X[i+1,j],Y[i+1,j]),
(X[i+1,j+1],Y[i+1,j+1]). The cells need not be rectangular.
This is the most general, but the slowest to render. It may
produce faster and more compact output using ps, pdf, and
svg backends, however.
Note that the column index corresponds to the x-coordinate,
and the row index corresponds to y; for details, see
the "Grid Orientation" section below.
Optional keyword arguments:
*cmap*: [ None | Colormap ]
A cm Colormap instance from cm. If None, use rc settings.
*norm*: [ None | Normalize ]
An mcolors.Normalize instance is used to scale luminance data to
0,1. If None, defaults to normalize()
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with norm to normalize
luminance data. If either are *None*, the min and max of the color
array *C* is used. If you pass a norm instance, *vmin* and *vmax*
will be ignored.
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is an image if a regular or rectangular grid
is specified, and a QuadMesh collection in the general
quadrilateral case.
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
C = args[-1]
nr, nc = C.shape
if len(args) == 1:
style = "image"
x = [0, nc]
y = [0, nr]
elif len(args) == 3:
x, y = args[:2]
x = np.asarray(x)
y = np.asarray(y)
if x.ndim == 1 and y.ndim == 1:
if x.size == 2 and y.size == 2:
style = "image"
else:
dx = np.diff(x)
dy = np.diff(y)
if (np.ptp(dx) < 0.01*np.abs(dx.mean()) and
np.ptp(dy) < 0.01*np.abs(dy.mean())):
style = "image"
else:
style = "pcolorimage"
elif x.ndim == 2 and y.ndim == 2:
style = "quadmesh"
else:
raise TypeError("arguments do not match valid signatures")
else:
raise TypeError("need 1 argument or 3 arguments")
if style == "quadmesh":
# convert to one dimensional arrays
# This should also be moved to the QuadMesh class
C = ma.ravel(C) # data point in each cell is value
# at lower left corner
X = x.ravel()
Y = y.ravel()
Nx = nc+1
Ny = nr+1
# The following needs to be cleaned up; the renderer
# requires separate contiguous arrays for X and Y,
# but the QuadMesh class requires the 2D array.
coords = np.empty(((Nx * Ny), 2), np.float64)
coords[:, 0] = X
coords[:, 1] = Y
# The QuadMesh class can also be changed to
# handle relevant superclass kwargs; the initializer
# should do much more than it does now.
collection = mcoll.QuadMesh(nc, nr, coords, 0)
collection.set_alpha(alpha)
collection.set_array(C)
collection.set_cmap(cmap)
collection.set_norm(norm)
self.add_collection(collection)
xl, xr, yb, yt = X.min(), X.max(), Y.min(), Y.max()
ret = collection
else:
# One of the image styles:
xl, xr, yb, yt = x[0], x[-1], y[0], y[-1]
if style == "image":
im = mimage.AxesImage(self, cmap, norm,
interpolation='nearest',
origin='lower',
extent=(xl, xr, yb, yt),
**kwargs)
im.set_data(C)
im.set_alpha(alpha)
self.images.append(im)
ret = im
if style == "pcolorimage":
im = mimage.PcolorImage(self, x, y, C,
cmap=cmap,
norm=norm,
alpha=alpha,
**kwargs)
self.images.append(im)
ret = im
self._set_artist_props(ret)
if vmin is not None or vmax is not None:
ret.set_clim(vmin, vmax)
else:
ret.autoscale_None()
self.update_datalim(np.array([[xl, yb], [xr, yt]]))
self.autoscale_view(tight=True)
return ret
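# Usage sketch for the three pcolorfast grid styles (illustrative only;
# assumes ``ax`` and ``np``):
#
#     C = np.random.rand(50, 80)
#     ax.pcolorfast(C)                      # image style on [0, nc] x [0, nr]
#     ax.pcolorfast([0., 8.], [0., 5.], C)  # image style with explicit ranges
#     x = np.linspace(0., 8., 81)**2        # nonuniform cell boundaries
#     y = np.linspace(0., 5., 51)
#     ax.pcolorfast(x, y, C)                # pcolorimage style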
def contour(self, *args, **kwargs):
if not self._hold: self.cla()
kwargs['filled'] = False
return mcontour.ContourSet(self, *args, **kwargs)
contour.__doc__ = mcontour.ContourSet.contour_doc
def contourf(self, *args, **kwargs):
if not self._hold: self.cla()
kwargs['filled'] = True
return mcontour.ContourSet(self, *args, **kwargs)
contourf.__doc__ = mcontour.ContourSet.contour_doc
def clabel(self, CS, *args, **kwargs):
return CS.clabel(*args, **kwargs)
clabel.__doc__ = mcontour.ContourSet.clabel.__doc__
def table(self, **kwargs):
"""
call signature::
table(cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None):
Add a table to the current axes. Returns a
:class:`matplotlib.table.Table` instance. For finer grained
control over tables, use the :class:`~matplotlib.table.Table`
class and add it to the axes with
:meth:`~matplotlib.axes.Axes.add_table`.
Thanks to John Gill for providing the class and table.
kwargs control the :class:`~matplotlib.table.Table`
properties:
%(Table)s
"""
return mtable.table(self, **kwargs)
table.__doc__ = cbook.dedent(table.__doc__) % martist.kwdocd
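# Usage sketch for table (illustrative only; assumes an Axes instance ``ax``):
#
#     the_table = ax.table(cellText=[['1', '2'], ['3', '4']],
#                          rowLabels=['row 1', 'row 2'],
#                          colLabels=['col 1', 'col 2'],
#                          loc='bottom')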
def twinx(self):
"""
call signature::
ax = twinx()
create a twin of Axes for generating a plot with a sharex
x-axis but independent y axis. The y-axis of self will have
ticks on left and the returned axes will have ticks on the
right
"""
ax2 = self.figure.add_axes(self.get_position(True), sharex=self,
frameon=False)
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position('right')
self.yaxis.tick_left()
return ax2
def twiny(self):
"""
call signature::
ax = twiny()
create a twin of Axes for generating a plot with a shared
y-axis but independent x axis. The x-axis of self will have
ticks on bottom and the returned axes will have ticks on the
top
"""
ax2 = self.figure.add_axes(self.get_position(True), sharey=self,
frameon=False)
ax2.xaxis.tick_top()
ax2.xaxis.set_label_position('top')
self.xaxis.tick_bottom()
return ax2
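# Usage sketch for twinx (illustrative only; ``ax`` is an existing Axes and
# ``t``, ``signal`` and ``temperature`` are assumed 1-D arrays):
#
#     ax.plot(t, signal, 'b-')
#     ax.set_ylabel('signal')
#     ax2 = ax.twinx()                 # shares x, independent y on the right
#     ax2.plot(t, temperature, 'r-')
#     ax2.set_ylabel('temperature')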
def get_shared_x_axes(self):
'Return a copy of the shared axes Grouper object for x axes'
return self._shared_x_axes
def get_shared_y_axes(self):
'Return a copy of the shared axes Grouper object for y axes'
return self._shared_y_axes
#### Data analysis
def hist(self, x, bins=10, range=None, normed=False, cumulative=False,
bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False, **kwargs):
"""
call signature::
hist(x, bins=10, range=None, normed=False, cumulative=False,
bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False, **kwargs)
Compute and draw the histogram of *x*. The return value is a
tuple (*n*, *bins*, *patches*) or ([*n0*, *n1*, ...], *bins*,
[*patches0*, *patches1*,...]) if the input contains multiple
data.
Keyword arguments:
*bins*:
Either an integer number of bins or a sequence giving the
bins. *x* are the data to be binned. *x* can be an array,
a 2D array with multiple data in its columns, or a list of
arrays with data of different length. Note, if *bins*
is an integer, *bins* + 1 bin edges
will be returned, compatible with the semantics of
:func:`numpy.histogram` with the *new* = True argument.
Unequally spaced bins are supported if *bins* is a sequence.
*range*:
The lower and upper range of the bins. Lower and upper outliers
are ignored. If not provided, *range* is (x.min(), x.max()).
Range has no effect if *bins* is a sequence.
If *bins* is a sequence or *range* is specified, autoscaling is
set off (*autoscale_on* is set to *False*) and the xaxis limits
are set to encompass the full specified bin range.
*normed*:
If *True*, the first element of the return tuple will
be the counts normalized to form a probability density, i.e.,
``n/(len(x)*dbin)``. In a probability density, the integral of
the histogram should be 1; you can verify that with a
trapezoidal integration of the probability density function::
pdf, bins, patches = ax.hist(...)
print np.sum(pdf * np.diff(bins))
*cumulative*:
If *True*, then a histogram is computed where each bin
gives the counts in that bin plus all bins for smaller values.
The last bin gives the total number of datapoints. If *normed*
is also *True* then the histogram is normalized such that the
last bin equals 1. If *cumulative* evaluates to less than 0
(e.g. -1), the direction of accumulation is reversed. In this
case, if *normed* is also *True*, then the histogram is normalized
such that the first bin equals 1.
*histtype*: [ 'bar' | 'barstacked' | 'step' | 'stepfilled' ]
The type of histogram to draw.
- 'bar' is a traditional bar-type histogram. If multiple data
are given, the bars are arranged side by side.
- 'barstacked' is a bar-type histogram where multiple
data are stacked on top of each other.
- 'step' generates a lineplot that is by default
unfilled.
- 'stepfilled' generates a lineplot that is by default
filled.
*align*: ['left' | 'mid' | 'right' ]
Controls how the histogram is plotted.
- 'left': bars are centered on the left bin edges.
- 'mid': bars are centered between the bin edges.
- 'right': bars are centered on the right bin edges.
*orientation*: [ 'horizontal' | 'vertical' ]
If 'horizontal', :func:`~matplotlib.pyplot.barh` will be
used for bar-type histograms and the *bottom* kwarg will be
the left edges.
*rwidth*:
The relative width of the bars as a fraction of the bin
width. If *None*, automatically compute the width. Ignored
if *histtype* = 'step' or 'stepfilled'.
*log*:
If *True*, the histogram axis will be set to a log scale.
If *log* is *True* and *x* is a 1D array, empty bins will
be filtered out and only the non-empty (*n*, *bins*,
*patches*) will be returned.
kwargs are used to update the properties of the hist
:class:`~matplotlib.patches.Rectangle` instances:
%(Rectangle)s
You can use labels for your histogram, and only the first
:class:`~matplotlib.patches.Rectangle` gets the label (the
others get the magic string '_nolegend_'). This will make the
histograms work in the intuitive way for bar charts::
ax.hist(10+2*np.random.randn(1000), label='men')
ax.hist(12+3*np.random.randn(1000), label='women', alpha=0.5)
ax.legend()
**Example:**
.. plot:: mpl_examples/pylab_examples/histogram_demo.py
"""
if not self._hold: self.cla()
# NOTE: the range keyword overwrites the built-in func range !!!
# needs to be fixed in numpy !!!
if kwargs.get('width') is not None:
raise DeprecationWarning(
'hist now uses the rwidth to give relative width '
'and not absolute width')
try:
# make sure a copy is created: don't use asarray
x = np.transpose(np.array(x))
if len(x.shape)==1:
x.shape = (1,x.shape[0])
elif len(x.shape)==2 and x.shape[1]<x.shape[0]:
warnings.warn('2D hist should be nsamples x nvariables; '
'this looks transposed')
except ValueError:
# multiple hist with data of different length
if iterable(x[0]) and not is_string_like(x[0]):
tx = []
for i in xrange(len(x)):
tx.append( np.array(x[i]) )
x = tx
else:
raise ValueError, 'Cannot use provided data to create a histogram'
# Check whether bins or range are given explicitly. In that
# case do not autoscale axes.
binsgiven = (cbook.iterable(bins) or range is not None)
# check the version of the numpy
if np.__version__ < "1.3": # version 1.1 and 1.2
hist_kwargs = dict(range=range,
normed=bool(normed), new=True)
else: # version 1.3 and later, drop new=True
hist_kwargs = dict(range=range,
normed=bool(normed))
n = []
for i in xrange(len(x)):
# this will automatically overwrite bins,
# so that each histogram uses the same bins
m, bins = np.histogram(x[i], bins, **hist_kwargs)
n.append(m)
if cumulative:
slc = slice(None)
if cbook.is_numlike(cumulative) and cumulative < 0:
slc = slice(None,None,-1)
if normed:
n = [(m * np.diff(bins))[slc].cumsum()[slc] for m in n]
else:
n = [m[slc].cumsum()[slc] for m in n]
patches = []
if histtype.startswith('bar'):
totwidth = np.diff(bins)
stacked = False
if rwidth is not None: dr = min(1., max(0., rwidth))
elif len(n)>1: dr = 0.8
else: dr = 1.0
if histtype=='bar':
width = dr*totwidth/len(n)
dw = width
if len(n)>1:
boffset = -0.5*dr*totwidth*(1.-1./len(n))
else:
boffset = 0.0
elif histtype=='barstacked':
width = dr*totwidth
boffset, dw = 0.0, 0.0
stacked = True
else:
raise ValueError, 'invalid histtype: %s' % histtype
if align == 'mid' or align == 'edge':
boffset += 0.5*totwidth
elif align == 'right':
boffset += totwidth
elif align != 'left' and align != 'center':
raise ValueError, 'invalid align: %s' % align
if orientation == 'horizontal':
for m in n:
color = self._get_lines._get_next_cycle_color()
patch = self.barh(bins[:-1]+boffset, m, height=width,
left=bottom, align='center', log=log,
color=color)
patches.append(patch)
if stacked:
if bottom is None: bottom = 0.0
bottom += m
boffset += dw
elif orientation == 'vertical':
for m in n:
color = self._get_lines._get_next_cycle_color()
patch = self.bar(bins[:-1]+boffset, m, width=width,
bottom=bottom, align='center', log=log,
color=color)
patches.append(patch)
if stacked:
if bottom is None: bottom = 0.0
bottom += m
boffset += dw
else:
raise ValueError, 'invalid orientation: %s' % orientation
elif histtype.startswith('step'):
x = np.zeros( 2*len(bins), np.float )
y = np.zeros( 2*len(bins), np.float )
x[0::2], x[1::2] = bins, bins
if align == 'left' or align == 'center':
x -= 0.5*(bins[1]-bins[0])
elif align == 'right':
x += 0.5*(bins[1]-bins[0])
elif align != 'mid' and align != 'edge':
raise ValueError, 'invalid align: %s' % align
if log:
y[0],y[-1] = 1e-100, 1e-100
if orientation == 'horizontal':
self.set_xscale('log')
elif orientation == 'vertical':
self.set_yscale('log')
fill = False
if histtype == 'stepfilled':
fill = True
elif histtype != 'step':
raise ValueError, 'invalid histtype: %s' % histtype
for m in n:
y[1:-1:2], y[2::2] = m, m
if orientation == 'horizontal':
x,y = y,x
elif orientation != 'vertical':
raise ValueError, 'invalid orientation: %s' % orientation
color = self._get_lines._get_next_cycle_color()
if fill:
patches.append( self.fill(x, y,
closed=False, facecolor=color) )
else:
patches.append( self.fill(x, y,
closed=False, edgecolor=color, fill=False) )
# adopted from adjust_x/ylim part of the bar method
if orientation == 'horizontal':
xmin, xmax = 0, self.dataLim.intervalx[1]
for m in n:
xmin = np.amin(m[m!=0]) # filter out the 0 height bins
xmin = max(xmin*0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
elif orientation == 'vertical':
ymin, ymax = 0, self.dataLim.intervaly[1]
for m in n:
ymin = np.amin(m[m!=0]) # filter out the 0 height bins
ymin = max(ymin*0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
else:
raise ValueError, 'invalid histtype: %s' % histtype
label = kwargs.pop('label', '')
for patch in patches:
for p in patch:
p.update(kwargs)
p.set_label(label)
label = '_nolegend_'
if binsgiven:
self.set_autoscale_on(False)
if orientation == 'vertical':
self.autoscale_view(scalex=False, scaley=True)
XL = self.xaxis.get_major_locator().view_limits(bins[0], bins[-1])
self.set_xbound(XL)
else:
self.autoscale_view(scalex=True, scaley=False)
YL = self.yaxis.get_major_locator().view_limits(bins[0], bins[-1])
self.set_ybound(YL)
if len(n)==1:
return n[0], bins, cbook.silent_list('Patch', patches[0])
else:
return n, bins, cbook.silent_list('Lists of Patches', patches)
hist.__doc__ = cbook.dedent(hist.__doc__) % martist.kwdocd
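# Usage sketch for hist with legend labels, following the docstring example
# above (illustrative only; assumes ``ax`` and ``np``):
#
#     ax.hist(10 + 2*np.random.randn(1000), bins=50, label='men')
#     ax.hist(12 + 3*np.random.randn(1000), bins=50, label='women', alpha=0.5)
#     ax.legend()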
def psd(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
psd(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
The power spectral density by Welch's average periodogram
method. The vector *x* is divided into *NFFT* length
segments. Each segment is detrended by function *detrend* and
windowed by function *window*. *noverlap* gives the length of
the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
of each segment :math:`i` are averaged to compute *Pxx*, with a
scaling to correct for power loss due to windowing. *Fs* is the
sampling frequency.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
Returns the tuple (*Pxx*, *freqs*).
For plotting, the power is plotted as
:math:`10\log_{10}(P_{xx})` for decibels, though *Pxx* itself
is returned.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/psd_demo.py
"""
if not self._hold: self.cla()
pxx, freqs = mlab.psd(x, NFFT, Fs, detrend, window, noverlap, pad_to,
sides, scale_by_freq)
pxx.shape = len(freqs),
freqs += Fc
if scale_by_freq in (None, True):
psd_units = 'dB/Hz'
else:
psd_units = 'dB'
self.plot(freqs, 10*np.log10(pxx), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Power Spectral Density (%s)' % psd_units)
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax-vmin
logi = int(np.log10(intv))
if logi==0: logi=.1
step = 10*logi
#print vmin, vmax, step, intv, math.floor(vmin), math.ceil(vmax)+1
ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step)
self.set_yticks(ticks)
return pxx, freqs
psd_doc_dict = dict()
psd_doc_dict.update(martist.kwdocd)
psd_doc_dict.update(mlab.kwdocd)
psd_doc_dict['PSD'] = cbook.dedent(psd_doc_dict['PSD'])
psd.__doc__ = cbook.dedent(psd.__doc__) % psd_doc_dict
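# Usage sketch for psd (illustrative only; assumes ``ax`` and ``np``; the
# signal is a made-up 100 Hz tone in noise sampled at Fs = 1000):
#
#     Fs = 1000.
#     t = np.arange(0., 1., 1. / Fs)
#     x = np.cos(2 * np.pi * 100 * t) + 0.5 * np.random.randn(len(t))
#     Pxx, freqs = ax.psd(x, NFFT=256, Fs=Fs)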
def csd(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
csd(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
The cross spectral density :math:`P_{xy}` by Welch's average
periodogram method. The vectors *x* and *y* are divided into
*NFFT* length segments. Each segment is detrended by function
*detrend* and windowed by function *window*. The product of
the direct FFTs of *x* and *y* are averaged over each segment
to compute :math:`P_{xy}`, with a scaling to correct for power
loss due to windowing.
Returns the tuple (*Pxy*, *freqs*). *P* is the cross spectrum
(complex valued), and :math:`10\log_{10}|P_{xy}|` is
plotted.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the Line2D properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/csd_demo.py
.. seealso:
:meth:`psd`
For a description of the optional parameters.
"""
if not self._hold: self.cla()
pxy, freqs = mlab.csd(x, y, NFFT, Fs, detrend, window, noverlap,
pad_to, sides, scale_by_freq)
pxy.shape = len(freqs),
# pxy is complex
freqs += Fc
self.plot(freqs, 10*np.log10(np.absolute(pxy)), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Cross Spectrum Magnitude (dB)')
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax-vmin
logi = int(np.log10(intv))
if logi==0: logi=.1
step = 10*logi
ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step)
self.set_yticks(ticks)
return pxy, freqs
csd.__doc__ = cbook.dedent(csd.__doc__) % psd_doc_dict
def cohere(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend = mlab.detrend_none,
window = mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
Plot the coherence between *x* and *y*. Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
The return value is a tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector.
kwargs are applied to the lines.
References:
* Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D`
properties of the coherence plot:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/cohere_demo.py
"""
if not self._hold: self.cla()
cxy, freqs = mlab.cohere(x, y, NFFT, Fs, detrend, window, noverlap,
scale_by_freq)
freqs += Fc
self.plot(freqs, cxy, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Coherence')
self.grid(True)
return cxy, freqs
cohere.__doc__ = cbook.dedent(cohere.__doc__) % psd_doc_dict
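# Usage sketch for cohere (illustrative only; assumes ``ax`` and ``np``; the
# two made-up signals share a common 50 Hz component plus independent noise):
#
#     Fs = 500.
#     t = np.arange(0., 2., 1. / Fs)
#     common = np.sin(2 * np.pi * 50 * t)
#     x = common + 0.5 * np.random.randn(len(t))
#     y = common + 0.5 * np.random.randn(len(t))
#     Cxy, f = ax.cohere(x, y, NFFT=256, Fs=Fs)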
def specgram(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None):
"""
call signature::
specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None)
Compute a spectrogram of data in *x*. Data are split into
*NFFT* length segments and the PSD of each section is
computed. The windowing function *window* is applied to each
segment, and the amount of overlap of each segment is
specified with *noverlap*.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the y extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
*cmap*:
A :class:`matplotlib.cm.Colormap` instance; if *None* use
default determined by rc
*xextent*:
The image extent along the x-axis. xextent = (xmin,xmax)
The default is (0,max(bins)), where bins is the return
value from :func:`mlab.specgram`
Return value is (*Pxx*, *freqs*, *bins*, *im*):
- *bins* are the time points the spectrogram is calculated over
- *freqs* is an array of frequencies
- *Pxx* is a len(times) x len(freqs) array of power
- *im* is a :class:`matplotlib.image.AxesImage` instance
Note: If *x* is real (i.e. non-complex), only the positive
spectrum is shown. If *x* is complex, both positive and
negative parts of the spectrum are shown. This can be
overridden using the *sides* keyword argument.
**Example:**
.. plot:: mpl_examples/pylab_examples/specgram_demo.py
"""
if not self._hold: self.cla()
Pxx, freqs, bins = mlab.specgram(x, NFFT, Fs, detrend,
window, noverlap, pad_to, sides, scale_by_freq)
Z = 10. * np.log10(Pxx)
Z = np.flipud(Z)
if xextent is None: xextent = 0, np.amax(bins)
xmin, xmax = xextent
freqs += Fc
extent = xmin, xmax, freqs[0], freqs[-1]
im = self.imshow(Z, cmap, extent=extent)
self.axis('auto')
return Pxx, freqs, bins, im
specgram.__doc__ = cbook.dedent(specgram.__doc__) % psd_doc_dict
del psd_doc_dict #So that this does not become an Axes attribute
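# Usage sketch for specgram (illustrative only; assumes ``ax`` and ``np``; a
# made-up signal with slowly rising frequency is used):
#
#     Fs = 1000.
#     t = np.arange(0., 5., 1. / Fs)
#     x = np.cos(2 * np.pi * (50 + 20 * t) * t)
#     Pxx, freqs, bins, im = ax.specgram(x, NFFT=256, Fs=Fs, noverlap=128)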
def spy(self, Z, precision=0, marker=None, markersize=None,
aspect='equal', **kwargs):
"""
call signature::
spy(Z, precision=0, marker=None, markersize=None,
aspect='equal', **kwargs)
``spy(Z)`` plots the sparsity pattern of the 2-D array *Z*.
If *precision* is 0, any non-zero value will be plotted;
else, values of :math:`|Z| > precision` will be plotted.
For :class:`scipy.sparse.spmatrix` instances, there is a
special case: if *precision* is 'present', any value present in
the array will be plotted, even if it is identically zero.
The array will be plotted as it would be printed, with
the first index (row) increasing down and the second
index (column) increasing to the right.
By default aspect is 'equal', so that each array element
occupies a square space; set the aspect kwarg to 'auto'
to allow the plot to fill the plot box, or to any scalar
number to specify the aspect ratio of an array element
directly.
Two plotting styles are available: image or marker. Both
are available for full arrays, but only the marker style
works for :class:`scipy.sparse.spmatrix` instances.
If *marker* and *markersize* are *None*, an image will be
returned and any remaining kwargs are passed to
:func:`~matplotlib.pyplot.imshow`; else, a
:class:`~matplotlib.lines.Line2D` object will be returned with
the value of marker determining the marker type, and any
remaining kwargs passed to the
:meth:`~matplotlib.axes.Axes.plot` method.
If *marker* and *markersize* are *None*, useful kwargs include:
* *cmap*
* *alpha*
.. seealso::
:func:`~matplotlib.pyplot.imshow`
For controlling colors, e.g. cyan background and red marks,
use::
cmap = mcolors.ListedColormap(['c','r'])
If *marker* or *markersize* is not *None*, useful kwargs include:
* *marker*
* *markersize*
* *color*
Useful values for *marker* include:
* 's' square (default)
* 'o' circle
* '.' point
* ',' pixel
.. seealso::
:func:`~matplotlib.pyplot.plot`
"""
if precision is None:
precision = 0
warnings.warn("Use precision=0 instead of None", DeprecationWarning)
# 2008/10/03
if marker is None and markersize is None and hasattr(Z, 'tocoo'):
marker = 's'
if marker is None and markersize is None:
Z = np.asarray(Z)
mask = np.absolute(Z)>precision
if 'cmap' not in kwargs:
kwargs['cmap'] = mcolors.ListedColormap(['w', 'k'],
name='binary')
nr, nc = Z.shape
extent = [-0.5, nc-0.5, nr-0.5, -0.5]
ret = self.imshow(mask, interpolation='nearest', aspect=aspect,
extent=extent, origin='upper', **kwargs)
else:
if hasattr(Z, 'tocoo'):
c = Z.tocoo()
if precision == 'present':
y = c.row
x = c.col
else:
nonzero = np.absolute(c.data) > precision
y = c.row[nonzero]
x = c.col[nonzero]
else:
Z = np.asarray(Z)
nonzero = np.absolute(Z)>precision
y, x = np.nonzero(nonzero)
if marker is None: marker = 's'
if markersize is None: markersize = 10
marks = mlines.Line2D(x, y, linestyle='None',
marker=marker, markersize=markersize, **kwargs)
self.add_line(marks)
nr, nc = Z.shape
self.set_xlim(xmin=-0.5, xmax=nc-0.5)
self.set_ylim(ymin=nr-0.5, ymax=-0.5)
self.set_aspect(aspect)
ret = marks
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return ret
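# Usage sketch for the two spy styles, typically drawn on separate axes
# (illustrative only; assumes Axes instances ``ax``/``ax2`` and ``np``):
#
#     Z = np.random.rand(20, 20)
#     Z[Z < 0.9] = 0                        # make the array mostly zero
#     ax.spy(Z)                             # image style
#     ax2.spy(Z, marker='.', markersize=5)  # marker (Line2D) style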
def matshow(self, Z, **kwargs):
'''
Plot a matrix or array as an image.
The matrix will be shown the way it would be printed,
with the first row at the top. Row and column numbering
is zero-based.
Argument:
*Z* anything that can be interpreted as a 2-D array
kwargs all are passed to :meth:`~matplotlib.axes.Axes.imshow`.
:meth:`matshow` sets defaults for *extent*, *origin*,
*interpolation*, and *aspect*; use care in overriding the
*extent* and *origin* kwargs, because they interact. (Also,
if you want to change them, you probably should be using
imshow directly in your own version of matshow.)
Returns: an :class:`matplotlib.image.AxesImage` instance.
'''
Z = np.asarray(Z)
nr, nc = Z.shape
extent = [-0.5, nc-0.5, nr-0.5, -0.5]
kw = {'extent': extent,
'origin': 'upper',
'interpolation': 'nearest',
'aspect': 'equal'} # (already the imshow default)
kw.update(kwargs)
im = self.imshow(Z, **kw)
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return im
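# Usage sketch for matshow (illustrative only; assumes ``ax`` and ``np``):
#
#     A = np.arange(12).reshape(3, 4)
#     im = ax.matshow(A)
#     ax.figure.colorbar(im)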
class SubplotBase:
"""
Base class for subplots, which are :class:`Axes` instances with
additional methods to facilitate generating and manipulating a set
of :class:`Axes` within a figure.
"""
def __init__(self, fig, *args, **kwargs):
"""
*fig* is a :class:`matplotlib.figure.Figure` instance.
*args* is the tuple (*numRows*, *numCols*, *plotNum*), where
the array of subplots in the figure has dimensions *numRows*,
*numCols*, and where *plotNum* is the number of the subplot
being created. *plotNum* starts at 1 in the upper left
corner and increases to the right.
If *numRows* <= *numCols* <= *plotNum* < 10, *args* can be the
decimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*.
"""
self.figure = fig
if len(args)==1:
s = str(args[0])
if len(s) != 3:
raise ValueError('Argument to subplot must be a 3-digit integer')
rows, cols, num = map(int, s)
elif len(args)==3:
rows, cols, num = args
else:
raise ValueError( 'Illegal argument to subplot')
total = rows*cols
num -= 1 # convert from matlab to python indexing
# ie num in range(0,total)
if num >= total:
raise ValueError( 'Subplot number exceeds total subplots')
self._rows = rows
self._cols = cols
self._num = num
self.update_params()
# _axes_class is set in the subplot_class_factory
self._axes_class.__init__(self, fig, self.figbox, **kwargs)
def get_geometry(self):
'get the subplot geometry, eg 2,2,3'
return self._rows, self._cols, self._num+1
# COVERAGE NOTE: Never used internally or from examples
def change_geometry(self, numrows, numcols, num):
'change subplot geometry, eg. from 1,1,1 to 2,2,3'
self._rows = numrows
self._cols = numcols
self._num = num-1
self.update_params()
self.set_position(self.figbox)
def update_params(self):
'update the subplot position from fig.subplotpars'
rows = self._rows
cols = self._cols
num = self._num
pars = self.figure.subplotpars
left = pars.left
right = pars.right
bottom = pars.bottom
top = pars.top
wspace = pars.wspace
hspace = pars.hspace
totWidth = right-left
totHeight = top-bottom
figH = totHeight/(rows + hspace*(rows-1))
sepH = hspace*figH
figW = totWidth/(cols + wspace*(cols-1))
sepW = wspace*figW
rowNum, colNum = divmod(num, cols)
figBottom = top - (rowNum+1)*figH - rowNum*sepH
figLeft = left + colNum*(figW + sepW)
self.figbox = mtransforms.Bbox.from_bounds(figLeft, figBottom,
figW, figH)
self.rowNum = rowNum
self.colNum = colNum
self.numRows = rows
self.numCols = cols
if 0:
print 'rcn', rows, cols, num
print 'lbrt', left, bottom, right, top
print 'self.figBottom', self.figBottom
print 'self.figLeft', self.figLeft
print 'self.figW', self.figW
print 'self.figH', self.figH
print 'self.rowNum', self.rowNum
print 'self.colNum', self.colNum
print 'self.numRows', self.numRows
print 'self.numCols', self.numCols
def is_first_col(self):
return self.colNum==0
def is_first_row(self):
return self.rowNum==0
def is_last_row(self):
return self.rowNum==self.numRows-1
def is_last_col(self):
return self.colNum==self.numCols-1
# COVERAGE NOTE: Never used internally or from examples
def label_outer(self):
"""
set the visible property on ticklabels so xticklabels are
visible only if the subplot is in the last row and yticklabels
are visible only if the subplot is in the first column
"""
lastrow = self.is_last_row()
firstcol = self.is_first_col()
for label in self.get_xticklabels():
label.set_visible(lastrow)
for label in self.get_yticklabels():
label.set_visible(firstcol)
_subplot_classes = {}
def subplot_class_factory(axes_class=None):
# This makes a new class that inherits from SubclassBase and the
# given axes_class (which is assumed to be a subclass of Axes).
# This is perhaps a little bit roundabout to make a new class on
# the fly like this, but it means that a new Subplot class does
# not have to be created for every type of Axes.
if axes_class is None:
axes_class = Axes
new_class = _subplot_classes.get(axes_class)
if new_class is None:
new_class = new.classobj("%sSubplot" % (axes_class.__name__),
(SubplotBase, axes_class),
{'_axes_class': axes_class})
_subplot_classes[axes_class] = new_class
return new_class
# This is provided for backward compatibility
Subplot = subplot_class_factory()
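# Usage sketch for subplot_class_factory (illustrative only; ``MyAxes`` is a
# hypothetical Axes subclass and ``fig`` an existing Figure, assuming
# add_subplot accepts a ready-made subplot instance):
#
#     MySubplot = subplot_class_factory(MyAxes)
#     ax = MySubplot(fig, 1, 2, 1)    # left slot of a 1x2 grid
#     fig.add_subplot(ax)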
martist.kwdocd['Axes'] = martist.kwdocd['Subplot'] = martist.kwdoc(Axes)
"""
# this is some discarded code I was using to find the minimum positive
# data point for some log scaling fixes. I realized there was a
# cleaner way to do it, but am keeping this around as an example for
# how to get the data out of the axes. Might want to make something
# like this a method one day, or better yet make get_verts an Artist
# method
minx, maxx = self.get_xlim()
if minx<=0 or maxx<=0:
# find the min pos value in the data
xs = []
for line in self.lines:
xs.extend(line.get_xdata(orig=False))
for patch in self.patches:
xs.extend([x for x,y in patch.get_verts()])
for collection in self.collections:
xs.extend([x for x,y in collection.get_verts()])
posx = [x for x in xs if x>0]
if len(posx):
minx = min(posx)
maxx = max(posx)
# warning, probably breaks inverted axis
self.set_xlim((0.1*minx, maxx))
"""
| agpl-3.0 |
Capstone2017/Machine-Learning-NLP | notebook/timespan_plot.py | 1 | 1983 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 19 19:16:40 2016
@author: Nero
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
reader = pd.read_csv('application_data.csv', iterator=True)
loop = True
chunkSize = 100000
chunks = []
while loop:
try:
chunkonce = reader.get_chunk(chunkSize)
chunks.append(chunkonce)
'''chunks.append(chunk)'''
except StopIteration:
loop = False
print ('1')
chunk=pd.concat(chunks, ignore_index=True)
a=pd.DataFrame()
a['filing_date']=chunk['filing_date']
a['patent_issue_date']=chunk['patent_issue_date']
a=a[pd.notnull(a['filing_date'])]
a=a[pd.notnull(a['patent_issue_date'])]
timebegins=pd.to_datetime(a['filing_date'], format="%Y-%m-%d")
timeends=pd.to_datetime(a['patent_issue_date'], format="%Y-%m-%d")
timespanyears=timeends-timebegins
timespanyears=timespanyears.to_frame()
timespanyears[0]=timespanyears[0].astype(np.int64)
timespanyears=timespanyears[timespanyears[0]>0]
timespanyears[0]=timespanyears[0]/(1000000000*3600*24*365)
''' Removed the for loop by dropping nan rows from a and converting the columns to datetime
timespanyears=[]
for index in range(a.shape[0]):
if(type(a['filing_date'][index])==type('string')
and type(a['patent_issue_date'][index])==type('string')):
filing_dt=datetime.datetime.strptime(a['filing_date'][index],'%Y-%m-%d')
issue_dt=datetime.datetime.strptime(a['patent_issue_date'][index],'%Y-%m-%d')
timespan=issue_dt-filing_dt
x=timespan.total_seconds()/(3600*24*365)
if(x>=0):
timespanyears.append(x)
'''
application_number=[]
invention_subject_matter=[]
disposal_type=[]
'''z=np.histogram(timespanyears,bins=[0,1,2,3,4,5,6,7,8,9,10])'''
plt.hist(timespanyears[0],bins=np.linspace(0,8))
plt.title("Application time span(5,619,688/9,817,693)")
plt.xlabel("Years")
plt.ylabel("Amount")
plt.savefig('spandistribution',dpi=1000)
plt.show()
| mit |
modsy/incubator-airflow | airflow/hooks/dbapi_hook.py | 3 | 7095 |
from builtins import str
from past.builtins import basestring
from datetime import datetime
import numpy
import logging
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
class DbApiHook(BaseHook):
"""
Abstract base class for sql hooks.
"""
# Override to provide the connection name.
conn_name_attr = None
# Override to have a default connection id for a particular dbHook
default_conn_name = 'default_conn_id'
# Override if this db supports autocommit.
supports_autocommit = False
# Override with the object that exposes the connect method
connector = None
def __init__(self, *args, **kwargs):
if not self.conn_name_attr:
raise AirflowException("conn_name_attr is not defined")
elif len(args) == 1:
setattr(self, self.conn_name_attr, args[0])
elif self.conn_name_attr not in kwargs:
setattr(self, self.conn_name_attr, self.default_conn_name)
else:
setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])
def get_conn(self):
"""Returns a connection object
"""
db = self.get_connection(getattr(self, self.conn_name_attr))
return self.connector.connect(
host=db.host,
port=db.port,
username=db.login,
schema=db.schema)
def get_pandas_df(self, sql, parameters=None):
'''
Executes the sql and returns a pandas dataframe
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
'''
import pandas.io.sql as psql
conn = self.get_conn()
df = psql.read_sql(sql, con=conn, params=parameters)
conn.close()
return df
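# Usage sketch (illustrative only): with a concrete subclass such as a
# hypothetical ``MySqlHook`` and an assumed connection id, a query can be
# pulled straight into a DataFrame:
#
#     hook = MySqlHook(mysql_conn_id='my_db')
#     df = hook.get_pandas_df(
#         'SELECT * FROM users WHERE created_at > %(since)s',
#         parameters={'since': '2016-01-01'})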
def get_records(self, sql, parameters=None):
'''
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
'''
conn = self.get_conn()
cur = self.get_cursor()
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
rows = cur.fetchall()
cur.close()
conn.close()
return rows
def get_first(self, sql, parameters=None):
'''
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
'''
conn = self.get_conn()
cur = conn.cursor()
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
rows = cur.fetchone()
cur.close()
conn.close()
return rows
def run(self, sql, autocommit=False, parameters=None):
"""
Runs a command or a list of commands. Pass a list of sql
statements to the sql parameter to get them to execute
sequentially
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
:type autocommit: bool
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
conn = self.get_conn()
if isinstance(sql, basestring):
sql = [sql]
if self.supports_autocommit:
self.set_autocommit(conn, autocommit)
cur = conn.cursor()
for s in sql:
logging.info(s)
if parameters is not None:
cur.execute(s, parameters)
else:
cur.execute(s)
cur.close()
conn.commit()
conn.close()
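# Usage sketch for run (illustrative only; ``hook`` is assumed to be an
# instance of a concrete DbApiHook subclass and ``audit`` an existing table):
#
#     hook.run("DELETE FROM audit WHERE id = %(id)s",
#              parameters={'id': 1}, autocommit=True)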
def set_autocommit(self, conn, autocommit):
conn.autocommit = autocommit
def get_cursor(self):
"""
Returns a cursor
"""
return self.get_conn().cursor()
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
"""
A generic way to insert a set of tuples into a table,
the whole set of inserts is treated as one transaction
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
"""
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = "({})".format(target_fields)
else:
target_fields = ''
conn = self.get_conn()
cur = conn.cursor()
if self.supports_autocommit:
cur.execute('SET autocommit = 0')
conn.commit()
i = 0
for row in rows:
i += 1
l = []
for cell in row:
l.append(self._serialize_cell(cell))
values = tuple(l)
sql = "INSERT INTO {0} {1} VALUES ({2});".format(
table,
target_fields,
",".join(values))
cur.execute(sql)
if commit_every and i % commit_every == 0:
conn.commit()
logging.info(
"Loaded {i} rows into {table} so far".format(**locals()))
conn.commit()
cur.close()
conn.close()
logging.info(
"Done loading. Loaded a total of {i} rows".format(**locals()))
@staticmethod
def _serialize_cell(cell):
if isinstance(cell, basestring):
return "'" + str(cell).replace("'", "''") + "'"
elif cell is None:
return 'NULL'
elif isinstance(cell, numpy.datetime64):
return "'" + str(cell) + "'"
elif isinstance(cell, datetime):
return "'" + cell.isoformat() + "'"
else:
return str(cell)
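# Usage sketch for insert_rows, defined above (illustrative only; ``hook`` is
# assumed to be an instance of a concrete DbApiHook subclass and ``users`` an
# existing table):
#
#     rows = [(1, 'alice'), (2, 'bob')]
#     hook.insert_rows(table='users', rows=rows,
#                      target_fields=['id', 'name'], commit_every=1000)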
def bulk_load(self, table, tmp_file):
"""
Loads a tab-delimited file into a database table
:param table: The name of the target table
:type table: str
:param tmp_file: The path of the file to load into the table
:type tmp_file: str
"""
raise NotImplementedError()
| apache-2.0 |
spectralDNS/spectralDNS | tests/OrrSommerfeldr.py | 4 | 8316 | """Orr-Sommerfeld"""
import warnings
from numpy import real, pi, exp, zeros, imag, sqrt, log10
from spectralDNS import config, get_solver, solve
from spectralDNS.utilities import dx
#from spectralDNS.utilities import reset_profile
from OrrSommerfeld_shen import OrrSommerfeld
try:
import matplotlib.pyplot as plt
import matplotlib.cbook
warnings.filterwarnings("ignore", category=matplotlib.cbook.mplDeprecation)
except ImportError:
warnings.warn("matplotlib not installed")
plt = None
#params.eps = 1e-9
def initOS(OS, eigvals, eigvectors, U, X, t=0.):
x = X[2][0, 0]
eigval, phi, dphidy = OS.interp(x, eigvals, eigvectors, eigval=1, verbose=False)
OS.eigval = eigval
for j in range(U.shape[1]):
y = X[0][j, 0, 0]
v = (1-x**2) + config.params.eps*real(dphidy*exp(1j*(y-eigval*t)))
u = -config.params.eps*real(1j*phi*exp(1j*(y-eigval*t)))
U[2, j, :, :] = u.repeat(U.shape[2]).reshape((len(x), U.shape[2])).T
U[0, j, :, :] = v.repeat(U.shape[2]).reshape((len(x), U.shape[2])).T
U[1] = 0
acc = zeros(1)
OS, e0 = None, None
def initialize(solver, context):
global OS, e0
params = config.params
OS = OrrSommerfeld(Re=params.Re, N=128)
eigvals, eigvectors = OS.solve(False)
OS.eigvals, OS.eigvectors = eigvals, eigvectors
U = context.U
X = context.X
FST = context.FST
initOS(OS, eigvals, eigvectors, U, X)
U_hat = solver.set_velocity(**context)
U = solver.get_velocity(**context)
# Compute convection from data in context (i.e., context.U_hat and context.g)
# This is the convection at t=0
e0 = 0.5*dx(U[2]**2+(U[0]-(1-X[2]**2))**2, context.FST, axis=2)
#print(e0)
acc[0] = 0.0
if 'KMMRK3' not in params.solver:
# Initialize at t = dt
context.H_hat1[:] = solver.get_convection(**context)
initOS(OS, eigvals, eigvectors, U, X, t=params.dt)
U_hat = solver.set_velocity(**context)
U = solver.get_velocity(**context)
context.U_hat0[:] = U_hat
params.t = params.dt
params.tstep = 1
e1 = 0.5*dx(U[2]**2+(U[0]-(1-X[2]**2))**2, context.FST, axis=2)
if solver.rank == 0:
acc[0] += abs(e1/e0 - exp(2*imag(OS.eigval)*params.t))
else:
params.t = 0
params.tstep = 0
if not "KMM" in params.solver:
P_hat = solver.compute_pressure(**context)
FST.backward(P_hat, context.P, context.SN)
else:
context.g[:] = 0
def set_Source(Source, Sk, FST, ST, N, **kw):
Source[:] = 0
Source[0] = -2./config.params.Re
Sk[:] = 0
if hasattr(FST, 'complex_shape'):
Sk[0] = FST.scalar_product(Source[0], Sk[0], ST)
else:
Sk[0] = FST.scalar_product(Source[0], Sk[0])
Sk[0, 0, 0, -2:] = 0
im1, im2, im3, im4 = (None, )*4
def update(context):
c = context
params = config.params
solver = config.solver
#if params.tstep == 2: reset_profile(profile)
if (params.tstep % params.plot_step == 0 or
params.tstep % params.compute_energy == 0):
U = solver.get_velocity(**context)
global im1, im2, im3, OS, e0, acc
if not plt is None:
if im1 is None and solver.rank == 0 and params.plot_step > 0:
plt.figure()
im1 = plt.contourf(c.X[0][:, 0, :], c.X[2][:, 0, :], c.U[2, :, 0, :], 100)
plt.colorbar(im1)
plt.draw()
plt.figure()
im2 = plt.contourf(c.X[0][:, 0, :], c.X[2][:, 0, :], c.U[0, :, 0, :] - (1-c.X[2][:, 0, :]**2), 100)
plt.colorbar(im2)
plt.draw()
plt.figure()
im3 = plt.quiver(c.X[0][:, 0, :], c.X[2][:, 0, :], c.U[0, :, 0, :]-(1-c.X[2][:, 0, :]**2), c.U[2, :, 0, :])
plt.draw()
plt.pause(1e-6)
if params.tstep % params.plot_step == 0 and solver.rank == 0 and params.plot_step > 0:
im1.ax.clear()
im1.ax.contourf(c.X[0][:, 0, :], c.X[2][:, 0, :], U[2, :, 0, :], 100)
im1.autoscale()
im2.ax.clear()
im2.ax.contourf(c.X[0][:, 0, :], c.X[2][:, 0, :], U[0, :, 0, :]-(1-c.X[2][:, 0, :]**2), 100)
im2.autoscale()
im3.set_UVC(U[0, :, 0, :]-(1-c.X[2][:, 0, :]**2), U[2, :, 0, :])
plt.pause(1e-6)
if params.tstep % params.compute_energy == 0:
e1, e2, exact = compute_error(c)
div_u = solver.get_divergence(**c)
e3 = dx(div_u**2, c.FST, axis=2)
if solver.rank == 0 and not config.params.spatial_refinement_test:
acc[0] += abs(e1/e0-exact)
print("Time %2.5f Norms %2.16e %2.16e %2.16e %2.16e %2.16e" %(params.t, e1/e0, exact, e1/e0-exact, sqrt(e2), e3))
def compute_error(context):
global OS, e0, acc
c = context
params = config.params
solver = config.solver
U = solver.get_velocity(**c)
pert = (U[0] - (1-c.X[2]**2))**2 + U[2]**2
e1 = 0.5*dx(pert, c.FST, axis=2)
exact = exp(2*imag(OS.eigval)*params.t)
U0 = c.work[(U, 0, True)]
initOS(OS, OS.eigvals, OS.eigvectors, U0, c.X, t=params.t)
pert = (U[0] - U0[0])**2 + (U[2]-U0[2])**2
#pert = (U[2] - U0[2])**2
e2 = 0.5*dx(pert, c.FST, axis=2)
return e1, e2, exact
def regression_test(context):
_, e2, _ = compute_error(context)
if config.solver.rank == 0:
assert sqrt(e2) < 1e-12
def refinement_test(context):
_, e2, _ = compute_error(context)
if config.solver.rank == 0:
print("Computed error = %2.8e %2.8e %2.8e" %(sqrt(e2)/config.params.eps, config.params.dt, config.params.eps))
def eps_refinement_test(context):
e1, e2, exact = compute_error(context)
if config.solver.rank == 0:
print(r" %2d & %2.8e & %2.8e \\\ " %(-int(log10(config.params.eps)), sqrt(e2)/config.params.eps, e1/e0-exact))
def spatial_refinement_test(context):
_, e2, _ = compute_error(context)
if config.solver.rank == 0:
print(r" %2d & %2.8e & %2.8e \\\ " %(2**config.params.M[0], sqrt(e2)/config.params.eps, acc[0]))
if __name__ == "__main__":
config.update(
{'Re': 8000.,
'nu': 1./8000., # Viscosity
'dt': 0.001, # Time step
'T': 0.01, # End time
'L': [2*pi, pi, 2],
'M': [5, 2, 7],
'Dquad': 'GC',
'Bquad': 'GC',
'dealias': None
}, "channel"
)
config.channel.add_argument("--compute_energy", type=int, default=1)
config.channel.add_argument("--plot_step", type=int, default=1)
config.channel.add_argument("--refinement_test", type=bool, default=False)
config.channel.add_argument("--eps_refinement_test", type=bool, default=False)
config.channel.add_argument("--spatial_refinement_test", type=bool, default=False)
config.channel.add_argument("--eps", type=float, default=1e-7)
#solver = get_solver(update=update, regression_test=regression_test, mesh="channel")
solver = get_solver(update=update, mesh="channel")
if config.params.eps_refinement_test:
print("eps refinement-test")
solver.update = lambda x: None
solver.regression_test = eps_refinement_test
config.params.verbose = False
context = solver.get_context()
for eps in [1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9, 1e-10, 1e-11, 1e-12]:
config.params.eps = eps
initialize(solver, context)
set_Source(**context)
solve(solver, context)
elif config.params.spatial_refinement_test:
print("spatial refinement-test")
def update_(con):
e1, _, exact = compute_error(con)
acc[0] += abs(e1/e0-exact)
solver.update = update_
solver.regression_test = spatial_refinement_test
config.params.verbose = False
for M in [4, 5, 6, 7, 8]:
config.params.M = [M, 3, 1]
context = solver.get_context()
initialize(solver, context)
set_Source(**context)
solve(solver, context)
else:
if config.params.refinement_test:
solver.update = lambda x: None
solver.regression_test = refinement_test
context = solver.get_context()
initialize(solver, context)
set_Source(**context)
solve(solver, context)
| lgpl-3.0 |
jvrsantacruz/XlsxWriter | examples/pandas_chart_columns.py | 9 | 2024 | ##############################################################################
#
# An example of converting a Pandas dataframe to an xlsx file with a grouped
# column chart using Pandas and XlsxWriter.
#
# Copyright 2013-2015, John McNamara, [email protected]
#
import pandas as pd
# Some sample data to plot.
farm_1 = {'Apples': 10, 'Berries': 32, 'Squash': 21, 'Melons': 13, 'Corn': 18}
farm_2 = {'Apples': 15, 'Berries': 43, 'Squash': 17, 'Melons': 10, 'Corn': 22}
farm_3 = {'Apples': 6, 'Berries': 24, 'Squash': 22, 'Melons': 16, 'Corn': 30}
farm_4 = {'Apples': 12, 'Berries': 30, 'Squash': 15, 'Melons': 9, 'Corn': 15}
data = [farm_1, farm_2, farm_3, farm_4]
index = ['Farm 1', 'Farm 2', 'Farm 3', 'Farm 4']
# Create a Pandas dataframe from the data.
df = pd.DataFrame(data, index=index)
# Create a Pandas Excel writer using XlsxWriter as the engine.
sheet_name = 'Sheet1'
writer = pd.ExcelWriter('pandas_chart_columns.xlsx', engine='xlsxwriter')
df.to_excel(writer, sheet_name=sheet_name)
# Access the XlsxWriter workbook and worksheet objects from the dataframe.
workbook = writer.book
worksheet = writer.sheets[sheet_name]
# Create a chart object.
chart = workbook.add_chart({'type': 'column'})
# Some alternative colours for the chart.
colors = ['#E41A1C', '#377EB8', '#4DAF4A', '#984EA3', '#FF7F00']
# Configure the series of the chart from the dataframe data.
for col_num in range(1, len(farm_1) + 1):
chart.add_series({
'name': ['Sheet1', 0, col_num],
'categories': ['Sheet1', 1, 0, 4, 0],
'values': ['Sheet1', 1, col_num, 4, col_num],
'fill': {'color': colors[col_num - 1]},
'overlap': -10,
})
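# The list form [sheetname, first_row, first_col, last_row, last_col] used
# above can also be written as an A1-style range string.  A sketch (not part
# of the original example) using XlsxWriter's utility helpers:
#
#     from xlsxwriter.utility import xl_range
#     values_ref = '=%s!%s' % (sheet_name, xl_range(1, col_num, 4, col_num))
#     # e.g. col_num = 1 gives '=Sheet1!B2:B5'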
# Configure the chart axes.
chart.set_x_axis({'name': 'Total Produce'})
chart.set_y_axis({'name': 'Farms', 'major_gridlines': {'visible': False}})
# Insert the chart into the worksheet.
worksheet.insert_chart('H2', chart)
# Close the Pandas Excel writer and output the Excel file.
writer.save()
| bsd-2-clause |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/matplotlib/backend_bases.py | 4 | 111476 | """
Abstract base classes define the primitives that renderers and
graphics contexts must implement to serve as a matplotlib backend
:class:`RendererBase`
An abstract base class to handle drawing/rendering operations.
:class:`FigureCanvasBase`
The abstraction layer that separates the
:class:`matplotlib.figure.Figure` from the backend specific
details like a user interface drawing area
:class:`GraphicsContextBase`
An abstract base class that provides color, line styles, etc...
:class:`Event`
The base class for all of the matplotlib event
handling. Derived classes such as :class:`KeyEvent` and
:class:`MouseEvent` store the meta data like keys and buttons
pressed, x and y locations in pixel and
:class:`~matplotlib.axes.Axes` coordinates.
:class:`ShowBase`
The base class for the Show class of each interactive backend;
the 'show' callable is then set to Show.__call__, inherited from
ShowBase.
:class:`ToolContainerBase`
The base class for the Toolbar class of each interactive backend.
:class:`StatusbarBase`
The base class for the messaging area.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from contextlib import contextmanager
import six
from six.moves import xrange
import os
import sys
import warnings
import time
import io
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.colors as colors
import matplotlib.transforms as transforms
import matplotlib.widgets as widgets
#import matplotlib.path as path
from matplotlib import rcParams
from matplotlib import is_interactive
from matplotlib import get_backend
from matplotlib._pylab_helpers import Gcf
from matplotlib import lines
from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
import matplotlib.tight_bbox as tight_bbox
import matplotlib.textpath as textpath
from matplotlib.path import Path
from matplotlib.cbook import mplDeprecation, warn_deprecated
import matplotlib.backend_tools as tools
try:
from importlib import import_module
except:
# simple python 2.6 implementation (no relative imports)
def import_module(name):
__import__(name)
return sys.modules[name]
try:
from PIL import Image
_has_pil = True
del Image
except ImportError:
_has_pil = False
_default_filetypes = {
'ps': 'Postscript',
'eps': 'Encapsulated Postscript',
'pdf': 'Portable Document Format',
'pgf': 'PGF code for LaTeX',
'png': 'Portable Network Graphics',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics'
}
_default_backends = {
'ps': 'matplotlib.backends.backend_ps',
'eps': 'matplotlib.backends.backend_ps',
'pdf': 'matplotlib.backends.backend_pdf',
'pgf': 'matplotlib.backends.backend_pgf',
'png': 'matplotlib.backends.backend_agg',
'raw': 'matplotlib.backends.backend_agg',
'rgba': 'matplotlib.backends.backend_agg',
'svg': 'matplotlib.backends.backend_svg',
'svgz': 'matplotlib.backends.backend_svg',
}
def register_backend(format, backend, description=None):
"""
Register a backend for saving to a given file format.
format : str
    File extension
backend : module string or canvas class
Backend for handling file output
description : str, optional
Description of the file type. Defaults to an empty string
"""
if description is None:
description = ''
_default_backends[format] = backend
_default_filetypes[format] = description
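# Example (a sketch; the format name and module are hypothetical):
#
#     register_backend('myfmt', 'mypackage.backend_myfmt', 'My file format')
#
# print_figure() can then dispatch '.myfmt' files to that backend through
# get_registered_canvas_class('myfmt').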
def get_registered_canvas_class(format):
"""
Return the registered default canvas for given file format.
Handles deferred import of required backend.
"""
if format not in _default_backends:
return None
backend_class = _default_backends[format]
if cbook.is_string_like(backend_class):
backend_class = import_module(backend_class).FigureCanvas
_default_backends[format] = backend_class
return backend_class
class ShowBase(object):
"""
Simple base class to generate a show() callable in backends.
Subclass must override mainloop() method.
"""
def __call__(self, block=None):
"""
Show all figures. If *block* is not None, then
it is a boolean that overrides all other factors
determining whether show blocks by calling mainloop().
The other factors are:
it does not block if run inside ipython's "%pylab" mode
it does not block in interactive mode.
"""
managers = Gcf.get_all_fig_managers()
if not managers:
return
for manager in managers:
manager.show()
if block is not None:
if block:
self.mainloop()
return
else:
return
# Hack: determine at runtime whether we are
# inside ipython in pylab mode.
from matplotlib import pyplot
try:
ipython_pylab = not pyplot.show._needmain
# IPython versions >= 0.10 tack the _needmain
# attribute onto pyplot.show, and always set
# it to False, when in %pylab mode.
ipython_pylab = ipython_pylab and get_backend() != 'WebAgg'
# TODO: The above is a hack to get the WebAgg backend
# working with ipython's `%pylab` mode until proper
# integration is implemented.
except AttributeError:
ipython_pylab = False
# Leave the following as a separate step in case we
# want to control this behavior with an rcParam.
if ipython_pylab:
return
if not is_interactive() or get_backend() == 'WebAgg':
self.mainloop()
def mainloop(self):
pass
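# A minimal concrete Show class might look like the following sketch (the
# mainloop body is backend specific and not part of this module):
#
#     class Show(ShowBase):
#         def mainloop(self):
#             run_the_gui_event_loop()   # hypothetical toolkit call
#
#     show = Show()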
class RendererBase(object):
"""An abstract base class to handle drawing/rendering operations.
The following methods must be implemented in the backend for full
functionality (though just implementing :meth:`draw_path` alone would
give a highly capable backend):
* :meth:`draw_path`
* :meth:`draw_image`
* :meth:`draw_gouraud_triangle`
The following methods *should* be implemented in the backend for
optimization reasons:
* :meth:`draw_text`
* :meth:`draw_markers`
* :meth:`draw_path_collection`
* :meth:`draw_quad_mesh`
"""
def __init__(self):
self._texmanager = None
self._text2path = textpath.TextToPath()
def open_group(self, s, gid=None):
"""
Open a grouping element with label *s*. If *gid* is given, use
*gid* as the id of the group. Is only currently used by
:mod:`~matplotlib.backends.backend_svg`.
"""
pass
def close_group(self, s):
"""
Close a grouping element with label *s*
Is only currently used by :mod:`~matplotlib.backends.backend_svg`
"""
pass
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a :class:`~matplotlib.path.Path` instance using the
given affine transform.
"""
raise NotImplementedError
def draw_markers(self, gc, marker_path, marker_trans, path,
trans, rgbFace=None):
"""
Draws a marker at each of the vertices in path. This includes
all vertices, including control points on curves. To avoid
that behavior, those vertices should be removed before calling
this function.
*gc*
the :class:`GraphicsContextBase` instance
*marker_trans*
is an affine transform applied to the marker.
*trans*
is an affine transform applied to the path.
This provides a fallback implementation of draw_markers that
makes multiple calls to :meth:`draw_path`. Some backends may
want to override this method in order to draw the marker only
once and reuse it multiple times.
"""
for vertices, codes in path.iter_segments(trans, simplify=False):
if len(vertices):
x, y = vertices[-2:]
self.draw_path(gc, marker_path,
marker_trans +
transforms.Affine2D().translate(x, y),
rgbFace)
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
"""
Draws a collection of paths selecting drawing properties from
the lists *facecolors*, *edgecolors*, *linewidths*,
*linestyles* and *antialiaseds*. *offsets* is a list of
offsets to apply to each of the paths. The offsets in
*offsets* are first transformed by *offsetTrans* before being
applied. *offset_position* may be either "screen" or "data"
depending on the space that the offsets are in.
This provides a fallback implementation of
:meth:`draw_path_collection` that makes multiple calls to
:meth:`draw_path`. Some backends may want to override this in
order to render each set of path data only once, and then
reference that path multiple times with the different offsets,
colors, styles etc. The generator methods
:meth:`_iter_collection_raw_paths` and
:meth:`_iter_collection` are provided to help with (and
standardize) the implementation across backends. It is highly
recommended to use those generators, so that changes to the
behavior of :meth:`draw_path_collection` can be made globally.
"""
path_ids = []
for path, transform in self._iter_collection_raw_paths(
master_transform, paths, all_transforms):
path_ids.append((path, transforms.Affine2D(transform)))
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_ids, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
path, transform = path_id
transform = transforms.Affine2D(
transform.get_matrix()).translate(xo, yo)
self.draw_path(gc0, path, transform, rgbFace)
def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
coordinates, offsets, offsetTrans, facecolors,
antialiased, edgecolors):
"""
This provides a fallback implementation of
:meth:`draw_quad_mesh` that generates paths and then calls
:meth:`draw_path_collection`.
"""
from matplotlib.collections import QuadMesh
paths = QuadMesh.convert_mesh_to_paths(
meshWidth, meshHeight, coordinates)
if edgecolors is None:
edgecolors = facecolors
linewidths = np.array([gc.get_linewidth()], np.float_)
return self.draw_path_collection(
gc, master_transform, paths, [], offsets, offsetTrans, facecolors,
edgecolors, linewidths, [], [antialiased], [None], 'screen')
def draw_gouraud_triangle(self, gc, points, colors, transform):
"""
Draw a Gouraud-shaded triangle.
*points* is a 3x2 array of (x, y) points for the triangle.
*colors* is a 3x4 array of RGBA colors for each point of the
triangle.
*transform* is an affine transform to apply to the points.
"""
raise NotImplementedError
def draw_gouraud_triangles(self, gc, triangles_array, colors_array,
transform):
"""
Draws a series of Gouraud triangles.
        *points* is a Nx3x2 array of (x, y) points for the triangles.
*colors* is a Nx3x4 array of RGBA colors for each point of the
triangles.
*transform* is an affine transform to apply to the points.
"""
transform = transform.frozen()
for tri, col in zip(triangles_array, colors_array):
self.draw_gouraud_triangle(gc, tri, col, transform)
def _iter_collection_raw_paths(self, master_transform, paths,
all_transforms):
"""
This is a helper method (along with :meth:`_iter_collection`) to make
        it easier to write a space-efficient :meth:`draw_path_collection`
implementation in a backend.
This method yields all of the base path/transform
combinations, given a master transform, a list of paths and
list of transforms.
The arguments should be exactly what is passed in to
:meth:`draw_path_collection`.
The backend should take each yielded path and transform and
create an object that can be referenced (reused) later.
"""
Npaths = len(paths)
Ntransforms = len(all_transforms)
N = max(Npaths, Ntransforms)
if Npaths == 0:
return
transform = transforms.IdentityTransform()
for i in xrange(N):
path = paths[i % Npaths]
if Ntransforms:
transform = Affine2D(all_transforms[i % Ntransforms])
yield path, transform + master_transform
def _iter_collection_uses_per_path(self, paths, all_transforms,
offsets, facecolors, edgecolors):
"""
Compute how many times each raw path object returned by
_iter_collection_raw_paths would be used when calling
_iter_collection. This is intended for the backend to decide
on the tradeoff between using the paths in-line and storing
them once and reusing. Rounds up in case the number of uses
is not the same for every path.
"""
Npaths = len(paths)
if Npaths == 0 or (len(facecolors) == 0 and len(edgecolors) == 0):
return 0
Npath_ids = max(Npaths, len(all_transforms))
N = max(Npath_ids, len(offsets))
return (N + Npath_ids - 1) // Npath_ids
def _iter_collection(self, gc, master_transform, all_transforms,
path_ids, offsets, offsetTrans, facecolors,
edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
"""
This is a helper method (along with
:meth:`_iter_collection_raw_paths`) to make it easier to write
        a space-efficient :meth:`draw_path_collection` implementation in a
backend.
This method yields all of the path, offset and graphics
context combinations to draw the path collection. The caller
should already have looped over the results of
:meth:`_iter_collection_raw_paths` to draw this collection.
The arguments should be the same as that passed into
:meth:`draw_path_collection`, with the exception of
*path_ids*, which is a list of arbitrary objects that the
backend will use to reference one of the paths created in the
:meth:`_iter_collection_raw_paths` stage.
Each yielded result is of the form::
xo, yo, path_id, gc, rgbFace
where *xo*, *yo* is an offset; *path_id* is one of the elements of
*path_ids*; *gc* is a graphics context and *rgbFace* is a color to
use for filling the path.
"""
Ntransforms = len(all_transforms)
Npaths = len(path_ids)
Noffsets = len(offsets)
N = max(Npaths, Noffsets)
Nfacecolors = len(facecolors)
Nedgecolors = len(edgecolors)
Nlinewidths = len(linewidths)
Nlinestyles = len(linestyles)
Naa = len(antialiaseds)
Nurls = len(urls)
if (Nfacecolors == 0 and Nedgecolors == 0) or Npaths == 0:
return
if Noffsets:
toffsets = offsetTrans.transform(offsets)
gc0 = self.new_gc()
gc0.copy_properties(gc)
if Nfacecolors == 0:
rgbFace = None
if Nedgecolors == 0:
gc0.set_linewidth(0.0)
xo, yo = 0, 0
for i in xrange(N):
path_id = path_ids[i % Npaths]
if Noffsets:
xo, yo = toffsets[i % Noffsets]
if offset_position == 'data':
if Ntransforms:
transform = (
Affine2D(all_transforms[i % Ntransforms]) +
master_transform)
else:
transform = master_transform
xo, yo = transform.transform_point((xo, yo))
xp, yp = transform.transform_point((0, 0))
xo = -(xp - xo)
yo = -(yp - yo)
if not (np.isfinite(xo) and np.isfinite(yo)):
continue
if Nfacecolors:
rgbFace = facecolors[i % Nfacecolors]
if Nedgecolors:
if Nlinewidths:
gc0.set_linewidth(linewidths[i % Nlinewidths])
if Nlinestyles:
gc0.set_dashes(*linestyles[i % Nlinestyles])
fg = edgecolors[i % Nedgecolors]
if len(fg) == 4:
if fg[3] == 0.0:
gc0.set_linewidth(0)
else:
gc0.set_foreground(fg)
else:
gc0.set_foreground(fg)
if rgbFace is not None and len(rgbFace) == 4:
if rgbFace[3] == 0:
rgbFace = None
gc0.set_antialiased(antialiaseds[i % Naa])
if Nurls:
gc0.set_url(urls[i % Nurls])
yield xo, yo, path_id, gc0, rgbFace
gc0.restore()
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to :meth:`draw_image`.
Allows a backend to have images at a different resolution to other
artists.
"""
return 1.0
def draw_image(self, gc, x, y, im, transform=None):
"""
Draw an RGBA image.
*gc*
a :class:`GraphicsContextBase` instance with clipping information.
*x*
the distance in physical units (i.e., dots or pixels) from the left
hand side of the canvas.
*y*
the distance in physical units (i.e., dots or pixels) from the
bottom side of the canvas.
*im*
An NxMx4 array of RGBA pixels (of dtype uint8).
*transform*
If and only if the concrete backend is written such that
:meth:`option_scale_image` returns ``True``, an affine
transformation *may* be passed to :meth:`draw_image`. It takes the
form of a :class:`~matplotlib.transforms.Affine2DBase` instance.
The translation vector of the transformation is given in physical
units (i.e., dots or pixels). Note that the transformation does not
override `x` and `y`, and has to be applied *before* translating
the result by `x` and `y` (this can be accomplished by adding `x`
and `y` to the translation vector defined by `transform`).
"""
raise NotImplementedError
def option_image_nocomposite(self):
"""
override this method for renderers that do not necessarily always
want to rescale and composite raster images. (like SVG, PDF, or PS)
"""
return False
def option_scale_image(self):
"""
override this method for renderers that support arbitrary affine
transformations in :meth:`draw_image` (most vector backends).
"""
return False
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
"""
"""
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath="TeX")
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
"""
Draw the text instance
*gc*
the :class:`GraphicsContextBase` instance
*x*
the x location of the text in display coords
*y*
the y location of the text baseline in display coords
*s*
the text string
*prop*
a :class:`matplotlib.font_manager.FontProperties` instance
*angle*
the rotation angle in degrees
*mtext*
a :class:`matplotlib.text.Text` instance
**backend implementers note**
When you are trying to determine if you have gotten your bounding box
right (which is what enables the text layout/alignment to work
properly), it helps to change the line in text.py::
if 0: bbox_artist(self, renderer)
to if 1, and then the actual bounding box will be plotted along with
your text.
"""
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath)
def _get_text_path_transform(self, x, y, s, prop, angle, ismath):
"""
return the text path and transform
*prop*
font property
*s*
text to be converted
*usetex*
If True, use matplotlib usetex mode.
*ismath*
If True, use mathtext parser. If "TeX", use *usetex* mode.
"""
text2path = self._text2path
fontsize = self.points_to_pixels(prop.get_size_in_points())
if ismath == "TeX":
verts, codes = text2path.get_text_path(prop, s, ismath=False,
usetex=True)
else:
verts, codes = text2path.get_text_path(prop, s, ismath=ismath,
usetex=False)
path = Path(verts, codes)
angle = angle / 180. * 3.141592
if self.flipy():
transform = Affine2D().scale(fontsize / text2path.FONT_SCALE,
fontsize / text2path.FONT_SCALE)
transform = transform.rotate(angle).translate(x, self.height - y)
else:
transform = Affine2D().scale(fontsize / text2path.FONT_SCALE,
fontsize / text2path.FONT_SCALE)
transform = transform.rotate(angle).translate(x, y)
return path, transform
def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):
"""
draw the text by converting them to paths using textpath module.
*prop*
font property
*s*
text to be converted
*usetex*
If True, use matplotlib usetex mode.
*ismath*
If True, use mathtext parser. If "TeX", use *usetex* mode.
"""
path, transform = self._get_text_path_transform(
x, y, s, prop, angle, ismath)
color = gc.get_rgb()
gc.set_linewidth(0.0)
self.draw_path(gc, path, transform, rgbFace=color)
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height, and the offset from the bottom to the
baseline (descent), in display coords of the string s with
:class:`~matplotlib.font_manager.FontProperties` prop
"""
if ismath == 'TeX':
# todo: handle props
size = prop.get_size_in_points()
texmanager = self._text2path.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
dpi = self.points_to_pixels(72)
if ismath:
dims = self._text2path.mathtext_parser.parse(s, dpi, prop)
return dims[0:3] # return width, height, descent
flags = self._text2path._get_hinting_flag()
font = self._text2path._get_font(prop)
size = prop.get_size_in_points()
font.set_size(size, dpi)
# the width and height of unrotated string
font.set_text(s, 0.0, flags=flags)
w, h = font.get_width_height()
d = font.get_descent()
w /= 64.0 # convert from subpixels
h /= 64.0
d /= 64.0
return w, h, d
def flipy(self):
"""
        Return True if small y values are at the top of the renderer.
        Used only for drawing text (:mod:`matplotlib.text`) and images
        (:mod:`matplotlib.image`).
"""
return True
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return 1, 1
def get_texmanager(self):
"""
return the :class:`matplotlib.texmanager.TexManager` instance
"""
if self._texmanager is None:
from matplotlib.texmanager import TexManager
self._texmanager = TexManager()
return self._texmanager
def new_gc(self):
"""
Return an instance of a :class:`GraphicsContextBase`
"""
return GraphicsContextBase()
def points_to_pixels(self, points):
"""
Convert points to display units
*points*
a float or a numpy array of float
return points converted to pixels
You need to override this function (unless your backend
doesn't have a dpi, e.g., postscript or svg). Some imaging
systems assume some value for pixels per inch::
points to pixels = points * pixels_per_inch/72.0 * dpi/72.0
"""
return points
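    # A dpi-aware backend typically overrides points_to_pixels along the lines
    # of the formula in the docstring (a sketch, not the base implementation):
    #
    #     def points_to_pixels(self, points):
    #         return points * self.dpi / 72.0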
def strip_math(self, s):
return cbook.strip_math(s)
def start_rasterizing(self):
"""
Used in MixedModeRenderer. Switch to the raster renderer.
"""
pass
def stop_rasterizing(self):
"""
Used in MixedModeRenderer. Switch back to the vector renderer
and draw the contents of the raster renderer as an image on
the vector renderer.
"""
pass
def start_filter(self):
"""
Used in AggRenderer. Switch to a temporary renderer for image
filtering effects.
"""
pass
def stop_filter(self, filter_func):
"""
Used in AggRenderer. Switch back to the original renderer.
The contents of the temporary renderer is processed with the
*filter_func* and is drawn on the original renderer as an
image.
"""
pass
class GraphicsContextBase(object):
"""
An abstract base class that provides color, line styles, etc...
"""
def __init__(self):
self._alpha = 1.0
self._forced_alpha = False # if True, _alpha overrides A from RGBA
self._antialiased = 1 # use 0,1 not True, False for extension code
self._capstyle = 'butt'
self._cliprect = None
self._clippath = None
self._dashes = None, None
self._joinstyle = 'round'
self._linestyle = 'solid'
self._linewidth = 1
self._rgb = (0.0, 0.0, 0.0, 1.0)
self._hatch = None
self._hatch_color = colors.to_rgba(rcParams['hatch.color'])
self._hatch_linewidth = rcParams['hatch.linewidth']
self._url = None
self._gid = None
self._snap = None
self._sketch = None
def copy_properties(self, gc):
'Copy properties from gc to self'
self._alpha = gc._alpha
self._forced_alpha = gc._forced_alpha
self._antialiased = gc._antialiased
self._capstyle = gc._capstyle
self._cliprect = gc._cliprect
self._clippath = gc._clippath
self._dashes = gc._dashes
self._joinstyle = gc._joinstyle
self._linestyle = gc._linestyle
self._linewidth = gc._linewidth
self._rgb = gc._rgb
self._hatch = gc._hatch
self._hatch_color = gc._hatch_color
self._hatch_linewidth = gc._hatch_linewidth
self._url = gc._url
self._gid = gc._gid
self._snap = gc._snap
self._sketch = gc._sketch
def restore(self):
"""
Restore the graphics context from the stack - needed only
for backends that save graphics contexts on a stack
"""
pass
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on
all backends
"""
return self._alpha
def get_antialiased(self):
"Return true if the object should try to do antialiased rendering"
return self._antialiased
def get_capstyle(self):
"""
Return the capstyle as a string in ('butt', 'round', 'projecting')
"""
return self._capstyle
def get_clip_rectangle(self):
"""
Return the clip rectangle as a :class:`~matplotlib.transforms.Bbox`
instance
"""
return self._cliprect
def get_clip_path(self):
"""
Return the clip path in the form (path, transform), where path
is a :class:`~matplotlib.path.Path` instance, and transform is
an affine transform to apply to the path before clipping.
"""
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def get_dashes(self):
"""
Return the dash information as an offset dashlist tuple.
        The dash list is an even-sized list that gives the ink on, ink
        off in pixels.
        See p. 107 of the PostScript `BLUEBOOK
<https://www-cdf.fnal.gov/offline/PostScript/BLUEBOOK.PDF>`_
for more info.
Default value is None
"""
return self._dashes
def get_forced_alpha(self):
"""
Return whether the value given by get_alpha() should be used to
override any other alpha-channel values.
"""
return self._forced_alpha
def get_joinstyle(self):
"""
Return the line join style as one of ('miter', 'round', 'bevel')
"""
return self._joinstyle
def get_linestyle(self, style):
"""
Return the linestyle: one of ('solid', 'dashed', 'dashdot',
'dotted').
"""
return self._linestyle
def get_linewidth(self):
"""
Return the line width in points as a scalar
"""
return self._linewidth
def get_rgb(self):
"""
returns a tuple of three or four floats from 0-1.
"""
return self._rgb
def get_url(self):
"""
returns a url if one is set, None otherwise
"""
return self._url
def get_gid(self):
"""
Return the object identifier if one is set, None otherwise.
"""
return self._gid
def get_snap(self):
"""
returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
return self._snap
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on all backends.
If ``alpha=None`` (the default), the alpha components of the
foreground and fill colors will be used to set their respective
transparencies (where applicable); otherwise, ``alpha`` will override
them.
"""
if alpha is not None:
self._alpha = alpha
self._forced_alpha = True
else:
self._alpha = 1.0
self._forced_alpha = False
self.set_foreground(self._rgb, isRGBA=True)
def set_antialiased(self, b):
"""
True if object should be drawn with antialiased rendering
"""
# use 0, 1 to make life easier on extension code trying to read the gc
if b:
self._antialiased = 1
else:
self._antialiased = 0
def set_capstyle(self, cs):
"""
Set the capstyle as a string in ('butt', 'round', 'projecting')
"""
if cs in ('butt', 'round', 'projecting'):
self._capstyle = cs
else:
raise ValueError('Unrecognized cap style. Found %s' % cs)
def set_clip_rectangle(self, rectangle):
"""
Set the clip rectangle with sequence (left, bottom, width, height)
"""
self._cliprect = rectangle
def set_clip_path(self, path):
"""
Set the clip path and transformation. Path should be a
:class:`~matplotlib.transforms.TransformedPath` instance.
"""
if path is not None and not isinstance(path,
transforms.TransformedPath):
msg = ("Path should be a matplotlib.transforms.TransformedPath"
"instance.")
raise ValueError(msg)
self._clippath = path
def set_dashes(self, dash_offset, dash_list):
"""
Set the dash style for the gc.
*dash_offset*
is the offset (usually 0).
*dash_list*
specifies the on-off sequence as points.
``(None, None)`` specifies a solid line
"""
if dash_list is not None:
dl = np.asarray(dash_list)
if np.any(dl <= 0.0):
raise ValueError("All values in the dash list must be positive")
self._dashes = dash_offset, dash_list
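    # Example (a sketch): a dashed line with 6 points on / 6 points off and no
    # offset would be configured as
    #
    #     gc.set_dashes(0.0, [6.0, 6.0])
    #
    # while set_dashes(None, None) (the default) keeps the line solid.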
def set_foreground(self, fg, isRGBA=False):
"""
        Set the foreground color. fg can be a MATLAB format string, an
        HTML hex color string, an rgb or rgba unit tuple, or a float between 0
and 1. In the latter case, grayscale is used.
If you know fg is rgba, set ``isRGBA=True`` for efficiency.
"""
if self._forced_alpha and isRGBA:
self._rgb = fg[:3] + (self._alpha,)
elif self._forced_alpha:
self._rgb = colors.to_rgba(fg, self._alpha)
elif isRGBA:
self._rgb = fg
else:
self._rgb = colors.to_rgba(fg)
def set_graylevel(self, frac):
"""
Set the foreground color to be a gray level with *frac*
"""
# When removing, remember to remove all overrides in subclasses.
msg = ("set_graylevel is deprecated for removal in 1.6; "
"you can achieve the same result by using "
"set_foreground((frac, frac, frac))")
warnings.warn(msg, mplDeprecation)
self._rgb = (frac, frac, frac, self._alpha)
def set_joinstyle(self, js):
"""
Set the join style to be one of ('miter', 'round', 'bevel')
"""
if js in ('miter', 'round', 'bevel'):
self._joinstyle = js
else:
raise ValueError('Unrecognized join style. Found %s' % js)
def set_linewidth(self, w):
"""
Set the linewidth in points
"""
self._linewidth = float(w)
def set_linestyle(self, style):
"""
Set the linestyle to be one of ('solid', 'dashed', 'dashdot',
'dotted'). These are defined in the rcParams
`lines.dashed_pattern`, `lines.dashdot_pattern` and
`lines.dotted_pattern`. One may also specify customized dash
styles by providing a tuple of (offset, dash pairs).
"""
self._linestyle = style
def set_url(self, url):
"""
Sets the url for links in compatible backends
"""
self._url = url
def set_gid(self, id):
"""
Sets the id.
"""
self._gid = id
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
self._snap = snap
def set_hatch(self, hatch):
"""
Sets the hatch style for filling
"""
self._hatch = hatch
def get_hatch(self):
"""
Gets the current hatch style
"""
return self._hatch
def get_hatch_path(self, density=6.0):
"""
Returns a Path for the current hatch.
"""
if self._hatch is None:
return None
return Path.hatch(self._hatch, density)
def get_hatch_color(self):
"""
Gets the color to use for hatching.
"""
return self._hatch_color
def set_hatch_color(self, hatch_color):
"""
sets the color to use for hatching.
"""
self._hatch_color = hatch_color
def get_hatch_linewidth(self):
"""
Gets the linewidth to use for hatching.
"""
return self._hatch_linewidth
def get_sketch_params(self):
"""
Returns the sketch parameters for the artist.
Returns
-------
sketch_params : tuple or `None`
A 3-tuple with the following elements:
* `scale`: The amplitude of the wiggle perpendicular to the
source line.
* `length`: The length of the wiggle along the line.
* `randomness`: The scale factor by which the length is
shrunken or expanded.
May return `None` if no sketch parameters were set.
"""
return self._sketch
def set_sketch_params(self, scale=None, length=None, randomness=None):
"""
Sets the sketch parameters.
Parameters
----------
scale : float, optional
The amplitude of the wiggle perpendicular to the source
line, in pixels. If scale is `None`, or not provided, no
sketch filter will be provided.
length : float, optional
The length of the wiggle along the line, in pixels
(default 128.0)
randomness : float, optional
The scale factor by which the length is shrunken or
expanded (default 16.0)
"""
if scale is None:
self._sketch = None
else:
self._sketch = (scale, length or 128.0, randomness or 16.0)
class TimerBase(object):
'''
    A base class for providing timer events, useful for things like animations.
Backends need to implement a few specific methods in order to use their
own timing mechanisms so that the timer events are integrated into their
event loops.
Mandatory functions that must be implemented:
* `_timer_start`: Contains backend-specific code for starting
the timer
* `_timer_stop`: Contains backend-specific code for stopping
the timer
Optional overrides:
* `_timer_set_single_shot`: Code for setting the timer to
single shot operating mode, if supported by the timer
object. If not, the `Timer` class itself will store the flag
and the `_on_timer` method should be overridden to support
such behavior.
* `_timer_set_interval`: Code for setting the interval on the
timer, if there is a method for doing so on the timer
object.
* `_on_timer`: This is the internal function that any timer
object should call, which will handle the task of running
all callbacks that have been set.
Attributes:
* `interval`: The time between timer events in
milliseconds. Default is 1000 ms.
* `single_shot`: Boolean flag indicating whether this timer
should operate as single shot (run once and then
stop). Defaults to `False`.
* `callbacks`: Stores list of (func, args) tuples that will be
called upon timer events. This list can be manipulated
directly, or the functions `add_callback` and
`remove_callback` can be used.
'''
def __init__(self, interval=None, callbacks=None):
        # Initialize empty callbacks list and set up default settings if necessary
if callbacks is None:
self.callbacks = []
else:
self.callbacks = callbacks[:] # Create a copy
if interval is None:
self._interval = 1000
else:
self._interval = interval
self._single = False
# Default attribute for holding the GUI-specific timer object
self._timer = None
def __del__(self):
'Need to stop timer and possibly disconnect timer.'
self._timer_stop()
def start(self, interval=None):
'''
Start the timer object. `interval` is optional and will be used
to reset the timer interval first if provided.
'''
if interval is not None:
self._set_interval(interval)
self._timer_start()
def stop(self):
'''
Stop the timer.
'''
self._timer_stop()
def _timer_start(self):
pass
def _timer_stop(self):
pass
def _get_interval(self):
return self._interval
def _set_interval(self, interval):
# Force to int since none of the backends actually support fractional
# milliseconds, and some error or give warnings.
interval = int(interval)
self._interval = interval
self._timer_set_interval()
interval = property(_get_interval, _set_interval)
def _get_single_shot(self):
return self._single
def _set_single_shot(self, ss=True):
self._single = ss
self._timer_set_single_shot()
single_shot = property(_get_single_shot, _set_single_shot)
def add_callback(self, func, *args, **kwargs):
'''
Register `func` to be called by timer when the event fires. Any
additional arguments provided will be passed to `func`.
'''
self.callbacks.append((func, args, kwargs))
def remove_callback(self, func, *args, **kwargs):
'''
Remove `func` from list of callbacks. `args` and `kwargs` are optional
and used to distinguish between copies of the same function registered
to be called with different arguments.
'''
if args or kwargs:
self.callbacks.remove((func, args, kwargs))
else:
funcs = [c[0] for c in self.callbacks]
if func in funcs:
self.callbacks.pop(funcs.index(func))
def _timer_set_interval(self):
'Used to set interval on underlying timer object.'
pass
def _timer_set_single_shot(self):
'Used to set single shot on underlying timer object.'
pass
def _on_timer(self):
'''
        Runs all functions that have been registered as callbacks. Functions
can return False (or 0) if they should not be called any more. If there
are no callbacks, the timer is automatically stopped.
'''
for func, args, kwargs in self.callbacks:
ret = func(*args, **kwargs)
# docstring above explains why we use `if ret == False` here,
# instead of `if not ret`.
if ret == False:
self.callbacks.remove((func, args, kwargs))
if len(self.callbacks) == 0:
self.stop()
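# Typical usage (a sketch): concrete backends hand out TimerBase subclasses
# through FigureCanvasBase.new_timer, so a periodic callback is registered as
#
#     timer = fig.canvas.new_timer(interval=500)   # milliseconds
#     timer.add_callback(my_update_func)           # my_update_func: user code
#     timer.start()
#
# A callback that returns False is removed automatically by _on_timer above.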
class Event(object):
"""
A matplotlib event. Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`. The following attributes
are defined and shown with their default values
*name*
the event name
*canvas*
the FigureCanvas instance generating the event
*guiEvent*
the GUI event that triggered the matplotlib event
"""
def __init__(self, name, canvas, guiEvent=None):
self.name = name
self.canvas = canvas
self.guiEvent = guiEvent
class IdleEvent(Event):
"""
An event triggered by the GUI backend when it is idle -- useful
for passive animation
"""
pass
class DrawEvent(Event):
"""
An event triggered by a draw operation on the canvas
In addition to the :class:`Event` attributes, the following event
attributes are defined:
*renderer*
the :class:`RendererBase` instance for the draw event
"""
def __init__(self, name, canvas, renderer):
Event.__init__(self, name, canvas)
self.renderer = renderer
class ResizeEvent(Event):
"""
An event triggered by a canvas resize
In addition to the :class:`Event` attributes, the following event
attributes are defined:
*width*
width of the canvas in pixels
*height*
height of the canvas in pixels
"""
def __init__(self, name, canvas):
Event.__init__(self, name, canvas)
self.width, self.height = canvas.get_width_height()
class CloseEvent(Event):
"""
An event triggered by a figure being closed
In addition to the :class:`Event` attributes, the following event
attributes are defined:
"""
def __init__(self, name, canvas, guiEvent=None):
Event.__init__(self, name, canvas, guiEvent)
class LocationEvent(Event):
"""
An event that has a screen location
The following additional attributes are defined and shown with
their default values.
In addition to the :class:`Event` attributes, the following
event attributes are defined:
*x*
x position - pixels from left of canvas
*y*
y position - pixels from bottom of canvas
*inaxes*
the :class:`~matplotlib.axes.Axes` instance if mouse is over axes
*xdata*
x coord of mouse in data coords
*ydata*
y coord of mouse in data coords
"""
x = None # x position - pixels from left of canvas
    y = None  # y position - pixels from bottom of canvas
    inaxes = None  # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
# the last event that was triggered before this one
lastevent = None
def __init__(self, name, canvas, x, y, guiEvent=None):
"""
*x*, *y* in figure coords, 0,0 = bottom, left
"""
Event.__init__(self, name, canvas, guiEvent=guiEvent)
self.x = x
self.y = y
if x is None or y is None:
# cannot check if event was in axes if no x,y info
self.inaxes = None
self._update_enter_leave()
return
# Find all axes containing the mouse
if self.canvas.mouse_grabber is None:
axes_list = [a for a in self.canvas.figure.get_axes()
if a.in_axes(self)]
else:
axes_list = [self.canvas.mouse_grabber]
if len(axes_list) == 0: # None found
self.inaxes = None
self._update_enter_leave()
return
elif (len(axes_list) > 1): # Overlap, get the highest zorder
axes_list.sort(key=lambda x: x.zorder)
self.inaxes = axes_list[-1] # Use the highest zorder
else: # Just found one hit
self.inaxes = axes_list[0]
try:
trans = self.inaxes.transData.inverted()
xdata, ydata = trans.transform_point((x, y))
except ValueError:
self.xdata = None
self.ydata = None
else:
self.xdata = xdata
self.ydata = ydata
self._update_enter_leave()
def _update_enter_leave(self):
'process the figure/axes enter leave events'
if LocationEvent.lastevent is not None:
last = LocationEvent.lastevent
if last.inaxes != self.inaxes:
# process axes enter/leave events
try:
if last.inaxes is not None:
last.canvas.callbacks.process('axes_leave_event', last)
except:
pass
# See ticket 2901582.
# I think this is a valid exception to the rule
# against catching all exceptions; if anything goes
# wrong, we simply want to move on and process the
# current event.
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
else:
# process a figure enter event
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
LocationEvent.lastevent = self
class MouseEvent(LocationEvent):
"""
A mouse event ('button_press_event',
'button_release_event',
'scroll_event',
'motion_notify_event').
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*button*
button pressed None, 1, 2, 3, 'up', 'down' (up and down are used
for scroll events). Note that in the nbagg backend, both the
middle and right clicks return 3 since right clicking will bring
up the context menu in some browsers.
*key*
the key depressed when the mouse event triggered (see
:class:`KeyEvent`)
*step*
number of scroll steps (positive for 'up', negative for 'down')
Example usage::
def on_press(event):
print('you pressed', event.button, event.xdata, event.ydata)
cid = fig.canvas.mpl_connect('button_press_event', on_press)
"""
x = None # x position - pixels from left of canvas
    y = None  # y position - pixels from bottom of canvas
button = None # button pressed None, 1, 2, 3
dblclick = None # whether or not the event is the result of a double click
    inaxes = None  # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
step = None # scroll steps for scroll events
def __init__(self, name, canvas, x, y, button=None, key=None,
step=0, dblclick=False, guiEvent=None):
"""
x, y in figure coords, 0,0 = bottom, left
button pressed None, 1, 2, 3, 'up', 'down'
"""
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.button = button
self.key = key
self.step = step
self.dblclick = dblclick
def __str__(self):
return ("MPL MouseEvent: xy=(%d,%d) xydata=(%s,%s) button=%s " +
"dblclick=%s inaxes=%s") % (self.x, self.y, self.xdata,
self.ydata, self.button,
self.dblclick, self.inaxes)
class PickEvent(Event):
"""
a pick event, fired when the user picks a location on the canvas
sufficiently close to an artist.
Attrs: all the :class:`Event` attributes plus
*mouseevent*
the :class:`MouseEvent` that generated the pick
*artist*
the :class:`~matplotlib.artist.Artist` picked
other
extra class dependent attrs -- e.g., a
:class:`~matplotlib.lines.Line2D` pick may define different
extra attributes than a
:class:`~matplotlib.collections.PatchCollection` pick event
Example usage::
        ax.plot(np.random.rand(100), 'o', picker=5)  # 5 points tolerance
def on_pick(event):
line = event.artist
xdata, ydata = line.get_data()
ind = event.ind
print('on pick line:', np.array([xdata[ind], ydata[ind]]).T)
cid = fig.canvas.mpl_connect('pick_event', on_pick)
"""
def __init__(self, name, canvas, mouseevent, artist,
guiEvent=None, **kwargs):
Event.__init__(self, name, canvas, guiEvent)
self.mouseevent = mouseevent
self.artist = artist
self.__dict__.update(kwargs)
class KeyEvent(LocationEvent):
"""
A key event (key press, key release).
Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`.
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*key*
the key(s) pressed. Could be **None**, a single case sensitive ascii
character ("g", "G", "#", etc.), a special key
("control", "shift", "f1", "up", etc.) or a
combination of the above (e.g., "ctrl+alt+g", "ctrl+alt+G").
.. note::
Modifier keys will be prefixed to the pressed key and will be in the
order "ctrl", "alt", "super". The exception to this rule is when the
pressed key is itself a modifier key, therefore "ctrl+alt" and
"alt+control" can both be valid key values.
Example usage::
def on_key(event):
print('you pressed', event.key, event.xdata, event.ydata)
cid = fig.canvas.mpl_connect('key_press_event', on_key)
"""
def __init__(self, name, canvas, key, x=0, y=0, guiEvent=None):
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.key = key
class FigureCanvasBase(object):
"""
The canvas the figure renders into.
Public attributes
*figure*
A :class:`matplotlib.figure.Figure` instance
"""
events = [
'resize_event',
'draw_event',
'key_press_event',
'key_release_event',
'button_press_event',
'button_release_event',
'scroll_event',
'motion_notify_event',
'pick_event',
'idle_event',
'figure_enter_event',
'figure_leave_event',
'axes_enter_event',
'axes_leave_event',
'close_event'
]
supports_blit = True
fixed_dpi = None
filetypes = _default_filetypes
if _has_pil:
# JPEG support
register_backend('jpg', 'matplotlib.backends.backend_agg',
'Joint Photographic Experts Group')
register_backend('jpeg', 'matplotlib.backends.backend_agg',
'Joint Photographic Experts Group')
# TIFF support
register_backend('tif', 'matplotlib.backends.backend_agg',
'Tagged Image File Format')
register_backend('tiff', 'matplotlib.backends.backend_agg',
'Tagged Image File Format')
def __init__(self, figure):
self._is_idle_drawing = True
self._is_saving = False
figure.set_canvas(self)
self.figure = figure
# a dictionary from event name to a dictionary that maps cid->func
self.callbacks = cbook.CallbackRegistry()
self.widgetlock = widgets.LockDraw()
self._button = None # the button pressed
self._key = None # the key pressed
self._lastx, self._lasty = None, None
self.button_pick_id = self.mpl_connect('button_press_event', self.pick)
self.scroll_pick_id = self.mpl_connect('scroll_event', self.pick)
self.mouse_grabber = None # the axes currently grabbing mouse
self.toolbar = None # NavigationToolbar2 will set me
self._is_idle_drawing = False
@contextmanager
def _idle_draw_cntx(self):
self._is_idle_drawing = True
yield
self._is_idle_drawing = False
def is_saving(self):
"""
Returns `True` when the renderer is in the process of saving
to a file, rather than rendering for an on-screen buffer.
"""
return self._is_saving
def onRemove(self, ev):
"""
Mouse event processor which removes the top artist
        under the cursor. Connect this to the 'button_press_event'
        using::
            canvas.mpl_connect('button_press_event', canvas.onRemove)
"""
# Find the top artist under the cursor
under = self.figure.hitlist(ev)
under.sort(key=lambda x: x.zorder)
h = None
if under:
h = under[-1]
# Try deleting that artist, or its parent if you
# can't delete the artist
while h:
if h.remove():
self.draw_idle()
break
parent = None
for p in under:
if h in p.get_children():
parent = p
break
h = parent
def onHilite(self, ev):
"""
Mouse event processor which highlights the artists
under the cursor. Connect this to the 'motion_notify_event'
using::
canvas.mpl_connect('motion_notify_event',canvas.onHilite)
"""
msg = ("onHilite has been deprecated in 1.5 and will be removed "
"in 1.6. This function has not been used internally by mpl "
"since 2007.")
warnings.warn(msg, mplDeprecation)
if not hasattr(self, '_active'):
self._active = dict()
under = self.figure.hitlist(ev)
enter = [a for a in under if a not in self._active]
leave = [a for a in self._active if a not in under]
# On leave restore the captured colour
for a in leave:
if hasattr(a, 'get_color'):
a.set_color(self._active[a])
elif hasattr(a, 'get_edgecolor'):
a.set_edgecolor(self._active[a][0])
a.set_facecolor(self._active[a][1])
del self._active[a]
# On enter, capture the color and repaint the artist
# with the highlight colour. Capturing colour has to
# be done first in case the parent recolouring affects
# the child.
for a in enter:
if hasattr(a, 'get_color'):
self._active[a] = a.get_color()
elif hasattr(a, 'get_edgecolor'):
self._active[a] = (a.get_edgecolor(), a.get_facecolor())
else:
self._active[a] = None
for a in enter:
if hasattr(a, 'get_color'):
a.set_color('red')
elif hasattr(a, 'get_edgecolor'):
a.set_edgecolor('red')
a.set_facecolor('lightblue')
else:
self._active[a] = None
self.draw_idle()
def pick(self, mouseevent):
if not self.widgetlock.locked():
self.figure.pick(mouseevent)
def blit(self, bbox=None):
"""
blit the canvas in bbox (default entire canvas)
"""
pass
def resize(self, w, h):
"""
set the canvas size in pixels
"""
pass
def draw_event(self, renderer):
"""
        This method will call all functions connected to the
'draw_event' with a :class:`DrawEvent`
"""
s = 'draw_event'
event = DrawEvent(s, self, renderer)
self.callbacks.process(s, event)
def resize_event(self):
"""
        This method will call all functions connected to the
'resize_event' with a :class:`ResizeEvent`
"""
s = 'resize_event'
event = ResizeEvent(s, self)
self.callbacks.process(s, event)
def close_event(self, guiEvent=None):
"""
        This method will call all functions connected to the
'close_event' with a :class:`CloseEvent`
"""
s = 'close_event'
try:
event = CloseEvent(s, self, guiEvent=guiEvent)
self.callbacks.process(s, event)
except (TypeError, AttributeError):
pass
# Suppress the TypeError when the python session is being killed.
# It may be that a better solution would be a mechanism to
# disconnect all callbacks upon shutdown.
# AttributeError occurs on OSX with qt4agg upon exiting
# with an open window; 'callbacks' attribute no longer exists.
def key_press_event(self, key, guiEvent=None):
"""
        This method will call all functions connected to the
'key_press_event' with a :class:`KeyEvent`
"""
self._key = key
s = 'key_press_event'
event = KeyEvent(
s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
def key_release_event(self, key, guiEvent=None):
"""
        This method will call all functions connected to the
'key_release_event' with a :class:`KeyEvent`
"""
s = 'key_release_event'
event = KeyEvent(
s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._key = None
def pick_event(self, mouseevent, artist, **kwargs):
"""
This method will be called by artists who are picked and will
        fire off :class:`PickEvent` callbacks to registered listeners
"""
s = 'pick_event'
event = PickEvent(s, self, mouseevent, artist,
guiEvent=mouseevent.guiEvent,
**kwargs)
self.callbacks.process(s, event)
def scroll_event(self, x, y, step, guiEvent=None):
"""
Backend derived classes should call this function on any
scroll wheel event. x,y are the canvas coords: 0,0 is lower,
left. button and key are as defined in MouseEvent.
        This method will call all functions connected to the
'scroll_event' with a :class:`MouseEvent` instance.
"""
if step >= 0:
self._button = 'up'
else:
self._button = 'down'
s = 'scroll_event'
mouseevent = MouseEvent(s, self, x, y, self._button, self._key,
step=step, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_press_event(self, x, y, button, dblclick=False, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button press. x,y are the canvas coords: 0,0 is lower, left.
button and key are as defined in :class:`MouseEvent`.
        This method will call all functions connected to the
'button_press_event' with a :class:`MouseEvent` instance.
"""
self._button = button
s = 'button_press_event'
mouseevent = MouseEvent(s, self, x, y, button, self._key,
dblclick=dblclick, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_release_event(self, x, y, button, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button release.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
        This method will call all functions connected to the
'button_release_event' with a :class:`MouseEvent` instance.
"""
s = 'button_release_event'
event = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._button = None
def motion_notify_event(self, x, y, guiEvent=None):
"""
Backend derived classes should call this function on any
motion-notify-event.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
        This method will call all functions connected to the
'motion_notify_event' with a :class:`MouseEvent` instance.
"""
self._lastx, self._lasty = x, y
s = 'motion_notify_event'
event = MouseEvent(s, self, x, y, self._button, self._key,
guiEvent=guiEvent)
self.callbacks.process(s, event)
def leave_notify_event(self, guiEvent=None):
"""
Backend derived classes should call this function when leaving
canvas
*guiEvent*
the native UI event that generated the mpl event
"""
self.callbacks.process('figure_leave_event', LocationEvent.lastevent)
LocationEvent.lastevent = None
self._lastx, self._lasty = None, None
def enter_notify_event(self, guiEvent=None, xy=None):
"""
Backend derived classes should call this function when entering
canvas
*guiEvent*
the native UI event that generated the mpl event
*xy*
the coordinate location of the pointer when the canvas is
entered
"""
if xy is not None:
x, y = xy
self._lastx, self._lasty = x, y
event = Event('figure_enter_event', self, guiEvent)
self.callbacks.process('figure_enter_event', event)
def idle_event(self, guiEvent=None):
"""Called when GUI is idle."""
s = 'idle_event'
event = IdleEvent(s, self, guiEvent=guiEvent)
self.callbacks.process(s, event)
def grab_mouse(self, ax):
"""
Set the child axes which are currently grabbing the mouse events.
Usually called by the widgets themselves.
It is an error to call this if the mouse is already grabbed by
another axes.
"""
if self.mouse_grabber not in (None, ax):
            raise RuntimeError('two different axes attempted to grab mouse input')
self.mouse_grabber = ax
def release_mouse(self, ax):
"""
Release the mouse grab held by the axes, ax.
Usually called by the widgets.
        It is OK to call this even if *ax* doesn't currently have the
        mouse grab.
"""
if self.mouse_grabber is ax:
self.mouse_grabber = None
def draw(self, *args, **kwargs):
"""
Render the :class:`~matplotlib.figure.Figure`
"""
pass
def draw_idle(self, *args, **kwargs):
"""
        :meth:`draw` only if idle; defaults to draw but backends can override
"""
if not self._is_idle_drawing:
with self._idle_draw_cntx():
self.draw(*args, **kwargs)
def draw_cursor(self, event):
"""
Draw a cursor in the event.axes if inaxes is not None. Use
native GUI drawing for efficiency if possible
"""
pass
def get_width_height(self):
"""
Return the figure width and height in points or pixels
(depending on the backend), truncated to integers
"""
return int(self.figure.bbox.width), int(self.figure.bbox.height)
@classmethod
def get_supported_filetypes(cls):
"""Return dict of savefig file formats supported by this backend"""
return cls.filetypes
@classmethod
def get_supported_filetypes_grouped(cls):
"""Return a dict of savefig file formats supported by this backend,
where the keys are a file type name, such as 'Joint Photographic
Experts Group', and the values are a list of filename extensions used
for that filetype, such as ['jpg', 'jpeg']."""
groupings = {}
for ext, name in six.iteritems(cls.filetypes):
groupings.setdefault(name, []).append(ext)
groupings[name].sort()
return groupings
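# Illustrative usage sketch (added; not part of the original source). Listing
# the formats a canvas can save to; the exact formats depend on the backend,
# so the printed output mentioned below is only indicative.
def _example_list_filetypes(canvas):
    for name, exts in sorted(canvas.get_supported_filetypes_grouped().items()):
        print('{name}: {exts}'.format(name=name, exts=', '.join(exts)))
    # e.g. "Joint Photographic Experts Group: jpg, jpeg" on Agg-based backends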
def _get_output_canvas(self, format):
"""Return a canvas that is suitable for saving figures to a specified
file format. If necessary, this function will switch to a registered
backend that supports the format.
"""
method_name = 'print_%s' % format
# check if this canvas supports the requested format
if hasattr(self, method_name):
return self
# check if there is a default canvas for the requested format
canvas_class = get_registered_canvas_class(format)
if canvas_class:
return self.switch_backends(canvas_class)
# else report error for unsupported format
formats = sorted(self.get_supported_filetypes())
raise ValueError('Format "%s" is not supported.\n'
'Supported formats: '
'%s.' % (format, ', '.join(formats)))
def print_figure(self, filename, dpi=None, facecolor=None, edgecolor=None,
orientation='portrait', format=None, **kwargs):
"""
Render the figure to hardcopy. Set the figure patch face and edge
colors. This is useful because some of the GUIs have a gray figure
face color background and you'll probably want to override this on
hardcopy.
Arguments are:
*filename*
can also be a file object on image backends
*orientation*
'landscape' | 'portrait'; currently only applies to PostScript printing (not supported on all backends)
*dpi*
the dots per inch to save the figure in; if None, use savefig.dpi
*facecolor*
the facecolor of the figure; if None, defaults to savefig.facecolor
*edgecolor*
the edgecolor of the figure; if None, defaults to savefig.edgecolor
*format*
when set, forcibly set the file format to save to
*bbox_inches*
Bbox in inches. Only the given portion of the figure is
saved. If 'tight', try to figure out the tight bbox of
the figure. If None, use savefig.bbox
*pad_inches*
Amount of padding around the figure when bbox_inches is
'tight'. If None, use savefig.pad_inches
*bbox_extra_artists*
A list of extra artists that will be considered when the
tight bbox is calculated.
"""
self._is_saving = True
if format is None:
# get format from filename, or from backend's default filetype
if cbook.is_string_like(filename):
format = os.path.splitext(filename)[1][1:]
if format is None or format == '':
format = self.get_default_filetype()
if cbook.is_string_like(filename):
filename = filename.rstrip('.') + '.' + format
format = format.lower()
# get canvas object and print method for format
canvas = self._get_output_canvas(format)
print_method = getattr(canvas, 'print_%s' % format)
if dpi is None:
dpi = rcParams['savefig.dpi']
if dpi == 'figure':
dpi = getattr(self.figure, '_original_dpi', self.figure.dpi)
if facecolor is None:
facecolor = rcParams['savefig.facecolor']
if edgecolor is None:
edgecolor = rcParams['savefig.edgecolor']
origDPI = self.figure.dpi
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.dpi = dpi
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
bbox_inches = kwargs.pop("bbox_inches", None)
if bbox_inches is None:
bbox_inches = rcParams['savefig.bbox']
if bbox_inches:
# call adjust_bbox to save only the given area
if bbox_inches == "tight":
# when bbox_inches == "tight", it saves the figure
# twice. The first save command is just to estimate
# the bounding box of the figure. A stringIO object is
# used as a temporary file object, but it causes a
# problem for some backends (ps backend with
# usetex=True) if they expect a filename, not a
# file-like object. As I think it is best to change
# the backend to support file-like objects, I'm going
# to leave it as it is. However, a better solution
# than stringIO seems to be needed. -JJL
#result = getattr(self, method_name)
result = print_method(
io.BytesIO(),
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
orientation=orientation,
dryrun=True,
**kwargs)
renderer = self.figure._cachedRenderer
bbox_inches = self.figure.get_tightbbox(renderer)
bbox_artists = kwargs.pop("bbox_extra_artists", None)
if bbox_artists is None:
bbox_artists = self.figure.get_default_bbox_extra_artists()
bbox_filtered = []
for a in bbox_artists:
bbox = a.get_window_extent(renderer)
if a.get_clip_on():
clip_box = a.get_clip_box()
if clip_box is not None:
bbox = Bbox.intersection(bbox, clip_box)
clip_path = a.get_clip_path()
if clip_path is not None and bbox is not None:
clip_path = clip_path.get_fully_transformed_path()
bbox = Bbox.intersection(bbox,
clip_path.get_extents())
if bbox is not None and (bbox.width != 0 or
bbox.height != 0):
bbox_filtered.append(bbox)
if bbox_filtered:
_bbox = Bbox.union(bbox_filtered)
trans = Affine2D().scale(1.0 / self.figure.dpi)
bbox_extra = TransformedBbox(_bbox, trans)
bbox_inches = Bbox.union([bbox_inches, bbox_extra])
pad = kwargs.pop("pad_inches", None)
if pad is None:
pad = rcParams['savefig.pad_inches']
bbox_inches = bbox_inches.padded(pad)
restore_bbox = tight_bbox.adjust_bbox(self.figure, bbox_inches,
canvas.fixed_dpi)
_bbox_inches_restore = (bbox_inches, restore_bbox)
else:
_bbox_inches_restore = None
try:
#result = getattr(self, method_name)(
result = print_method(
filename,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
orientation=orientation,
bbox_inches_restore=_bbox_inches_restore,
**kwargs)
finally:
if bbox_inches and restore_bbox:
restore_bbox()
self.figure.dpi = origDPI
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
self.figure.set_canvas(self)
self._is_saving = False
#self.figure.canvas.draw() ## seems superfluous
return result
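# Illustrative usage sketch (added; not part of the original source). Saving a
# figure through print_figure directly; most users go through Figure.savefig,
# which forwards here. The filename and dpi are arbitrary example values.
def _example_hardcopy(canvas):
    canvas.print_figure('output.png', dpi=150, facecolor='white',
                        bbox_inches='tight', pad_inches=0.1)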
@classmethod
def get_default_filetype(cls):
"""
Get the default savefig file format as specified in rcParam
``savefig.format``. Returned string excludes period. Overridden
in backends that only support a single file type.
"""
return rcParams['savefig.format']
def get_window_title(self):
"""
Get the title text of the window containing the figure.
Return None if there is no window (e.g., a PS backend).
"""
if hasattr(self, "manager"):
return self.manager.get_window_title()
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect if there is no window (e.g., a PS backend).
"""
if hasattr(self, "manager"):
self.manager.set_window_title(title)
def get_default_filename(self):
"""
Return a string, which includes extension, suitable for use as
a default filename.
"""
default_basename = self.get_window_title() or 'image'
default_basename = default_basename.replace(' ', '_')
default_filetype = self.get_default_filetype()
default_filename = default_basename + '.' + default_filetype
save_dir = os.path.expanduser(rcParams.get('savefig.directory', ''))
# ensure non-existing filename in save dir
i = 1
while os.path.isfile(os.path.join(save_dir, default_filename)):
# attach numerical count to basename
default_filename = '{0}-{1}.{2}'.format(default_basename, i, default_filetype)
i += 1
return default_filename
def switch_backends(self, FigureCanvasClass):
"""
Instantiate an instance of FigureCanvasClass
This is used for backend switching, e.g., to instantiate a
FigureCanvasPS from a FigureCanvasGTK. Note, deep copying is
not done, so any changes to one of the instances (e.g., setting
figure size or line props) will be reflected in the other
"""
newCanvas = FigureCanvasClass(self.figure)
newCanvas._is_saving = self._is_saving
return newCanvas
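# Illustrative usage sketch (added; not part of the original source). Switching
# an interactive canvas to the Agg canvas for raster output; the import path is
# the standard matplotlib Agg backend module, the filename is an example value.
def _example_switch_to_agg(canvas):
    from matplotlib.backends.backend_agg import FigureCanvasAgg
    agg_canvas = canvas.switch_backends(FigureCanvasAgg)
    agg_canvas.print_figure('snapshot.png')  # shares the same Figure instance
    return agg_canvas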
def mpl_connect(self, s, func):
"""
Connect event with string *s* to *func*. The signature of *func* is::
def func(event)
where event is a :class:`matplotlib.backend_bases.Event`. The
following events are recognized
- 'button_press_event'
- 'button_release_event'
- 'draw_event'
- 'key_press_event'
- 'key_release_event'
- 'motion_notify_event'
- 'pick_event'
- 'resize_event'
- 'scroll_event'
- 'figure_enter_event',
- 'figure_leave_event',
- 'axes_enter_event',
- 'axes_leave_event'
- 'close_event'
For the location events (button and key press/release), if the
mouse is over the axes, the variable ``event.inaxes`` will be
set to the :class:`~matplotlib.axes.Axes` the event occurred
over, and additionally, the variables ``event.xdata`` and
``event.ydata`` will be defined. This is the mouse location
in data coords. See
:class:`~matplotlib.backend_bases.KeyEvent` and
:class:`~matplotlib.backend_bases.MouseEvent` for more info.
Return value is a connection id that can be used with
:meth:`~matplotlib.backend_bases.Event.mpl_disconnect`.
Example usage::
def on_press(event):
print('you pressed', event.button, event.xdata, event.ydata)
cid = canvas.mpl_connect('button_press_event', on_press)
"""
if s == 'idle_event':
warn_deprecated(1.5,
"idle_event is only implemented for the wx backend, and will "
"be removed in matplotlib 2.1. Use the animations module "
"instead.")
return self.callbacks.connect(s, func)
def mpl_disconnect(self, cid):
"""
Disconnect callback id cid
Example usage::
cid = canvas.mpl_connect('button_press_event', on_press)
#...later
canvas.mpl_disconnect(cid)
"""
return self.callbacks.disconnect(cid)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of
:class:`backend_bases.Timer`. This is useful for getting periodic
events through the backend's native event loop. Implemented only for
backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerBase(*args, **kwargs)
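# Illustrative usage sketch (added; not part of the original source). Creating
# a periodic timer on a GUI backend; assumes the backend's Timer subclass
# provides start()/stop() as TimerBase does.
def _example_periodic_redraw(canvas):
    def refresh():
        canvas.draw_idle()
    timer = canvas.new_timer(interval=500, callbacks=[(refresh, [], {})])
    timer.start()
    return timer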
def flush_events(self):
"""
Flush the GUI events for the figure. Implemented only for
backends with GUIs.
"""
raise NotImplementedError
def start_event_loop(self, timeout):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def stop_event_loop(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def start_event_loop_default(self, timeout=0):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This function provides default event loop functionality based
on time.sleep that is meant to be used until event loop
functions for each of the GUI backends can be written. As
such, it throws a deprecated warning.
This call blocks until a callback function triggers
stop_event_loop() or *timeout* is reached. If *timeout* is
<=0, never timeout.
"""
str = "Using default event loop until function specific"
str += " to this GUI is implemented"
warnings.warn(str, mplDeprecation)
if timeout <= 0:
timeout = np.inf
timestep = 0.01
counter = 0
self._looping = True
while self._looping and counter * timestep < timeout:
self.flush_events()
time.sleep(timestep)
counter += 1
def stop_event_loop_default(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
"""
self._looping = False
def key_press_handler(event, canvas, toolbar=None):
"""
Implement the default mpl key bindings for the canvas and toolbar
described at :ref:`key-event-handling`
*event*
a :class:`KeyEvent` instance
*canvas*
a :class:`FigureCanvasBase` instance
*toolbar*
a :class:`NavigationToolbar2` instance
"""
# these bindings happen whether you are over an axes or not
if event.key is None:
return
# Load key-mappings from your matplotlibrc file.
fullscreen_keys = rcParams['keymap.fullscreen']
home_keys = rcParams['keymap.home']
back_keys = rcParams['keymap.back']
forward_keys = rcParams['keymap.forward']
pan_keys = rcParams['keymap.pan']
zoom_keys = rcParams['keymap.zoom']
save_keys = rcParams['keymap.save']
quit_keys = rcParams['keymap.quit']
grid_keys = rcParams['keymap.grid']
toggle_yscale_keys = rcParams['keymap.yscale']
toggle_xscale_keys = rcParams['keymap.xscale']
all = rcParams['keymap.all_axes']
# toggle fullscreen mode ('f', 'ctrl + f')
if event.key in fullscreen_keys:
try:
canvas.manager.full_screen_toggle()
except AttributeError:
pass
# quit the figure (default key 'ctrl+w')
if event.key in quit_keys:
Gcf.destroy_fig(canvas.figure)
if toolbar is not None:
# home or reset mnemonic (default key 'h', 'home' and 'r')
if event.key in home_keys:
toolbar.home()
# forward / backward keys to enable left handed quick navigation
# (default key for backward: 'left', 'backspace' and 'c')
elif event.key in back_keys:
toolbar.back()
# (default key for forward: 'right' and 'v')
elif event.key in forward_keys:
toolbar.forward()
# pan mnemonic (default key 'p')
elif event.key in pan_keys:
toolbar.pan()
toolbar._set_cursor(event)
# zoom mnemonic (default key 'o')
elif event.key in zoom_keys:
toolbar.zoom()
toolbar._set_cursor(event)
# saving current figure (default key 's')
elif event.key in save_keys:
toolbar.save_figure()
if event.inaxes is None:
return
# these bindings require the mouse to be over an axes to trigger
# switching on/off a grid in current axes (default key 'g')
if event.key in grid_keys:
event.inaxes.grid()
canvas.draw()
# toggle scaling of y-axes between 'log' and 'linear' (default key 'l')
elif event.key in toggle_yscale_keys:
ax = event.inaxes
scale = ax.get_yscale()
if scale == 'log':
ax.set_yscale('linear')
ax.figure.canvas.draw()
elif scale == 'linear':
try:
ax.set_yscale('log')
except ValueError as exc:
warnings.warn(str(exc))
ax.set_yscale('linear')
ax.figure.canvas.draw()
# toggle scaling of x-axes between 'log' and 'linear' (default key 'k')
elif event.key in toggle_xscale_keys:
ax = event.inaxes
scalex = ax.get_xscale()
if scalex == 'log':
ax.set_xscale('linear')
ax.figure.canvas.draw()
elif scalex == 'linear':
try:
ax.set_xscale('log')
except ValueError as exc:
warnings.warn(str(exc))
ax.set_xscale('linear')
ax.figure.canvas.draw()
elif (event.key.isdigit() and event.key != '0') or event.key in all:
# keys in list 'all' enables all axes (default key 'a'),
# otherwise if key is a number only enable this particular axes
# if it was the axes, where the event was raised
if not (event.key in all):
n = int(event.key) - 1
for i, a in enumerate(canvas.figure.get_axes()):
# consider axes, in which the event was raised
# FIXME: Why only this axes?
if event.x is not None and event.y is not None \
and a.in_axes(event):
if event.key in all:
a.set_navigate(True)
else:
a.set_navigate(i == n)
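# Illustrative usage sketch (added; not part of the original source). Replacing
# the default key bindings with a filtered handler that ignores the quit keys
# but otherwise delegates to key_press_handler; `fig` is assumed to be an
# existing Figure with a GUI canvas and manager.
def _example_custom_key_bindings(fig):
    canvas = fig.canvas
    manager = getattr(canvas, 'manager', None)
    if manager is not None and manager.key_press_handler_id is not None:
        canvas.mpl_disconnect(manager.key_press_handler_id)
    def on_key(event):
        if event.key in rcParams['keymap.quit']:
            return  # swallow the quit keys, keep everything else
        key_press_handler(event, canvas, getattr(canvas, 'toolbar', None))
    return canvas.mpl_connect('key_press_event', on_key)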
class NonGuiException(Exception):
pass
class FigureManagerBase(object):
"""
Helper class for pyplot mode, wraps everything up into a neat bundle
Public attributes:
*canvas*
A :class:`FigureCanvasBase` instance
*num*
The figure number
"""
def __init__(self, canvas, num):
self.canvas = canvas
canvas.manager = self # store a pointer to parent
self.num = num
if rcParams['toolbar'] != 'toolmanager':
self.key_press_handler_id = self.canvas.mpl_connect(
'key_press_event',
self.key_press)
else:
self.key_press_handler_id = None
"""
The returned id from connecting the default key handler via
:meth:`FigureCanvasBase.mpl_connect`.
To disable default key press handling::
manager, canvas = figure.canvas.manager, figure.canvas
canvas.mpl_disconnect(manager.key_press_handler_id)
"""
def show(self):
"""
For GUI backends, show the figure window and redraw.
For non-GUI backends, raise an exception to be caught
by :meth:`~matplotlib.figure.Figure.show`, for an
optional warning.
"""
raise NonGuiException()
def destroy(self):
pass
def full_screen_toggle(self):
pass
def resize(self, w, h):
""""For gui backends, resize the window (in pixels)."""
pass
def key_press(self, event):
"""
Implement the default mpl key bindings defined at
:ref:`key-event-handling`
"""
if rcParams['toolbar'] != 'toolmanager':
key_press_handler(event, self.canvas, self.canvas.toolbar)
def show_popup(self, msg):
"""
Display message in a popup -- GUI only
"""
pass
def get_window_title(self):
"""
Get the title text of the window containing the figure.
Return None for non-GUI backends (e.g., a PS backend).
"""
return 'image'
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect for non-GUI backends (e.g., a PS backend).
"""
pass
cursors = tools.cursors
class NavigationToolbar2(object):
"""
Base class for the navigation cursor, version 2
backends must implement a canvas that handles connections for
'button_press_event' and 'button_release_event'. See
:meth:`FigureCanvasBase.mpl_connect` for more information
They must also define
:meth:`save_figure`
save the current figure
:meth:`set_cursor`
if you want the pointer icon to change
:meth:`_init_toolbar`
create your toolbar widget
:meth:`draw_rubberband` (optional)
draw the zoom to rect "rubberband" rectangle
:meth:`press` (optional)
whenever a mouse button is pressed, you'll be notified with
the event
:meth:`release` (optional)
whenever a mouse button is released, you'll be notified with
the event
:meth:`dynamic_update` (optional)
dynamically update the window while navigating
:meth:`set_message` (optional)
display message
:meth:`set_history_buttons` (optional)
you can change the history back / forward buttons to
indicate disabled / enabled state.
That's it, we'll do the rest!
"""
# list of toolitems to add to the toolbar, format is:
# (
# text, # the text of the button (often not visible to users)
# tooltip_text, # the tooltip shown on hover (where possible)
# image_file, # name of the image for the button (without the extension)
# name_of_method, # name of the method in NavigationToolbar2 to call
# )
toolitems = (
('Home', 'Reset original view', 'home', 'home'),
('Back', 'Back to previous view', 'back', 'back'),
('Forward', 'Forward to next view', 'forward', 'forward'),
(None, None, None, None),
('Pan', 'Pan axes with left mouse, zoom with right', 'move', 'pan'),
('Zoom', 'Zoom to rectangle', 'zoom_to_rect', 'zoom'),
('Subplots', 'Configure subplots', 'subplots', 'configure_subplots'),
(None, None, None, None),
('Save', 'Save the figure', 'filesave', 'save_figure'),
)
def __init__(self, canvas):
self.canvas = canvas
canvas.toolbar = self
# a dict from axes index to a list of view limits
self._views = cbook.Stack()
self._positions = cbook.Stack() # stack of subplot positions
self._xypress = None # the location and axis info at the time
# of the press
self._idPress = None
self._idRelease = None
self._active = None
self._lastCursor = None
self._init_toolbar()
self._idDrag = self.canvas.mpl_connect(
'motion_notify_event', self.mouse_move)
self._ids_zoom = []
self._zoom_mode = None
self._button_pressed = None # determined by the button pressed
# at start
self.mode = '' # a mode string for the status bar
self.set_history_buttons()
def set_message(self, s):
"""Display a message on toolbar or in status bar"""
pass
def back(self, *args):
"""move back up the view lim stack"""
self._views.back()
self._positions.back()
self.set_history_buttons()
self._update_view()
def dynamic_update(self):
pass
def draw_rubberband(self, event, x0, y0, x1, y1):
"""Draw a rectangle rubberband to indicate zoom limits"""
pass
def remove_rubberband(self):
"""Remove the rubberband"""
pass
def forward(self, *args):
"""Move forward in the view lim stack"""
self._views.forward()
self._positions.forward()
self.set_history_buttons()
self._update_view()
def home(self, *args):
"""Restore the original view"""
self._views.home()
self._positions.home()
self.set_history_buttons()
self._update_view()
def _init_toolbar(self):
"""
This is where you actually build the GUI widgets (called by
__init__). The icons ``home.xpm``, ``back.xpm``, ``forward.xpm``,
``hand.xpm``, ``zoom_to_rect.xpm`` and ``filesave.xpm`` are standard
across backends (there are ppm versions in CVS also).
You just need to set the callbacks
home : self.home
back : self.back
forward : self.forward
hand : self.pan
zoom_to_rect : self.zoom
filesave : self.save_figure
You only need to define the last one - the others are in the base
class implementation.
"""
raise NotImplementedError
def _set_cursor(self, event):
if not event.inaxes or not self._active:
if self._lastCursor != cursors.POINTER:
self.set_cursor(cursors.POINTER)
self._lastCursor = cursors.POINTER
else:
if self._active == 'ZOOM':
if self._lastCursor != cursors.SELECT_REGION:
self.set_cursor(cursors.SELECT_REGION)
self._lastCursor = cursors.SELECT_REGION
elif (self._active == 'PAN' and
self._lastCursor != cursors.MOVE):
self.set_cursor(cursors.MOVE)
self._lastCursor = cursors.MOVE
def mouse_move(self, event):
self._set_cursor(event)
if event.inaxes and event.inaxes.get_navigate():
try:
s = event.inaxes.format_coord(event.xdata, event.ydata)
except (ValueError, OverflowError):
pass
else:
artists = [a for a in event.inaxes.mouseover_set
if a.contains(event) and a.get_visible()]
if artists:
a = max(artists, key=lambda x: x.zorder)
if a is not event.inaxes.patch:
data = a.get_cursor_data(event)
if data is not None:
s += ' [%s]' % a.format_cursor_data(data)
if len(self.mode):
self.set_message('%s, %s' % (self.mode, s))
else:
self.set_message(s)
else:
self.set_message(self.mode)
def pan(self, *args):
"""Activate the pan/zoom tool. pan with left button, zoom with right"""
# set the pointer icon and button press funcs to the
# appropriate callbacks
if self._active == 'PAN':
self._active = None
else:
self._active = 'PAN'
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect(
'button_press_event', self.press_pan)
self._idRelease = self.canvas.mpl_connect(
'button_release_event', self.release_pan)
self.mode = 'pan/zoom'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def press(self, event):
"""Called whenver a mouse button is pressed."""
pass
def press_pan(self, event):
"""the press mouse button in pan/zoom mode callback"""
if event.button == 1:
self._button_pressed = 1
elif event.button == 3:
self._button_pressed = 3
else:
self._button_pressed = None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty():
self.push_current()
self._xypress = []
for i, a in enumerate(self.canvas.figure.get_axes()):
if (x is not None and y is not None and a.in_axes(event) and
a.get_navigate() and a.can_pan()):
a.start_pan(x, y, event.button)
self._xypress.append((a, i))
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag = self.canvas.mpl_connect('motion_notify_event',
self.drag_pan)
self.press(event)
def press_zoom(self, event):
"""the press mouse button in zoom to rect mode callback"""
# If we're already in the middle of a zoom, pressing another
# button works to "cancel"
if self._ids_zoom != []:
for zoom_id in self._ids_zoom:
self.canvas.mpl_disconnect(zoom_id)
self.release(event)
self.draw()
self._xypress = None
self._button_pressed = None
self._ids_zoom = []
return
if event.button == 1:
self._button_pressed = 1
elif event.button == 3:
self._button_pressed = 3
else:
self._button_pressed = None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty():
self.push_current()
self._xypress = []
for i, a in enumerate(self.canvas.figure.get_axes()):
if (x is not None and y is not None and a.in_axes(event) and
a.get_navigate() and a.can_zoom()):
self._xypress.append((x, y, a, i, a._get_view()))
id1 = self.canvas.mpl_connect('motion_notify_event', self.drag_zoom)
id2 = self.canvas.mpl_connect('key_press_event',
self._switch_on_zoom_mode)
id3 = self.canvas.mpl_connect('key_release_event',
self._switch_off_zoom_mode)
self._ids_zoom = id1, id2, id3
self._zoom_mode = event.key
self.press(event)
def _switch_on_zoom_mode(self, event):
self._zoom_mode = event.key
self.mouse_move(event)
def _switch_off_zoom_mode(self, event):
self._zoom_mode = None
self.mouse_move(event)
def push_current(self):
"""push the current view limits and position onto the stack"""
views = []
pos = []
for a in self.canvas.figure.get_axes():
views.append(a._get_view())
# Store both the original and modified positions
pos.append((
a.get_position(True).frozen(),
a.get_position().frozen()))
self._views.push(views)
self._positions.push(pos)
self.set_history_buttons()
def release(self, event):
"""this will be called whenever mouse button is released"""
pass
def release_pan(self, event):
"""the release mouse button callback in pan/zoom mode"""
if self._button_pressed is None:
return
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag = self.canvas.mpl_connect(
'motion_notify_event', self.mouse_move)
for a, ind in self._xypress:
a.end_pan()
if not self._xypress:
return
self._xypress = []
self._button_pressed = None
self.push_current()
self.release(event)
self.draw()
def drag_pan(self, event):
"""the drag callback in pan/zoom mode"""
for a, ind in self._xypress:
#safer to use the recorded button at the press than current button:
#multiple button can get pressed during motion...
a.drag_pan(self._button_pressed, event.key, event.x, event.y)
self.dynamic_update()
def drag_zoom(self, event):
"""the drag callback in zoom mode"""
if self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, view = self._xypress[0]
# adjust x, last, y, last
x1, y1, x2, y2 = a.bbox.extents
x, lastx = max(min(x, lastx), x1), min(max(x, lastx), x2)
y, lasty = max(min(y, lasty), y1), min(max(y, lasty), y2)
if self._zoom_mode == "x":
x1, y1, x2, y2 = a.bbox.extents
y, lasty = y1, y2
elif self._zoom_mode == "y":
x1, y1, x2, y2 = a.bbox.extents
x, lastx = x1, x2
self.draw_rubberband(event, x, y, lastx, lasty)
def release_zoom(self, event):
"""the release mouse button callback in zoom to rect mode"""
for zoom_id in self._ids_zoom:
self.canvas.mpl_disconnect(zoom_id)
self._ids_zoom = []
self.remove_rubberband()
if not self._xypress:
return
last_a = []
for cur_xypress in self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, view = cur_xypress
# ignore singular clicks - 5 pixels is a threshold
# allows the user to "cancel" a zoom action
# by zooming by less than 5 pixels
if ((abs(x - lastx) < 5 and self._zoom_mode!="y") or
(abs(y - lasty) < 5 and self._zoom_mode!="x")):
self._xypress = None
self.release(event)
self.draw()
return
# detect twinx,y axes and avoid double zooming
twinx, twiny = False, False
if last_a:
for la in last_a:
if a.get_shared_x_axes().joined(a, la):
twinx = True
if a.get_shared_y_axes().joined(a, la):
twiny = True
last_a.append(a)
if self._button_pressed == 1:
direction = 'in'
elif self._button_pressed == 3:
direction = 'out'
else:
continue
a._set_view_from_bbox((lastx, lasty, x, y), direction,
self._zoom_mode, twinx, twiny)
self.draw()
self._xypress = None
self._button_pressed = None
self._zoom_mode = None
self.push_current()
self.release(event)
def draw(self):
"""Redraw the canvases, update the locators"""
for a in self.canvas.figure.get_axes():
xaxis = getattr(a, 'xaxis', None)
yaxis = getattr(a, 'yaxis', None)
locators = []
if xaxis is not None:
locators.append(xaxis.get_major_locator())
locators.append(xaxis.get_minor_locator())
if yaxis is not None:
locators.append(yaxis.get_major_locator())
locators.append(yaxis.get_minor_locator())
for loc in locators:
loc.refresh()
self.canvas.draw_idle()
def _update_view(self):
"""Update the viewlim and position from the view and
position stack for each axes
"""
views = self._views()
if views is None:
return
pos = self._positions()
if pos is None:
return
for i, a in enumerate(self.canvas.figure.get_axes()):
a._set_view(views[i])
# Restore both the original and modified positions
a.set_position(pos[i][0], 'original')
a.set_position(pos[i][1], 'active')
self.canvas.draw_idle()
def save_figure(self, *args):
"""Save the current figure"""
raise NotImplementedError
def set_cursor(self, cursor):
"""
Set the current cursor to one of the :class:`Cursors`
enums values
"""
pass
def update(self):
"""Reset the axes stack"""
self._views.clear()
self._positions.clear()
self.set_history_buttons()
def zoom(self, *args):
"""Activate zoom to rect mode"""
if self._active == 'ZOOM':
self._active = None
else:
self._active = 'ZOOM'
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect('button_press_event',
self.press_zoom)
self._idRelease = self.canvas.mpl_connect('button_release_event',
self.release_zoom)
self.mode = 'zoom rect'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def set_history_buttons(self):
"""Enable or disable back/forward button"""
pass
class ToolContainerBase(object):
"""
Base class for all tool containers, e.g. toolbars.
Attributes
----------
toolmanager : `ToolManager` object that holds the tools that
this `ToolContainer` wants to communicate with.
"""
def __init__(self, toolmanager):
self.toolmanager = toolmanager
self.toolmanager.toolmanager_connect('tool_removed_event',
self._remove_tool_cbk)
def _tool_toggled_cbk(self, event):
"""
Captures the 'tool_trigger_[name]'
This only gets used for toggled tools
"""
self.toggle_toolitem(event.tool.name, event.tool.toggled)
def add_tool(self, tool, group, position=-1):
"""
Adds a tool to this container
Parameters
----------
tool : tool_like
The tool to add, see `ToolManager.get_tool`.
group : str
The name of the group to add this tool to.
position : int (optional)
The position within the group to place this tool. Defaults to end.
"""
tool = self.toolmanager.get_tool(tool)
image = self._get_image_filename(tool.image)
toggle = getattr(tool, 'toggled', None) is not None
self.add_toolitem(tool.name, group, position,
image, tool.description, toggle)
if toggle:
self.toolmanager.toolmanager_connect('tool_trigger_%s' % tool.name,
self._tool_toggled_cbk)
def _remove_tool_cbk(self, event):
"""Captures the 'tool_removed_event' signal and removes the tool"""
self.remove_toolitem(event.tool.name)
def _get_image_filename(self, image):
"""Find the image based on its name"""
# TODO: better search for images, they are not always in the
# datapath
basedir = os.path.join(rcParams['datapath'], 'images')
if image is not None:
fname = os.path.join(basedir, image)
else:
fname = None
return fname
def trigger_tool(self, name):
"""
Trigger the tool
Parameters
----------
name : String
Name(id) of the tool triggered from within the container
"""
self.toolmanager.trigger_tool(name, sender=self)
def add_toolitem(self, name, group, position, image, description, toggle):
"""
Add a toolitem to the container
This method must get implemented per backend
The callback associated with the button click event,
must be **EXACTLY** `self.trigger_tool(name)`
Parameters
----------
name : string
Name of the tool to add, this gets used as the tool's ID and as the
default label of the buttons
group : String
Name of the group that this tool belongs to
position : Int
Position of the tool within its group, if -1 it goes at the End
image_file : String
Filename of the image for the button or `None`
description : String
Description of the tool, used for the tooltips
toggle : Bool
* `True` : The button is a toggle (change the pressed/unpressed
state between consecutive clicks)
* `False` : The button is a normal button (returns to unpressed
state after release)
"""
raise NotImplementedError
def toggle_toolitem(self, name, toggled):
"""
Toggle the toolitem without firing event
Parameters
----------
name : String
Id of the tool to toggle
toggled : bool
Whether to set this tool as toggled or not.
"""
raise NotImplementedError
def remove_toolitem(self, name):
"""
Remove a toolitem from the `ToolContainer`
This method must get implemented per backend
Called when `ToolManager` emits a `tool_removed_event`
Parameters
----------
name : string
Name of the tool to remove
"""
raise NotImplementedError
class StatusbarBase(object):
"""Base class for the statusbar"""
def __init__(self, toolmanager):
self.toolmanager = toolmanager
self.toolmanager.toolmanager_connect('tool_message_event',
self._message_cbk)
def _message_cbk(self, event):
"""Captures the 'tool_message_event' and set the message"""
self.set_message(event.message)
def set_message(self, s):
"""
Display a message on toolbar or in status bar
Parameters
----------
s : str
Message text
"""
pass
| bsd-2-clause |
mapattacker/cheatsheets | python/pandas_stocks.py | 1 | 1885 | import pandas as pd
import pandas_datareader.data as web
from datetime import datetime
# PARSE DATETIME
#--------------------------------------------------------
## RESAMPLE
# aggregation by date intervals
# list of rules can be found in url: http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
df.resample(rule='A').mean()
## SOME FUNCTIONS
ford['Volume'].argmax() # returns the index of the first occurrence of the max value, given that the index is a datetime
## SHIFT
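# (illustrative example added here; the original section was empty)
tesla['Close'].shift(1)   # previous row's close aligned to the current row; first value becomes NaN
tesla['Close'].shift(-1)  # next row's close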
## PERCENTAGE CHANGE; per day
tesla['returns'] = (tesla['Close'] / tesla['Close'].shift(1) ) - 1 #OR
tesla['returns'] = tesla['Close'].pct_change(1)
## CUMULATIVE DAILY RETURNS
# Cumulative return is computed relative to the day the investment is made.
tesla['Cumulative Return'] = (1 + tesla['returns']).cumprod()
## ROLLING MEAN (OR MOVING AVERAGE)
df['Close'].rolling(window=30).mean()
## EXPANDING
## average since start
df['Close'].expanding(min_periods=1).mean()
# BOLLINGER BANDS
#Developed by John Bollinger, Bollinger Bands are volatility bands placed above and below a moving average.
#Volatility is based on the standard deviation, which changes as volatility increases and decreases.
#The bands automatically widen when volatility increases and narrow when volatility decreases.
#This dynamic nature of Bollinger Bands also means they can be used on different securities with the standard settings.
#For signals, Bollinger Bands can be used to identify Tops and Bottoms or to determine the strength of the trend.
df['Close: 30 Day Mean'] = df['Close'].rolling(window=20).mean() #SMA
df['Upper'] = df['Close: 30 Day Mean'] + 2*df['Close'].rolling(window=20).std() #upper band
df['Lower'] = df['Close: 30 Day Mean'] - 2*df['Close'].rolling(window=20).std() #lower band
df[['Close','Close: 30 Day Mean','Upper','Lower']].plot(figsize=(16,6)) #plot | mit |
akloster/table-cleaner | table_cleaner/cleaner.py | 1 | 1875 | from __future__ import unicode_literals
from .validators import *
import six
import pandas as pd
class CleanerMetaclass(type):
def __init__(cls, name, bases, nmspc):
super(CleanerMetaclass, cls).__init__(name, bases, nmspc)
if not hasattr(cls, "_fields"):
cls._fields = {}
else:
cls._fields = cls._fields.copy()
for k,v in nmspc.items():
if k in ['__init__','__qualname__', '__module__']:
continue
cls._fields[k] = v
class Cleaner(six.with_metaclass(CleanerMetaclass, object)):
def __init__(self, original, verdict_counter=0):
output_rows = []
verdict_rows = []
verdict_index = []
self.original = original
for index, row in self.original.iterrows():
out_row = dict()
for key in row.index.get_values():
out_row[key] = None
keys = []
valid = True
for key, validator in six.iteritems(self._fields):
for verdict in validator.validate(row[key]):
vrow = verdict.to_row()
vrow["column"] = key
vrow["counter"] = verdict_counter
verdict_counter += 1
verdict_rows.append(vrow)
verdict_index.append(index)
value = verdict.value
valid &= verdict.valid
out_row[key] = value
if not valid:
continue
keys.append(key)
for key in set(self.original.columns.get_values())-set(keys):
out_row[key] = row[key]
if valid:
output_rows.append(out_row)
self.verdicts = pd.DataFrame(verdict_rows, index=verdict_index)
self.cleaned = pd.DataFrame(output_rows)
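# Illustrative usage sketch (added; not part of the original file). A minimal,
# hypothetical validator built only on what Cleaner consumes above: validate()
# must yield objects exposing .to_row(), .value and .valid. The real validators
# come from .validators; these stand-ins exist purely for illustration.
class _ExampleVerdict(object):
    def __init__(self, valid, value, description):
        self.valid, self.value, self.description = valid, value, description
    def to_row(self):
        return {"valid": self.valid, "description": self.description}

class _ExamplePositiveInt(object):
    def validate(self, value):
        try:
            number = int(value)
            yield _ExampleVerdict(number > 0, number, "parsed as int")
        except (TypeError, ValueError):
            yield _ExampleVerdict(False, None, "not an integer")

class _ExampleCleaner(Cleaner):
    age = _ExamplePositiveInt()

# _ExampleCleaner(pd.DataFrame({"age": ["3", "-1", "x"]})).cleaned keeps only
# the first row; .verdicts records one verdict per cell checked.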
| bsd-2-clause |
pratapvardhan/pandas | pandas/util/testing.py | 1 | 92892 | from __future__ import division
# pylint: disable-msg=W0402
import re
import string
import sys
import tempfile
import warnings
import os
import subprocess
import locale
import traceback
from datetime import datetime
from functools import wraps
from contextlib import contextmanager
from numpy.random import randn, rand
import numpy as np
import pandas as pd
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.missing import array_equivalent
from pandas.core.dtypes.common import (
is_datetimelike_v_numeric,
is_datetimelike_v_object,
is_number, is_bool,
needs_i8_conversion,
is_categorical_dtype,
is_interval_dtype,
is_sequence,
is_list_like,
is_extension_array_dtype)
from pandas.io.formats.printing import pprint_thing
from pandas.core.algorithms import take_1d
import pandas.core.common as com
import pandas.compat as compat
from pandas.compat import (
filter, map, zip, range, unichr, lrange, lmap, lzip, u, callable, Counter,
raise_with_traceback, httplib, StringIO, string_types, PY3, PY2)
from pandas import (bdate_range, CategoricalIndex, Categorical, IntervalIndex,
DatetimeIndex, TimedeltaIndex, PeriodIndex, RangeIndex,
Index, MultiIndex,
Series, DataFrame, Panel)
from pandas._libs import testing as _testing
from pandas.io.common import urlopen
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, compat.ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('always', _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('ignore', _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option('^display.', silent=True)
def round_trip_pickle(obj, path=None):
"""
Pickle an object and then read it again.
Parameters
----------
obj : pandas object
The object to pickle and then re-read.
path : str, default None
The path where the pickled object is written and then read.
Returns
-------
round_trip_pickled_object : pandas object
The original object that was pickled and then re-read.
"""
if path is None:
path = u('__{random_bytes}__.pickle'.format(random_bytes=rands(10)))
with ensure_clean(path) as path:
pd.to_pickle(obj, path)
return pd.read_pickle(path)
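# Illustrative usage sketch (added; not part of the original file). A test that
# wants to assert pickling fidelity for a small frame might do the following.
def _example_round_trip_pickle():
    df = pd.DataFrame({"a": [1, 2, 3]})
    result = round_trip_pickle(df)
    assert_frame_equal(result, df)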
def round_trip_pathlib(writer, reader, path=None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip('pathlib').Path
if path is None:
path = '___pathlib___'
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path=None):
"""
Write an object to file specified by a py.path LocalPath and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip('py.path').local
if path is None:
path = '___localpath___'
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object
Parameters
----------
path : str
The path where the file is read from
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
f : file object
"""
if compression is None:
f = open(path, 'rb')
elif compression == 'gzip':
import gzip
f = gzip.open(path, 'rb')
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, 'rb')
elif compression == 'xz':
lzma = compat.import_lzma()
f = lzma.LZMAFile(path, 'rb')
elif compression == 'zip':
import zipfile
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError('ZIP file {} error. Only one file per ZIP.'
.format(path))
else:
msg = 'Unrecognized compression type: {}'.format(compression)
raise ValueError(msg)
yield f
f.close()
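# Illustrative usage sketch (added; not part of the original file). The file
# name is an assumption; the context manager yields a binary file object that
# is closed when the block exits.
def _example_read_gzipped(path='data.csv.gz'):
    with decompress_file(path, compression='gzip') as f:
        return f.read()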
def assert_almost_equal(left, right, check_dtype="equiv",
check_less_precise=False, **kwargs):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool / string {'equiv'}, default False
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
"""
if isinstance(left, pd.Index):
return assert_index_equal(left, right,
check_exact=False,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.Series):
return assert_series_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.DataFrame):
return assert_frame_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if (isinstance(left, np.ndarray) or
isinstance(right, np.ndarray)):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
return _testing.assert_almost_equal(
left, right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
err_msg = "{name} Expected type {exp_type}, found {act_type} instead"
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(left)))
if not isinstance(right, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(right)))
def assert_dict_equal(left, right, compare_keys=True):
_check_isinstance(left, right, dict)
return _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p=0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
RANDU_CHARS = np.array(list(u("").join(map(unichr, lrange(1488, 1488 + 26))) +
string.digits), dtype=(np.unicode_, 1))
def rands_array(nchars, size, dtype='O'):
"""Generate an array of byte strings."""
retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def randu_array(nchars, size, dtype='O'):
"""Generate an array of unicode strings."""
retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(np.random.choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return ''.join(np.random.choice(RANDU_CHARS, nchars))
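# Illustrative usage sketch (added; not part of the original file). Building a
# throwaway frame of random strings, e.g. for object-dtype tests; the shape and
# column names are arbitrary example values.
def _example_random_strings():
    index = Index(rands_array(nchars=10, size=5))
    return DataFrame(randn(5, 2), index=index, columns=['x', 'y'])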
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# locale utilities
def check_output(*popenargs, **kwargs):
# shamelessly taken from Python 2.7 source
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
*popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def _default_locale_getter():
try:
raw_locales = check_output(['locale -a'], shell=True)
except subprocess.CalledProcessError as e:
raise type(e)("{exception}, the 'locale -a' command cannot be found "
"on your system".format(exception=e))
return raw_locales
def get_locales(prefix=None, normalize=True,
locale_getter=_default_locale_getter):
"""Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_getter : callable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
"""
try:
raw_locales = locale_getter()
except Exception:
return None
try:
# raw_locales is "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b'\n')
out_locales = []
for x in raw_locales:
if PY3:
out_locales.append(str(
x, encoding=pd.options.display.encoding))
else:
out_locales.append(str(x))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
found = re.compile('{prefix}.*'.format(prefix=prefix)) \
.findall('\n'.join(out_locales))
return _valid_locales(found, normalize)
@contextmanager
def set_locale(new_locale, lc_var=locale.LC_ALL):
"""Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globally setting the locale. This probably isn't
thread-safe.
"""
current_locale = locale.getlocale()
try:
locale.setlocale(lc_var, new_locale)
try:
normalized_locale = locale.getlocale()
except ValueError:
yield new_locale
else:
if com._all_not_none(*normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
finally:
locale.setlocale(lc_var, current_locale)
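# Illustrative usage sketch (added; not part of the original file). The locale
# string is an example and must be available on the system, which is why
# can_set_locale (defined just below) is checked first.
def _example_under_locale(lc='it_IT.UTF-8'):
    if not can_set_locale(lc):
        return None
    with set_locale(lc) as normalized:
        # the decimal point becomes ',' under most European locales
        return locale.str(3.14), normalized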
def can_set_locale(lc, lc_var=locale.LC_ALL):
"""
Check to see if we can set a locale without raising an Exception.
Parameters
----------
lc : str
The locale to attempt to set.
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Returns
-------
is_valid : bool
Whether the passed locale can be set
"""
try:
with set_locale(lc, lc_var=lc_var):
pass
except locale.Error: # horrible name for an Exception subclass
return False
else:
return True
def _valid_locales(locales, normalize):
"""Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
locales : str
A string where each locale is separated by a newline.
normalize : bool
Whether to call ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
if normalize:
normalizer = lambda x: locale.normalize(x.strip())
else:
normalizer = lambda x: x.strip()
return list(filter(can_set_locale, map(normalizer, locales)))
# -----------------------------------------------------------------------------
# Stdout / stderr decorators
@contextmanager
def set_defaultencoding(encoding):
"""
Set default encoding (as given by sys.getdefaultencoding()) to the given
encoding; restore on exit.
Parameters
----------
encoding : str
"""
if not PY2:
raise ValueError("set_defaultencoding context is only available "
"in Python 2.")
orig = sys.getdefaultencoding()
reload(sys) # noqa:F821
sys.setdefaultencoding(encoding)
try:
yield
finally:
sys.setdefaultencoding(orig)
def capture_stdout(f):
"""
Decorator to capture stdout in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stdout.
Returns
-------
f : callable
The decorated test ``f``, which captures stdout.
Examples
--------
>>> from pandas.util.testing import capture_stdout
>>>
>>> import sys
>>>
>>> @capture_stdout
... def test_print_pass():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stdout
... def test_print_fail():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stdout = StringIO()
f(*args, **kwargs)
finally:
sys.stdout = sys.__stdout__
return wrapper
def capture_stderr(f):
"""
Decorator to capture stderr in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stderr.
Returns
-------
f : callable
The decorated test ``f``, which captures stderr.
Examples
--------
>>> from pandas.util.testing import capture_stderr
>>>
>>> import sys
>>>
>>> @capture_stderr
... def test_stderr_pass():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stderr
... def test_stderr_fail():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stderr = StringIO()
f(*args, **kwargs)
finally:
sys.stderr = sys.__stderr__
return wrapper
# -----------------------------------------------------------------------------
# Console debugging tools
def debug(f, *args, **kwargs):
from pdb import Pdb as OldPdb
try:
from IPython.core.debugger import Pdb
kw = dict(color_scheme='Linux')
except ImportError:
Pdb = OldPdb
kw = {}
pdb = Pdb(**kw)
return pdb.runcall(f, *args, **kwargs)
def pudebug(f, *args, **kwargs):
import pudb
return pudb.runcall(f, *args, **kwargs)
def set_trace():
from IPython.core.debugger import Pdb
try:
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
except Exception:
from pdb import Pdb as OldPdb
OldPdb().set_trace(sys._getframe().f_back)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
"""Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
"""
filename = filename or ''
fd = None
if return_filelike:
f = tempfile.TemporaryFile(suffix=filename)
try:
yield f
finally:
f.close()
else:
# don't generate tempfile if using a path with directory specified
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(suffix=filename)
except UnicodeEncodeError:
import pytest
pytest.skip('no unicode file names on this system')
try:
yield filename
finally:
try:
os.close(fd)
except Exception as e:
print("Couldn't close file descriptor: {fdesc} (file: {fname})"
.format(fdesc=fd, fname=filename))
try:
if os.path.exists(filename):
os.remove(filename)
except Exception as e:
print("Exception on removing file: {error}".format(error=e))
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2):
"""Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(left, right, exact='equiv', check_names=True,
check_less_precise=False, check_exact=True,
check_categorical=True, obj='Index'):
"""Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message
"""
def _check_types(l, r, obj='Index'):
if exact:
assert_class_equal(l, r, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal('dtype', l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ('string', 'unicode'):
assert r.inferred_type in ('string', 'unicode')
else:
assert_attr_equal('inferred_type', l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
labels = index.labels[level]
filled = take_1d(unique.values, labels, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = '{obj} levels are different'.format(obj=obj)
msg2 = '{nlevels}, {left}'.format(nlevels=left.nlevels, left=left)
msg3 = '{nlevels}, {right}'.format(nlevels=right.nlevels, right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = '{obj} length are different'.format(obj=obj)
msg2 = '{length}, {left}'.format(length=len(left), left=left)
msg3 = '{length}, {right}'.format(length=len(right), right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
    # MultiIndex special comparison for more informative error messages
if left.nlevels > 1:
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = 'MultiIndex level [{level}]'.format(level=level)
assert_index_equal(llevel, rlevel,
exact=exact, check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact, obj=lobj)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values)
.astype(int)) * 100.0 / len(left)
msg = '{obj} values are different ({pct} %)'.format(
obj=obj, pct=np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(left.values, right.values,
check_less_precise=check_less_precise,
check_dtype=exact,
obj=obj, lobj=left, robj=right)
# metadata comparison
if check_names:
assert_attr_equal('names', left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal('freq', left, right, obj=obj)
if (isinstance(left, pd.IntervalIndex) or
isinstance(right, pd.IntervalIndex)):
assert_attr_equal('closed', left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{obj} category'.format(obj=obj))
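# Illustrative usage sketch (not part of the original module): with the
# default ``exact='equiv'`` a RangeIndex and an Int64Index holding the same
# values compare equal, whereas ``exact=True`` would raise because the
# classes differ.  The indexes below are assumptions for illustration only.
def _example_assert_index_equal():
    left = pd.RangeIndex(3)
    right = pd.Index([0, 1, 2])
    assert_index_equal(left, right, exact='equiv')  # passes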
def assert_class_equal(left, right, exact=True, obj='Input'):
"""checks classes are equal."""
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
try:
return x.__class__.__name__
except AttributeError:
return repr(type(x))
if exact == 'equiv':
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = set([type(left).__name__, type(right).__name__])
if len(types - set(['Int64Index', 'RangeIndex'])):
msg = '{obj} classes are not equivalent'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
elif exact:
if type(left) != type(right):
msg = '{obj} classes are different'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
def assert_attr_equal(attr, left, right, obj='Attributes'):
"""checks attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (is_number(left_attr) and np.isnan(left_attr) and
is_number(right_attr) and np.isnan(right_attr)):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = 'Attribute "{attr}" are different'.format(attr=attr)
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = ('one of \'objs\' is not a matplotlib Axes instance, type '
'encountered {name!r}').format(name=el.__class__.__name__)
assert isinstance(el, (plt.Axes, dict)), msg
else:
assert isinstance(objs, (plt.Artist, tuple, dict)), \
('objs is neither an ndarray of Artist instances nor a '
'single Artist instance, tuple, or dict, "objs" is a {name!r}'
).format(name=objs.__class__.__name__)
def isiterable(obj):
return hasattr(obj, '__iter__')
def is_sorted(seq):
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
return assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(left, right, check_dtype=True,
check_category_order=True, obj='Categorical'):
"""Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories,
obj='{obj}.categories'.format(obj=obj))
assert_numpy_array_equal(left.codes, right.codes,
check_dtype=check_dtype,
obj='{obj}.codes'.format(obj=obj))
else:
assert_index_equal(left.categories.sort_values(),
right.categories.sort_values(),
obj='{obj}.categories'.format(obj=obj))
assert_index_equal(left.categories.take(left.codes),
right.categories.take(right.codes),
obj='{obj}.values'.format(obj=obj))
assert_attr_equal('ordered', left, right, obj=obj)
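# Illustrative usage sketch (not part of the original module): with
# ``check_category_order=False`` only the reconstructed values and the
# ``ordered`` flag have to match, so categories listed in a different order
# still compare equal.  The toy Categoricals are assumptions for illustration.
def _example_assert_categorical_equal():
    left = Categorical(['a', 'b', 'a'], categories=['a', 'b'])
    right = Categorical(['a', 'b', 'a'], categories=['b', 'a'])
    assert_categorical_equal(left, right, check_category_order=False)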
def raise_assert_detail(obj, message, left, right, diff=None):
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if PY2 and isinstance(left, string_types):
# left needs to be printable in native text type in python2
left = left.encode('utf-8')
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
if PY2 and isinstance(right, string_types):
# right needs to be printable in native text type in python2
right = right.encode('utf-8')
msg = """{obj} are different
{message}
[left]: {left}
[right]: {right}""".format(obj=obj, message=message, left=left, right=right)
if diff is not None:
msg += "\n[diff]: {diff}".format(diff=diff)
raise AssertionError(msg)
def assert_numpy_array_equal(left, right, strict_nan=False,
check_dtype=True, err_msg=None,
check_same=None, obj='numpy array'):
""" Checks that 'np.ndarray' is equivalent
Parameters
----------
left : np.ndarray or iterable
right : np.ndarray or iterable
strict_nan : bool, default False
If True, consider NaN and None to be different.
    check_dtype : bool, default True
        Check dtype if both left and right are np.ndarray.
err_msg : str, default None
If provided, used as assertion message
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message
"""
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, 'base', None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == 'same':
if left_base is not right_base:
msg = "{left!r} is not {right!r}".format(
left=left_base, right=right_base)
raise AssertionError(msg)
elif check_same == 'copy':
if left_base is right_base:
msg = "{left!r} is {right!r}".format(
left=left_base, right=right_base)
raise AssertionError(msg)
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(obj, '{obj} shapes are different'
.format(obj=obj), left.shape, right.shape)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = '{obj} values are different ({pct} %)'.format(
obj=obj, pct=np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal('dtype', left, right, obj=obj)
return True
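# Illustrative usage sketch (not part of the original module):
# ``check_same='same'`` asserts that both arrays share the same base memory,
# while ``check_same='copy'`` asserts that they do not.  The arrays are
# assumptions used only for illustration.
def _example_assert_numpy_array_equal():
    arr = np.array([1, 2, 3])
    view = arr.view()
    copied = arr.copy()
    assert_numpy_array_equal(arr, view, check_same='same')
    assert_numpy_array_equal(arr, copied, check_same='copy')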
def assert_extension_array_equal(left, right):
"""Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
"""
assert isinstance(left, ExtensionArray)
assert left.dtype == right.dtype
left_na = left.isna()
right_na = right.isna()
assert_numpy_array_equal(left_na, right_na)
left_valid = left[~left_na].astype(object)
right_valid = right[~right_na].astype(object)
assert_numpy_array_equal(left_valid, right_valid)
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_series_type=True,
check_less_precise=False,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
obj='Series'):
"""Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default False
Whether to compare number exactly.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message
"""
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
# ToDo: There are some tests using rhs is sparse
# lhs is dense. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = '{len}, {left}'.format(len=len(left), left=left.index)
msg2 = '{len}, {right}'.format(len=len(right), right=right.index)
raise_assert_detail(obj, 'Series length are different', msg1, msg2)
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.index'.format(obj=obj))
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (is_categorical_dtype(left) and is_categorical_dtype(right) and
not check_categorical):
pass
else:
assert_attr_equal('dtype', left, right)
if check_exact:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype,
obj='{obj}'.format(obj=obj),)
elif check_datetimelike_compat:
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
if (is_datetimelike_v_numeric(left, right) or
is_datetimelike_v_object(left, right) or
needs_i8_conversion(left) or
needs_i8_conversion(right)):
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left.values).equals(Index(right.values)):
msg = ('[datetimelike_compat=True] {left} is not equal to '
'{right}.').format(left=left.values, right=right.values)
raise AssertionError(msg)
else:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype)
elif is_interval_dtype(left) or is_interval_dtype(right):
# TODO: big hack here
left = pd.IntervalIndex(left)
right = pd.IntervalIndex(right)
assert_index_equal(left, right, obj='{obj}.index'.format(obj=obj))
elif (is_extension_array_dtype(left) and not is_categorical_dtype(left) and
is_extension_array_dtype(right) and not is_categorical_dtype(right)):
return assert_extension_array_equal(left.values, right.values)
else:
_testing.assert_almost_equal(left.get_values(), right.get_values(),
check_less_precise=check_less_precise,
check_dtype=check_dtype,
obj='{obj}'.format(obj=obj))
# metadata comparison
if check_names:
assert_attr_equal('name', left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{obj} category'.format(obj=obj))
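# Illustrative usage sketch (not part of the original module): with
# ``check_dtype=False`` an int64 and a float64 Series holding the same values
# compare equal, which is convenient after operations that upcast.  The
# Series are assumptions for illustration only.
def _example_assert_series_equal():
    left = Series([1, 2, 3], name='x')
    right = Series([1.0, 2.0, 3.0], name='x')
    assert_series_equal(left, right, check_dtype=False)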
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_column_type='equiv',
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
obj='DataFrame'):
"""Check that left and right DataFrame are equal.
Parameters
----------
left : DataFrame
right : DataFrame
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
    check_index_type : bool / string {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical.
    check_column_type : bool / string {'equiv'}, default 'equiv'
        Whether to check the columns class, dtype and inferred_type
        are identical.
    check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_names : bool, default True
Whether to check that the `names` attribute for both the `index`
and `column` attributes of the DataFrame is identical, i.e.
* left.index.names == right.index.names
* left.columns.names == right.columns.names
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If true, ignore the order of rows & columns
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message
"""
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
# ToDo: There are some tests using rhs is SparseDataFrame
# lhs is DataFrame. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(obj,
'DataFrame shape mismatch',
'{shape!r}'.format(shape=left.shape),
'{shape!r}'.format(shape=right.shape))
if check_like:
left, right = left.reindex_like(right), right
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.index'.format(obj=obj))
# column comparison
assert_index_equal(left.columns, right.columns, exact=check_column_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.columns'.format(obj=obj))
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(lblocks[dtype], rblocks[dtype],
check_dtype=check_dtype, obj='DataFrame.blocks')
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol, rcol, check_dtype=check_dtype,
check_index_type=check_index_type,
check_less_precise=check_less_precise,
check_exact=check_exact, check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
obj='DataFrame.iloc[:, {idx}]'.format(idx=i))
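# Illustrative usage sketch (not part of the original module):
# ``check_like=True`` reindexes ``left`` like ``right`` first, so frames that
# differ only in row/column order still compare equal.  The frames are
# assumptions for illustration only.
def _example_assert_frame_equal():
    left = DataFrame({'a': [1, 2], 'b': [3, 4]}, index=['x', 'y'])
    right = DataFrame({'b': [4, 3], 'a': [2, 1]}, index=['y', 'x'])
    assert_frame_equal(left, right, check_like=True)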
def assert_panel_equal(left, right,
check_dtype=True,
check_panel_type=False,
check_less_precise=False,
check_names=False,
by_blocks=False,
obj='Panel'):
"""Check that left and right Panels are equal.
Parameters
----------
left : Panel (or nd)
right : Panel (or nd)
check_dtype : bool, default True
Whether to check the Panel dtype is identical.
check_panel_type : bool, default False
Whether to check the Panel class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
    check_names : bool, default False
Whether to check the Index names attribute.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
obj : str, default 'Panel'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
if check_panel_type:
assert_class_equal(left, right, obj=obj)
for axis in left._AXIS_ORDERS:
left_ind = getattr(left, axis)
right_ind = getattr(right, axis)
assert_index_equal(left_ind, right_ind, check_names=check_names)
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
array_equivalent(lblocks[dtype].values, rblocks[dtype].values)
else:
# can potentially be slow
for i, item in enumerate(left._get_axis(0)):
msg = "non-matching item (right) '{item}'".format(item=item)
assert item in right, msg
litem = left.iloc[i]
ritem = right.iloc[i]
assert_frame_equal(litem, ritem,
check_less_precise=check_less_precise,
check_names=check_names)
for i, item in enumerate(right._get_axis(0)):
msg = "non-matching item (left) '{item}'".format(item=item)
assert item in left, msg
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(left, right, check_dtype=True):
"""Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
check_dtype : bool, default True
Whether to check the data dtype is identical.
"""
_check_isinstance(left, right, pd.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values,
check_dtype=check_dtype)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
if not left.sp_index.equals(right.sp_index):
raise_assert_detail('SparseArray.index', 'index are not equal',
left.sp_index, right.sp_index)
assert_attr_equal('fill_value', left, right)
if check_dtype:
assert_attr_equal('dtype', left, right)
assert_numpy_array_equal(left.values, right.values,
check_dtype=check_dtype)
def assert_sp_series_equal(left, right, check_dtype=True, exact_indices=True,
check_series_type=True, check_names=True,
obj='SparseSeries'):
"""Check that the left and right SparseSeries are equal.
Parameters
----------
left : SparseSeries
right : SparseSeries
check_dtype : bool, default True
Whether to check the Series dtype is identical.
exact_indices : bool, default True
check_series_type : bool, default True
Whether to check the SparseSeries class is identical.
check_names : bool, default True
Whether to check the SparseSeries name attribute.
obj : str, default 'SparseSeries'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
_check_isinstance(left, right, pd.SparseSeries)
if check_series_type:
assert_class_equal(left, right, obj=obj)
assert_index_equal(left.index, right.index,
obj='{obj}.index'.format(obj=obj))
assert_sp_array_equal(left.block.values, right.block.values)
if check_names:
assert_attr_equal('name', left, right)
if check_dtype:
assert_attr_equal('dtype', left, right)
assert_numpy_array_equal(left.values, right.values)
def assert_sp_frame_equal(left, right, check_dtype=True, exact_indices=True,
check_frame_type=True, obj='SparseDataFrame'):
"""Check that the left and right SparseDataFrame are equal.
Parameters
----------
left : SparseDataFrame
right : SparseDataFrame
check_dtype : bool, default True
Whether to check the Series dtype is identical.
exact_indices : bool, default True
SparseSeries SparseIndex objects must be exactly the same,
otherwise just compare dense representations.
check_frame_type : bool, default True
Whether to check the SparseDataFrame class is identical.
obj : str, default 'SparseDataFrame'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
_check_isinstance(left, right, pd.SparseDataFrame)
if check_frame_type:
assert_class_equal(left, right, obj=obj)
assert_index_equal(left.index, right.index,
obj='{obj}.index'.format(obj=obj))
assert_index_equal(left.columns, right.columns,
obj='{obj}.columns'.format(obj=obj))
for col, series in compat.iteritems(left):
assert (col in right)
# trade-off?
if exact_indices:
assert_sp_series_equal(series, right[col],
check_dtype=check_dtype)
else:
assert_series_equal(series.to_dense(), right[col].to_dense(),
check_dtype=check_dtype)
assert_attr_equal('default_fill_value', left, right, obj=obj)
# do I care?
# assert(left.default_kind == right.default_kind)
for col in right:
assert (col in left)
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, "Did not contain item: '{key!r}'".format(key=k)
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = ("Expected object {obj1!r} and object {obj2!r} to be "
"different objects, but they were the same object."
).format(obj1=type(elem1), obj2=type(elem2))
assert elem1 is not elem2, msg
def getCols(k):
return string.ascii_uppercase[:k]
def getArangeMat():
return np.arange(N * K).reshape((N, K))
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(np.random.choice(x, k), name=name, **kwargs)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(lrange(k), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2**63 + i for i in lrange(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq='B', name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq='D', name=None, **kwargs):
return TimedeltaIndex(start='1 day', periods=k, freq=freq,
name=name, **kwargs)
def makePeriodIndex(k=10, name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = PeriodIndex(start=dt, periods=k, freq='B', name=name, **kwargs)
return dr
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product(
(('foo', 'bar'), (1, 2)), names=names, **kwargs)
def all_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the various
index classes.
Parameters
----------
k: length of each of the index instances
"""
all_make_index_funcs = [makeIntIndex, makeFloatIndex, makeStringIndex,
makeUnicodeIndex, makeDateIndex, makePeriodIndex,
makeTimedeltaIndex, makeBoolIndex, makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex]
for make_index_func in all_make_index_funcs:
yield make_index_func(k=k)
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex, makePeriodIndex,
makeTimedeltaIndex, makeRangeIndex,
makeIntervalIndex, makeCategoricalIndex,
makeMultiIndex
]
for make_index_func in make_index_funcs:
yield make_index_func
def all_timeseries_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the classes
    which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeObjectSeries(name=None):
dateIndex = makeDateIndex(N)
dateIndex = Index(dateIndex, dtype=object)
index = makeStringIndex(N)
return Series(dateIndex, index=index, name=name)
def getSeriesData():
index = makeStringIndex(N)
return {c: Series(randn(N), index=index) for c in getCols(K)}
def makeTimeSeries(nper=None, freq='B', name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq='B'):
return {c: makeTimeSeries(nper, freq) for c in getCols(K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(K)}
# make frame
def makeTimeDataFrame(nper=None, freq='B'):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(['a', 'b', 'c', 'd', 'e'])
data = {
'A': [0., 1., 2., 3., 4.],
'B': [0., 1., 0., 1., 0.],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': bdate_range('1/1/2009', periods=5)
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makePanel(nper=None):
with warnings.catch_warnings(record=True):
cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
data = {c: makeTimeDataFrame(nper) for c in cols}
return Panel.fromDict(data)
def makePeriodPanel(nper=None):
with warnings.catch_warnings(record=True):
cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
data = {c: makePeriodFrame(nper) for c in cols}
return Panel.fromDict(data)
def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,
idx_type=None):
"""Create an index/multindex with given dimensions, levels, names, etc'
nentries - number of entries in index
nlevels - number of levels (> 1 produces multindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default
names, if false will use no names, if a list is given, the name of
each level in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
label will repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert (is_sequence(ndupe_l) and len(ndupe_l) <= nlevels)
assert (names is None or names is False or
names is True or len(names) is nlevels)
assert idx_type is None or \
(idx_type in ('i', 'f', 's', 'u', 'dt', 'p', 'td') and nlevels == 1)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
    # make singleton case uniform
if isinstance(names, compat.string_types) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = dict(i=makeIntIndex, f=makeFloatIndex,
s=makeStringIndex, u=makeUnicodeIndex,
dt=makeDateIndex, td=makeTimedeltaIndex,
p=makePeriodIndex).get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError('"{idx_type}" is not a legal value for `idx_type`, '
'use "i"/"f"/"s"/"u"/"dt/"p"/"td".'
.format(idx_type=idx_type))
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return lmap(int, numeric_tuple)
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
cnt = Counter()
for j in range(div_factor):
label = '{prefix}_l{i}_g{j}'.format(prefix=prefix, i=i, j=j)
cnt[label] = ndupe_l[i]
# cute Counter trick
result = list(sorted(cnt.elements(), key=keyfunc))[:nentries]
tuples.append(result)
tuples = lzip(*tuples)
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
index = Index(tuples[0], name=names[0])
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
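# Illustrative usage sketch (not part of the original module): build a
# 2-level MultiIndex with 6 entries where each first-level label is repeated
# twice.  The particular arguments are assumptions for illustration only.
def _example_makeCustomIndex():
    mi = makeCustomIndex(nentries=6, nlevels=2, ndupe_l=[2])
    assert isinstance(mi, MultiIndex)
    assert mi.nlevels == 2 and len(mi) == 6
    return mi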
def makeCustomDataframe(nrows, ncols, c_idx_names=True, r_idx_names=True,
c_idx_nlevels=1, r_idx_nlevels=1, data_gen_f=None,
c_ndupe_l=None, r_ndupe_l=None, dtype=None,
c_idx_type=None, r_idx_type=None):
"""
nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yields no names,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
        nrows/ncols, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
        have more control in conjunction with a custom `data_gen_f`
r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a timedelta index.
if unspecified, string labels will be generated.
Examples:
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
    # 4-level multiindex on rows with names provided, 2-level multiindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FI","FO","FAM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or \
(r_idx_type in ('i', 'f', 's',
'u', 'dt', 'p', 'td') and r_idx_nlevels == 1)
assert c_idx_type is None or \
(c_idx_type in ('i', 'f', 's',
'u', 'dt', 'p', 'td') and c_idx_nlevels == 1)
columns = makeCustomIndex(ncols, nlevels=c_idx_nlevels, prefix='C',
names=c_idx_names, ndupe_l=c_ndupe_l,
idx_type=c_idx_type)
index = makeCustomIndex(nrows, nlevels=r_idx_nlevels, prefix='R',
names=r_idx_names, ndupe_l=r_ndupe_l,
idx_type=r_idx_type)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: "R{rows}C{cols}".format(rows=r, cols=c)
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1. / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingCustomDataframe(nrows, ncols, density=.9, random_state=None,
c_idx_names=True, r_idx_names=True,
c_idx_nlevels=1, r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None, r_ndupe_l=None, dtype=None,
c_idx_type=None, r_idx_type=None):
"""
Parameters
----------
    density : float, optional
        Fraction (between 0 and 1) of non-missing values in the
        DataFrame.
random_state : {np.random.RandomState, int}, optional
Random number generator or random seed.
See makeCustomDataframe for descriptions of the rest of the parameters.
"""
df = makeCustomDataframe(nrows, ncols, c_idx_names=c_idx_names,
r_idx_names=r_idx_names,
c_idx_nlevels=c_idx_nlevels,
r_idx_nlevels=r_idx_nlevels,
data_gen_f=data_gen_f,
c_ndupe_l=c_ndupe_l, r_ndupe_l=r_ndupe_l,
dtype=dtype, c_idx_type=c_idx_type,
r_idx_type=r_idx_type)
i, j = _create_missing_idx(nrows, ncols, density, random_state)
df.values[i, j] = np.nan
return df
def makeMissingDataframe(density=.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density,
random_state=random_state)
df.values[i, j] = np.nan
return df
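# Illustrative usage sketch (not part of the original module):
# ``density=.9`` leaves roughly 10% of the cells as NaN, and an integer
# ``random_state`` makes the missing pattern reproducible.  The numbers are
# assumptions for illustration only.
def _example_makeMissingDataframe():
    df = makeMissingDataframe(density=.9, random_state=42)
    frac_missing = df.isna().sum().sum() / float(df.size)
    return frac_missing  # roughly 0.1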
def add_nans(panel):
I, J, N = panel.shape
for i, item in enumerate(panel.items):
dm = panel[item]
for j, col in enumerate(dm.columns):
dm[col][:i + j] = np.NaN
return panel
def add_nans_panel4d(panel4d):
for l, label in enumerate(panel4d.labels):
panel = panel4d[label]
add_nans(panel)
return panel4d
class TestSubDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
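# Illustrative usage sketch (not part of the original module): a decorator
# wrapped with ``optional_args`` can be applied both bare and with keyword
# arguments.  ``tag`` and the decorated functions are assumptions used only
# for illustration.
def _example_optional_args():
    @optional_args
    def tag(f, label='default'):
        f._label = label
        return f
    @tag
    def plain():
        pass
    @tag(label='custom')
    def with_args():
        pass
    return plain._label, with_args._label  # ('default', 'custom')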
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
'timed out',
'Server Hangup',
'HTTP Error 503: Service Unavailable',
'502: Proxy Error',
'HTTP Error 502: internal error',
'HTTP Error 502',
'HTTP Error 503',
'HTTP Error 403',
'HTTP Error 400',
'Temporary failure in name resolution',
'Name or service not known',
'Connection refused',
'certificate verify',
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.
# and conditionally raise on these exception types
_network_error_classes = (IOError, httplib.HTTPException)
if PY3:
_network_error_classes += (TimeoutError,) # noqa
def can_connect(url, error_classes=_network_error_classes):
"""Try to connect to the given url. True if succeeds, False if IOError
raised
Parameters
----------
url : basestring
The URL to try to connect to
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
@optional_args
def network(t, url="http://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=_network_error_classes,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring network connection and, if an error is
encountered, only raise if it does not find a network connection.
In comparison to ``network``, this assumes an added contract to your test:
you must assert that, under normal conditions, your test will ONLY fail if
it does not have network connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check
for connectivity. Defaults to 'http://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
        Any exception that has .errno or .reason.errno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages: iterable of string
any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
message. Intended to suppress errors where an errno isn't available.
Notes
-----
    * ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
Tests decorated with @network will fail if it's possible to make a network
connection to another URL (defaults to google.com)::
>>> from pandas.util.testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
Traceback
...
    URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("http://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
Errors not related to networking will always be raised.
"""
from pytest import skip
t.network = True
@compat.wraps(t)
def wrapper(*args, **kwargs):
if check_before_test and not raise_on_error:
if not can_connect(url, error_classes):
skip()
try:
return t(*args, **kwargs)
except Exception as e:
errno = getattr(e, 'errno', None)
            if not errno and hasattr(e, "reason"):
errno = getattr(e.reason, 'errno', None)
if errno in skip_errnos:
skip("Skipping test due to known errno"
" and error {error}".format(error=e))
try:
e_str = traceback.format_exc(e)
except Exception:
e_str = str(e)
if any(m.lower() in e_str.lower() for m in _skip_on_messages):
skip("Skipping test because exception "
"message is known and error {error}".format(error=e))
if not isinstance(e, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
skip("Skipping test due to lack of connectivity"
" and error {error}".format(e))
return wrapper
with_connectivity_check = network
def assert_raises_regex(_exception, _regexp, _callable=None,
*args, **kwargs):
r"""
Check that the specified Exception is raised and that the error message
matches a given regular expression pattern. This may be a regular
expression object or a string containing a regular expression suitable
for use by `re.search()`. This is a port of the `assertRaisesRegexp`
function from unittest in Python 2.7.
Examples
--------
>>> assert_raises_regex(ValueError, 'invalid literal for.*XYZ', int, 'XYZ')
>>> import re
>>> assert_raises_regex(ValueError, re.compile('literal'), int, 'XYZ')
If an exception of a different type is raised, it bubbles up.
>>> assert_raises_regex(TypeError, 'literal', int, 'XYZ')
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'XYZ'
>>> dct = dict()
>>> assert_raises_regex(KeyError, 'pear', dct.__getitem__, 'apple')
Traceback (most recent call last):
...
AssertionError: "pear" does not match "'apple'"
You can also use this in a with statement.
>>> with assert_raises_regex(TypeError, 'unsupported operand type\(s\)'):
... 1 + {}
>>> with assert_raises_regex(TypeError, 'banana'):
... 'apple'[0] = 'b'
Traceback (most recent call last):
...
AssertionError: "banana" does not match "'str' object does not support \
item assignment"
"""
manager = _AssertRaisesContextmanager(exception=_exception, regexp=_regexp)
if _callable is not None:
with manager:
_callable(*args, **kwargs)
else:
return manager
class _AssertRaisesContextmanager(object):
"""
Context manager behind `assert_raises_regex`.
"""
def __init__(self, exception, regexp=None):
"""
Initialize an _AssertRaisesContextManager instance.
Parameters
----------
exception : class
The expected Exception class.
regexp : str, default None
The regex to compare against the Exception message.
"""
self.exception = exception
if regexp is not None and not hasattr(regexp, "search"):
regexp = re.compile(regexp, re.DOTALL)
self.regexp = regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, trace_back):
expected = self.exception
if not exc_type:
exp_name = getattr(expected, "__name__", str(expected))
raise AssertionError("{name} not raised.".format(name=exp_name))
return self.exception_matches(exc_type, exc_value, trace_back)
def exception_matches(self, exc_type, exc_value, trace_back):
"""
Check that the Exception raised matches the expected Exception
and expected error message regular expression.
Parameters
----------
exc_type : class
The type of Exception raised.
exc_value : Exception
The instance of `exc_type` raised.
trace_back : stack trace object
The traceback object associated with `exc_value`.
Returns
-------
is_matched : bool
Whether or not the Exception raised matches the expected
Exception class and expected error message regular expression.
Raises
------
AssertionError : The error message provided does not match
the expected error message regular expression.
"""
if issubclass(exc_type, self.exception):
if self.regexp is not None:
val = str(exc_value)
if not self.regexp.search(val):
msg = '"{pat}" does not match "{val}"'.format(
pat=self.regexp.pattern, val=val)
e = AssertionError(msg)
raise_with_traceback(e, trace_back)
return True
else:
# Failed, so allow Exception to bubble up.
return False
@contextmanager
def assert_produces_warning(expected_warning=Warning, filter_level="always",
clear=None, check_stacklevel=True):
"""
Context manager for running code expected to either raise a specific
warning, or not raise any warnings. Verifies that the code raises the
expected warning, and that it does not raise any other unexpected
warnings. It is basically a wrapper around ``warnings.catch_warnings``.
Parameters
----------
expected_warning : {Warning, False, None}, default Warning
        The type of warning to check for. ``Warning`` is the base
        class for all warnings. To check that no warning is raised,
        specify ``False`` or ``None``.
filter_level : str, default "always"
Specifies whether warnings are ignored, displayed, or turned
into errors.
Valid values are:
* "error" - turns matching warnings into exceptions
* "ignore" - discard the warning
* "always" - always emit a warning
* "default" - print the warning the first time it is generated
from each location
* "module" - print the warning the first time it is generated
from each module
* "once" - print the warning the first time it is generated
clear : str, default None
If not ``None`` then remove any previously raised warnings from
the ``__warningsregistry__`` to ensure that no warning messages are
suppressed by this context manager. If ``None`` is specified,
the ``__warningsregistry__`` keeps track of which warnings have been
shown, and does not show them again.
check_stacklevel : bool, default True
If True, displays the line that called the function containing
        the warning to show where the function is called. Otherwise, the
line that implements the function is displayed.
Examples
--------
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
    .. warning:: This is *not* thread-safe.
"""
with warnings.catch_warnings(record=True) as w:
if clear is not None:
            # make sure that we are clearing these warnings
# if they have happened before
# to guarantee that we will catch them
if not is_list_like(clear):
clear = [clear]
for m in clear:
try:
m.__warningregistry__.clear()
except Exception:
pass
saw_warning = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if (expected_warning and issubclass(actual_warning.category,
expected_warning)):
saw_warning = True
if check_stacklevel and issubclass(actual_warning.category,
(FutureWarning,
DeprecationWarning)):
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[2][0])
msg = ("Warning not set with correct stacklevel. "
"File where warning is raised: {actual} != "
"{caller}. Warning message: {message}"
).format(actual=actual_warning.filename,
caller=caller.filename,
message=actual_warning.message)
assert actual_warning.filename == caller.filename, msg
else:
extra_warnings.append((actual_warning.category.__name__,
actual_warning.message,
actual_warning.filename,
actual_warning.lineno))
if expected_warning:
msg = "Did not see expected warning of class {name!r}.".format(
name=expected_warning.__name__)
assert saw_warning, msg
assert not extra_warnings, ("Caused unexpected warning(s): {extra!r}."
).format(extra=extra_warnings)
class RNGContext(object):
"""
    Context manager to set the numpy random number generator seed. Returns
to the original value upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
@contextmanager
def use_numexpr(use, min_elements=None):
from pandas.core.computation import expressions as expr
if min_elements is None:
min_elements = expr._MIN_ELEMENTS
olduse = expr._USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
expr.set_use_numexpr(use)
expr._MIN_ELEMENTS = min_elements
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
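# Illustrative usage sketch (not part of the original module): temporarily
# disable numexpr-accelerated evaluation, e.g. while timing or debugging an
# arithmetic operation.  The frame and the operation are assumptions only.
def _example_use_numexpr():
    df = DataFrame(np.random.randn(1000, 4))
    with use_numexpr(False):
        result = df + df  # evaluated without numexpr inside the block
    return result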
def test_parallel(num_threads=2, kwargs_list=None):
"""Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args,
kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
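# Illustrative usage sketch (not part of the original module): run the same
# side-effecting function from several threads at once, e.g. as a crude
# thread-safety smoke test.  The shared list and thread count are assumptions;
# note that return values of the decorated function are discarded by design.
def _example_test_parallel():
    results = []
    @test_parallel(num_threads=4)
    def append_one():
        results.append(1)
    append_one()
    return len(results)  # 4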
class SubclassedSeries(Series):
_metadata = ['testattr', 'name']
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ['testattr']
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedSparseSeries(pd.SparseSeries):
_metadata = ['testattr']
@property
def _constructor(self):
return SubclassedSparseSeries
@property
def _constructor_expanddim(self):
return SubclassedSparseDataFrame
class SubclassedSparseDataFrame(pd.SparseDataFrame):
_metadata = ['testattr']
@property
def _constructor(self):
return SubclassedSparseDataFrame
@property
def _constructor_sliced(self):
return SubclassedSparseSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
@contextmanager
def patch(ob, attr, value):
"""Temporarily patch an attribute of an object.
Parameters
----------
ob : any
The object to patch. This must support attribute assignment for `attr`.
attr : str
The name of the attribute to patch.
value : any
The temporary attribute to assign.
Examples
--------
>>> class C(object):
... attribute = 'original'
...
>>> C.attribute
'original'
>>> with patch(C, 'attribute', 'patched'):
... in_context = C.attribute
...
>>> in_context
'patched'
>>> C.attribute # the value is reset when the context manager exists
'original'
Correctly replaces attribute when the manager exits with an exception.
>>> with patch(C, 'attribute', 'patched'):
... in_context = C.attribute
... raise ValueError()
Traceback (most recent call last):
...
ValueError
>>> in_context
'patched'
>>> C.attribute
'original'
"""
noattr = object() # mark that the attribute never existed
old = getattr(ob, attr, noattr)
setattr(ob, attr, value)
try:
yield
finally:
if old is noattr:
delattr(ob, attr)
else:
setattr(ob, attr, old)
@contextmanager
def set_timezone(tz):
"""Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime.now())
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime.now())
...
'EDT'
"""
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ['TZ']
except KeyError:
pass
else:
os.environ['TZ'] = tz
time.tzset()
orig_tz = os.environ.get('TZ')
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
skipna_wrapper : function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
| bsd-3-clause |
nrhine1/scikit-learn | sklearn/ensemble/gradient_boosting.py | 126 | 65552 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, check_array, check_X_y, column_or_1d
from ..utils import check_consistent_length, deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit, bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._tree import PresortBestSplitter
from ..tree._tree import FriedmanMSE
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0)
def predict(self, X):
check_is_fitted(self, 'quantile')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
check_is_fitted(self, 'mean')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non-binary labels.')
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
check_is_fitted(self, 'prior')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
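# Editorial note: the exponential loss is minimised in the population by half
# the log-odds, F*(x) = 0.5 * log(p / (1 - p)), hence the 0.5 scale above.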
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float64)
class_counts = bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
check_is_fitted(self, 'priors')
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
check_is_fitted(self, 'n_classes')
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
----------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,)
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
learning_rate : float, default=1.0
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
Terminal regions do not need to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
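# Editorial note: for the squared error above (up to the conventional 1/2
# factor) the negative gradient with respect to the prediction is simply the
# residual y - pred, which is what is returned here.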
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.abs(y - pred.ravel())))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
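# Editorial note: gamma is the alpha-quantile of |y - pred|, so the Huber loss
# is quadratic for |diff| <= gamma and linear beyond; accordingly the negative
# gradient is diff in the quadratic zone and gamma * sign(diff) in the linear
# zone, as computed above.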
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
Quantile regression allows estimating the percentiles
of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
loss = (alpha * diff[mask].sum() +
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) +
(1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
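# Editorial note: this is the (sub)gradient of the pinball loss -- alpha where
# the model under-predicts (y > pred) and -(1 - alpha) where it over-predicts.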
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
Loss functions that do not support probabilities raise a TypeError.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
our node estimate is given by:
sum(w * (y - prob)) / sum(w * prob * (1 - prob))
we take advantage that: y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
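# Editorial note: exp(pred_k - logsumexp(pred)) is the softmax probability of
# class k computed in a numerically stable way, so the residual is y_k - p_k.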
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
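# Editorial note: the (K - 1) / K factor is the multi-class Newton-step
# scaling used in Friedman's (2001) K-class gradient boosting formulation.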
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(2.0 * score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
If ``verbose==1`` output is printed once in a while (when iteration mod
verbose_mod is zero); if larger than 1 then output is printed for
each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
criterion, splitter, random_state):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=criterion,
splitter=splitter,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False)
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features * self.n_features), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
(total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
computing held-out estimates, early stopping, model introspection, and
snapshotting.
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, dtype=DTYPE)
n_samples, self.n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# init criterion and splitter
criterion = FriedmanMSE(1)
splitter = PresortBestSplitter(criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, criterion, splitter,
random_state)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit`"
" before making predictions`.")
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, loss_.K]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def _validate_y(self, y):
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
X = check_array(X, dtype=DTYPE, order="C")
return self._decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self._staged_decision_function(X):
yield y.ravel()
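# ---------------------------------------------------------------------------
# Editorial usage sketch (not part of the original module). It exercises the
# public estimators defined above on synthetic data; the data generation and
# hyper-parameters below are assumptions made purely for illustration. Because
# this module uses relative imports, run it with the package importable, e.g.
# ``python -m sklearn.ensemble.gradient_boosting``.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.normal(size=(200, 4))
    y_reg = X_demo[:, 0] + 0.1 * rng.normal(size=200)
    y_clf = (y_reg > 0).astype(np.int64)

    reg = GradientBoostingRegressor(n_estimators=50, learning_rate=0.1,
                                    max_depth=2, random_state=0)
    reg.fit(X_demo, y_reg)
    print('regression train R^2: %.3f' % reg.score(X_demo, y_reg))

    clf = GradientBoostingClassifier(n_estimators=50, learning_rate=0.1,
                                     max_depth=2, random_state=0)
    clf.fit(X_demo, y_clf)
    print('classification train accuracy: %.3f' % clf.score(X_demo, y_clf))
    # staged_predict yields class predictions after each boosting stage.
    n_stages = sum(1 for _ in clf.staged_predict(X_demo))
    print('number of staged predictions: %d' % n_stages)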
| bsd-3-clause |
benanne/kaggle-galaxies | try_convnet_cc_multirotflip_3x69r45_8433n_maxout2048_extradense.py | 7 | 17404 | import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
from datetime import datetime, timedelta
# import matplotlib.pyplot as plt
# plt.ion()
# import utils
BATCH_SIZE = 16
NUM_INPUT_FEATURES = 3
LEARNING_RATE_SCHEDULE = {
0: 0.04,
1800: 0.004,
2300: 0.0004,
}
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0
CHUNK_SIZE = 10000 # 30000 # this should be a multiple of the batch size, ideally.
NUM_CHUNKS = 2500 # 3000 # 1500 # 600 # 600 # 600 # 500
VALIDATE_EVERY = 20 # 12 # 6 # 6 # 6 # 5 # validate only every 5 chunks. MUST BE A DIVISOR OF NUM_CHUNKS!!!
# else computing the analysis data does not work correctly, since it assumes that the validation set is still loaded.
NUM_CHUNKS_NONORM = 1 # train without normalisation for this many chunks, to get the weights in the right 'zone'.
# this should be only a few, just 1 hopefully suffices.
GEN_BUFFER_SIZE = 1
# # need to load the full training data anyway to extract the validation set from it.
# # alternatively we could create separate validation set files.
# DATA_TRAIN_PATH = "data/images_train_color_cropped33_singletf.npy.gz"
# DATA2_TRAIN_PATH = "data/images_train_color_8x_singletf.npy.gz"
# DATA_VALIDONLY_PATH = "data/images_validonly_color_cropped33_singletf.npy.gz"
# DATA2_VALIDONLY_PATH = "data/images_validonly_color_8x_singletf.npy.gz"
# DATA_TEST_PATH = "data/images_test_color_cropped33_singletf.npy.gz"
# DATA2_TEST_PATH = "data/images_test_color_8x_singletf.npy.gz"
TARGET_PATH = "predictions/final/try_convnet_cc_multirotflip_3x69r45_8433n_maxout2048_extradense.csv"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_8433n_maxout2048_extradense.pkl"
# FEATURES_PATTERN = "features/try_convnet_chunked_ra_b3sched.%s.npy"
print "Set up data loading"
# TODO: adapt this so it loads the validation data from JPEGs and does the processing realtime
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)
]
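# Editorial note: the two entries above produce the two input representations
# consumed by the l0 and l0_45 input layers defined below -- the same 3x
# down-sampled crop, once as-is and once rotated by 45 degrees.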
num_input_representations = len(ds_transforms)
augmentation_params = {
'zoom_range': (1.0 / 1.3, 1.3),
'rotation_range': (0, 360),
'shear_range': (0, 0),
'translation_range': (-4, 4),
'do_flip': True,
}
augmented_data_gen = ra.realtime_augmented_data_gen(num_chunks=NUM_CHUNKS, chunk_size=CHUNK_SIZE,
augmentation_params=augmentation_params, ds_transforms=ds_transforms,
target_sizes=input_sizes)
post_augmented_data_gen = ra.post_augment_brightness_gen(augmented_data_gen, std=0.5)
train_gen = load_data.buffered_gen_mp(post_augmented_data_gen, buffer_size=GEN_BUFFER_SIZE)
y_train = np.load("data/solutions_train.npy")
train_ids = load_data.train_ids
test_ids = load_data.test_ids
# split training data into training + a small validation set
num_train = len(train_ids)
num_test = len(test_ids)
num_valid = num_train // 10 # integer division
num_train -= num_valid
y_valid = y_train[num_train:]
y_train = y_train[:num_train]
valid_ids = train_ids[num_train:]
train_ids = train_ids[:num_train]
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train + num_valid)
test_indices = np.arange(num_test)
def create_train_gen():
"""
this generates the training data in order, for postprocessing. Do not use this for actual training.
"""
data_gen_train = ra.realtime_fixed_augmented_data_gen(train_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_train, buffer_size=GEN_BUFFER_SIZE)
def create_valid_gen():
data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_valid, buffer_size=GEN_BUFFER_SIZE)
def create_test_gen():
data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_test, buffer_size=GEN_BUFFER_SIZE)
print "Preprocess validation data upfront"
start_time = time.time()
xs_valid = [[] for _ in xrange(num_input_representations)]
for data, length in create_valid_gen():
for x_valid_list, x_chunk in zip(xs_valid, data):
x_valid_list.append(x_chunk[:length])
xs_valid = [np.vstack(x_valid) for x_valid in xs_valid]
xs_valid = [x_valid.transpose(0, 3, 1, 2) for x_valid in xs_valid] # move the colour dimension up
print " took %.2f seconds" % (time.time() - start_time)
print "Build model"
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=8, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=4, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4b = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
l4c = layers.DenseLayer(l4b, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4c, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
train_loss_nonorm = l6.error(normalisation=False)
train_loss = l6.error() # but compute and print this!
valid_loss = l6.error(dropout_active=False)
all_parameters = layers.all_parameters(l6)
all_bias_parameters = layers.all_bias_parameters(l6)
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
y_shared = theano.shared(np.zeros((1,1), dtype=theano.config.floatX))
learning_rate = theano.shared(np.array(LEARNING_RATE_SCHEDULE[0], dtype=theano.config.floatX))
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l6.target_var: y_shared[idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
# updates = layers.gen_updates(train_loss, all_parameters, learning_rate=LEARNING_RATE, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates_nonorm = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss_nonorm, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
train_nonorm = theano.function([idx], train_loss_nonorm, givens=givens, updates=updates_nonorm)
train_norm = theano.function([idx], train_loss, givens=givens, updates=updates)
compute_loss = theano.function([idx], valid_loss, givens=givens) # dropout_active=False
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens, on_unused_input='ignore') # not using the labels, so theano complains
compute_features = theano.function([idx], l4.output(dropout_active=False), givens=givens, on_unused_input='ignore')
print "Train model"
start_time = time.time()
prev_time = start_time
num_batches_valid = x_valid.shape[0] // BATCH_SIZE
losses_train = []
losses_valid = []
param_stds = []
for e in xrange(NUM_CHUNKS):
print "Chunk %d/%d" % (e + 1, NUM_CHUNKS)
chunk_data, chunk_length = train_gen.next()
y_chunk = chunk_data.pop() # last element is labels.
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
if e in LEARNING_RATE_SCHEDULE:
current_lr = LEARNING_RATE_SCHEDULE[e]
learning_rate.set_value(LEARNING_RATE_SCHEDULE[e])
print " setting learning rate to %.6f" % current_lr
# train without normalisation for the first NUM_CHUNKS_NONORM chunks.
if e >= NUM_CHUNKS_NONORM:
train = train_norm
else:
train = train_nonorm
print " load training data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
y_shared.set_value(y_chunk)
num_batches_chunk = x_chunk.shape[0] // BATCH_SIZE
# import pdb; pdb.set_trace()
print " batch SGD"
losses = []
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
loss = train(b)
losses.append(loss)
# print " loss: %.6f" % loss
mean_train_loss = np.sqrt(np.mean(losses))
print " mean training loss (RMSE):\t\t%.6f" % mean_train_loss
losses_train.append(mean_train_loss)
# store param stds during training
param_stds.append([p.std() for p in layers.get_param_values(l6)])
if ((e + 1) % VALIDATE_EVERY) == 0:
print
print "VALIDATING"
print " load validation data onto GPU"
for x_shared, x_valid in zip(xs_shared, xs_valid):
x_shared.set_value(x_valid)
y_shared.set_value(y_valid)
print " compute losses"
losses = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
loss = compute_loss(b)
losses.append(loss)
mean_valid_loss = np.sqrt(np.mean(losses))
print " mean validation loss (RMSE):\t\t%.6f" % mean_valid_loss
losses_valid.append(mean_valid_loss)
now = time.time()
time_since_start = now - start_time
time_since_prev = now - prev_time
prev_time = now
est_time_left = time_since_start * (float(NUM_CHUNKS - (e + 1)) / float(e + 1))
eta = datetime.now() + timedelta(seconds=est_time_left)
eta_str = eta.strftime("%c")
print " %s since start (%.2f s)" % (load_data.hms(time_since_start), time_since_prev)
print " estimated %s to go (ETA: %s)" % (load_data.hms(est_time_left), eta_str)
print
del chunk_data, xs_chunk, x_chunk, y_chunk, xs_valid, x_valid # memory cleanup
print "Compute predictions on validation set for analysis in batches"
predictions_list = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write validation set predictions to %s" % ANALYSIS_PATH
with open(ANALYSIS_PATH, 'w') as f:
pickle.dump({
'ids': valid_ids[:num_batches_valid * BATCH_SIZE], # note that we need to truncate the ids to a multiple of the batch size.
'predictions': all_predictions,
'targets': y_valid,
'mean_train_loss': mean_train_loss,
'mean_valid_loss': mean_valid_loss,
'time_since_start': time_since_start,
'losses_train': losses_train,
'losses_valid': losses_valid,
'param_values': layers.get_param_values(l6),
'param_stds': param_stds,
}, f, pickle.HIGHEST_PROTOCOL)
del predictions_list, all_predictions # memory cleanup
# print "Loading test data"
# x_test = load_data.load_gz(DATA_TEST_PATH)
# x2_test = load_data.load_gz(DATA2_TEST_PATH)
# test_ids = np.load("data/test_ids.npy")
# num_test = x_test.shape[0]
# x_test = x_test.transpose(0, 3, 1, 2) # move the colour dimension up.
# x2_test = x2_test.transpose(0, 3, 1, 2)
# create_test_gen = lambda: load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
print "Computing predictions on test data"
predictions_list = []
for e, (xs_chunk, chunk_length) in enumerate(create_test_gen()):
print "Chunk %d" % (e + 1)
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk] # move the colour dimension up.
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
# make predictions for the test set; don't forget to cut off the zeros at the end
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
all_predictions = all_predictions[:num_test] # truncate back to the correct length
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write predictions to %s" % TARGET_PATH
# test_ids = np.load("data/test_ids.npy")
with open(TARGET_PATH, 'wb') as csvfile:
writer = csv.writer(csvfile) # , delimiter=',', quoting=csv.QUOTE_MINIMAL)
# write header
writer.writerow(['GalaxyID', 'Class1.1', 'Class1.2', 'Class1.3', 'Class2.1', 'Class2.2', 'Class3.1', 'Class3.2', 'Class4.1', 'Class4.2', 'Class5.1', 'Class5.2', 'Class5.3', 'Class5.4', 'Class6.1', 'Class6.2', 'Class7.1', 'Class7.2', 'Class7.3', 'Class8.1', 'Class8.2', 'Class8.3', 'Class8.4', 'Class8.5', 'Class8.6', 'Class8.7', 'Class9.1', 'Class9.2', 'Class9.3', 'Class10.1', 'Class10.2', 'Class10.3', 'Class11.1', 'Class11.2', 'Class11.3', 'Class11.4', 'Class11.5', 'Class11.6'])
# write data
for k in xrange(test_ids.shape[0]):
row = [test_ids[k]] + all_predictions[k].tolist()
writer.writerow(row)
print "Gzipping..."
os.system("gzip -c %s > %s.gz" % (TARGET_PATH, TARGET_PATH))
del all_predictions, predictions_list, xs_chunk, x_chunk # memory cleanup
# # need to reload training data because it has been split and shuffled.
# # don't need to reload test data
# x_train = load_data.load_gz(DATA_TRAIN_PATH)
# x2_train = load_data.load_gz(DATA2_TRAIN_PATH)
# x_train = x_train.transpose(0, 3, 1, 2) # move the colour dimension up
# x2_train = x2_train.transpose(0, 3, 1, 2)
# train_gen_features = load_data.array_chunker_gen([x_train, x2_train], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# test_gen_features = load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# for name, gen, num in zip(['train', 'test'], [train_gen_features, test_gen_features], [x_train.shape[0], x_test.shape[0]]):
# print "Extracting feature representations for all galaxies: %s" % name
# features_list = []
# for e, (xs_chunk, chunk_length) in enumerate(gen):
# print "Chunk %d" % (e + 1)
# x_chunk, x2_chunk = xs_chunk
# x_shared.set_value(x_chunk)
# x2_shared.set_value(x2_chunk)
# num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
# # compute features for the set; don't forget to cut off the zeros at the end
# for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
# features = compute_features(b)
# features_list.append(features)
# all_features = np.vstack(features_list)
# all_features = all_features[:num] # truncate back to the correct length
# features_path = FEATURES_PATTERN % name
# print " write features to %s" % features_path
# np.save(features_path, all_features)
print "Done!"
| bsd-3-clause |
agiovann/Constrained_NMF | use_cases/granule_cells/figures_correlation.py | 2 | 6173 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 7 13:44:32 2016
@author: agiovann
"""
from __future__ import print_function
#%%
from builtins import zip
get_ipython().magic('load_ext autoreload')
get_ipython().magic('autoreload 2')
from glob import glob
import numpy as np
import pylab as pl
import os
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
import calblitz as cb
from calblitz.granule_cells import utils_granule as gc
from glob import glob
import numpy as np
import os
import scipy
import pylab as pl
import ca_source_extraction as cse
import pickle
import calblitz as cb
from calblitz.granule_cells.utils_granule import load_data_from_stored_results
#%%
base_folder = '/mnt/ceph/users/agiovann/ImagingData/eyeblink/b37/20160705103903/'
traces, masks, triggers_out, amplitudes, ISI = load_data_from_stored_results(base_folder, thresh_CR=0.1,
threshold_responsiveness=0.1, is_blob=True, time_CR_on=-.1, time_US_on=.05, thresh_MOV_iqr=1000,
time_CS_on_MOV=-.25, time_US_on_MOV=0)
wheel_mat = traces['wheel_traces']
ftraces = traces['fluo_traces']
time_mat = traces['time_fluo']
time_e_mat = traces['time_eye']
time_w_mat = traces['time_wheel']
eye_mat = traces['eye_traces']
amplitudes_eyelid = amplitudes['amplitudes_eyelid']
amplitudes_fluo = amplitudes['amplitudes_fluo']
#%%
counter = 0
with np.load(glob(os.path.join(base_folder, '*-template_total.npz'))[0]) as ld:
templs = ld['template_each']
for mn1, A in zip(templs, masks['A_each']):
pl.subplot(2, 3, counter + 1)
# mn=np.median(templs,0)
mn = mn1
d1, d2 = np.shape(mn)
# selem = disk(50)
# mn=(mn1 - np.min(mn1))/(np.max(mn1)-np.min(mn1))
# mn = rank.equalize(mn, selem=selem)
# mn = exposure.equalize_hist(mn,nbins=1024)
# os.path.split(fl)[-1]
# pl.imshow(mn,cmap='gray')
# pl.imshow(mn,cmap='gray',vmax=np.percentile(mn,99))
# pl.imshow(mn,cmap='gray',vmax=np.percentile(mn,98))
pl.imshow(A.mean(1).reshape((d1, d2), order='F'), alpha=1, cmap='hot')
# pl.xlim([0,512])
# pl.ylim([300,512])
pl.axis('off')
counter += 1
# pl.title(fl.split('/')[-2][:8])
#%%
pl.plot(time_e_mat, np.mean(eye_mat[triggers_out['nm_idxCSUSCR']], 0))
pl.plot(time_e_mat, np.mean(eye_mat[triggers_out['nm_idxCSUSNOCR']], 0))
pl.plot(time_e_mat, np.mean(eye_mat[triggers_out['nm_idxCSCR']], 0))
pl.plot(time_e_mat, np.mean(eye_mat[triggers_out['nm_idxCSNOCR']], 0))
pl.plot(time_e_mat, np.mean(eye_mat[triggers_out['nm_idx_US']], 0))
pl.legend(['idxCSUSCR', 'idxCSUSNOCR', 'idxCSCR', 'idxCSNOCR', 'idxUS'])
pl.xlabel('time to US (s)')
pl.ylabel('eyelid closure')
pl.axvspan(-ISI, ISI, color='g', alpha=0.2, lw=0)
pl.axvspan(0, 0.03, color='r', alpha=0.2, lw=0)
pl.xlim([-.5, .5])
pl.ylim([-.1, None])
#%%
pl.close()
pl.subplot(2, 2, 1)
pl.plot(time_e_mat, np.mean(eye_mat[triggers_out['nm_idxCR']], 0), '-*')
pl.plot(time_e_mat, np.mean(eye_mat[triggers_out['nm_idxNOCR']], 0), '-d')
pl.plot(time_e_mat, np.mean(eye_mat[triggers_out['nm_idxUS']], 0), '-o')
pl.xlabel('Time to US (s)')
pl.ylabel('Eyelid closure')
pl.axvspan(-ISI, ISI, color='g', alpha=0.2, lw=0)
pl.axvspan(0, 0.03, color='r', alpha=0.2, lw=0)
pl.xlim([-.5, .5])
pl.ylim([-.1, None])
pl.legend(['CR+', 'CR-', 'US'], loc='upper left')
pl.subplot(2, 2, 2)
pl.plot(time_mat, np.median(
ftraces[triggers_out['nm_idxCR']], axis=(0, 1)), '-*')
pl.plot(time_mat, np.median(
ftraces[triggers_out['nm_idxNOCR']], axis=(0, 1)), '-d')
pl.plot(time_mat, np.median(
ftraces[triggers_out['nm_idxUS']], axis=(0, 1)), '-o')
pl.axvspan((-ISI), ISI, color='g', alpha=0.2, lw=0)
pl.axvspan(0, 0.03, color='r', alpha=0.5, lw=0)
pl.xlabel('Time to US (s)')
pl.ylabel('DF/F')
pl.xlim([-.5, .5])
pl.ylim([-.1, None])
#%
import pandas
bins = np.arange(-.15, 0.7, .2)
n_bins = 6
thresh__correlation = .93
dfs = []
dfs_random = []
idxCSCSUS = triggers_out['nm_idxCSCSUS']
x_name = 'ampl_eye'
y_name = 'ampl_fl'
for resps in amplitudes_fluo.T:
idx_order = np.arange(len(idxCSCSUS))
dfs.append(pandas.DataFrame(
{y_name: resps[idxCSCSUS[idx_order]],
x_name: amplitudes_eyelid[idxCSCSUS]}))
idx_order = np.random.permutation(idx_order)
dfs_random.append(pandas.DataFrame(
{y_name: resps[idxCSCSUS[idx_order]],
x_name: amplitudes_eyelid[idxCSCSUS]}))
r_s = []
r_ss = []
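# For each cell, the Pearson r of the bin-averaged eyelid vs. fluorescence amplitudes is
# computed twice: once on the real pairing (collected in r_s) and once on a trial-shuffled
# pairing (collected in r_ss), which serves as a null distribution for the correlation.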
for df, dfr in zip(dfs, dfs_random): # random scramble
if bins is None:
[_, bins] = np.histogram(dfr.ampl_eye, n_bins)
groups = dfr.groupby(np.digitize(dfr.ampl_eye, bins))
grouped_mean = groups.mean()
grouped_sem = groups.sem()
(r, p_val) = scipy.stats.pearsonr(
grouped_mean.ampl_eye, grouped_mean.ampl_fl)
# r=np.corrcoef(grouped_mean.ampl_eye,grouped_mean.ampl_fl)[0,1]
r_ss.append(r)
if bins is None:
[_, bins] = np.histogram(df.ampl_eye, n_bins)
groups = df.groupby(np.digitize(df.ampl_eye, bins))
grouped_mean = groups.mean()
grouped_sem = groups.sem()
(r, p_val) = scipy.stats.pearsonr(
grouped_mean.ampl_eye, grouped_mean.ampl_fl)
# r=np.corrcoef(grouped_mean.ampl_eye,grouped_mean.ampl_fl)[0,1]
r_s.append(r)
if r_s[-1] > thresh__correlation:
pl.subplot(2, 2, 3)
print('found')
pl.errorbar(grouped_mean.ampl_eye, grouped_mean.ampl_fl, grouped_sem.ampl_fl.as_matrix(
), grouped_sem.ampl_eye.as_matrix(), fmt='.')
pl.scatter(grouped_mean.ampl_eye, grouped_mean.ampl_fl,
s=groups.apply(len).values * 3)
pl.xlabel(x_name)
pl.ylabel(y_name)
mu_scr = np.mean(r_ss)
std_scr = np.std(r_ss)
[a, b] = np.histogram(r_s, 20)
pl.subplot(2, 2, 4)
pl.plot(b[1:], scipy.signal.savgol_filter(a, 3, 1))
pl.axvspan(mu_scr - std_scr, mu_scr + std_scr, color='r', alpha=0.2, lw=0)
pl.xlabel('Correlation coefficients')
pl.ylabel('bin counts')
#%%
pl.savefig(base_folder + 'correlations.pdf')
pl.close()
| gpl-2.0 |
UKPLab/sentence-transformers | tests/test_evaluator.py | 1 | 3464 | """
Tests the correct computation of evaluation scores from BinaryClassificationEvaluator
"""
from sentence_transformers import SentenceTransformer, evaluation, util, losses, LoggingHandler
import logging
import unittest
from sklearn.metrics import f1_score, accuracy_score
import numpy as np
import gzip
import csv
from sentence_transformers import InputExample
from torch.utils.data import DataLoader
import os
class EvaluatorTest(unittest.TestCase):
def test_BinaryClassificationEvaluator_find_best_f1_and_threshold(self):
"""Tests that the F1 score for the computed threshold is correct"""
y_true = np.random.randint(0, 2, 1000)
y_pred_cosine = np.random.randn(1000)
best_f1, best_precision, best_recall, threshold = evaluation.BinaryClassificationEvaluator.find_best_f1_and_threshold(y_pred_cosine, y_true, high_score_more_similar=True)
y_pred_labels = [1 if pred >= threshold else 0 for pred in y_pred_cosine]
sklearn_f1score = f1_score(y_true, y_pred_labels)
assert np.abs(best_f1 - sklearn_f1score) < 1e-6
def test_BinaryClassificationEvaluator_find_best_accuracy_and_threshold(self):
"""Tests that the Acc score for the computed threshold is correct"""
y_true = np.random.randint(0, 2, 1000)
y_pred_cosine = np.random.randn(1000)
max_acc, threshold = evaluation.BinaryClassificationEvaluator.find_best_acc_and_threshold(y_pred_cosine, y_true, high_score_more_similar=True)
y_pred_labels = [1 if pred >= threshold else 0 for pred in y_pred_cosine]
sklearn_acc = accuracy_score(y_true, y_pred_labels)
assert np.abs(max_acc - sklearn_acc) < 1e-6
def test_LabelAccuracyEvaluator(self):
"""Tests that the LabelAccuracyEvaluator can be loaded correctly"""
model = SentenceTransformer('paraphrase-distilroberta-base-v1')
nli_dataset_path = 'datasets/AllNLI.tsv.gz'
if not os.path.exists(nli_dataset_path):
util.http_get('https://sbert.net/datasets/AllNLI.tsv.gz', nli_dataset_path)
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
dev_samples = []
with gzip.open(nli_dataset_path, 'rt', encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
if row['split'] == 'train':
label_id = label2int[row['label']]
dev_samples.append(InputExample(texts=[row['sentence1'], row['sentence2']], label=label_id))
if len(dev_samples) >= 100:
break
train_loss = losses.SoftmaxLoss(model=model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=len(label2int))
dev_dataloader = DataLoader(dev_samples, shuffle=False, batch_size=16)
evaluator = evaluation.LabelAccuracyEvaluator(dev_dataloader, softmax_model=train_loss)
acc = evaluator(model)
assert acc > 0.2
def test_ParaphraseMiningEvaluator(self):
"""Tests that the ParaphraseMiningEvaluator can be loaded"""
model = SentenceTransformer('paraphrase-distilroberta-base-v1')
sentences = {0: "Hello World", 1: "Hello World!", 2: "The cat is on the table", 3: "On the table the cat is"}
data_eval = evaluation.ParaphraseMiningEvaluator(sentences, [(0,1), (2,3)])
score = data_eval(model)
assert score > 0.99 | apache-2.0 |
SEMCOG/synthpop | synthpop/synthesizer.py | 1 | 5583 | import logging
import sys
from collections import namedtuple
import numpy as np
import pandas as pd
from scipy.stats import chisquare
from . import categorizer as cat
from . import draw
from .ipf.ipf import calculate_constraints
from .ipu.ipu import household_weights
logger = logging.getLogger("synthpop")
FitQuality = namedtuple(
'FitQuality',
('people_chisq', 'people_p'))
BlockGroupID = namedtuple(
'BlockGroupID', ('state', 'county', 'tract', 'block_group'))
def enable_logging():
handler = logging.StreamHandler(stream=sys.stdout)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
def synthesize(h_marg, p_marg, h_jd, p_jd, h_pums, p_pums,
marginal_zero_sub=.01, jd_zero_sub=.001, hh_index_start=0):
# this is the zero marginal problem
h_marg = h_marg.replace(0, marginal_zero_sub)
p_marg = p_marg.replace(0, marginal_zero_sub)
# zero cell problem
h_jd.frequency = h_jd.frequency.replace(0, jd_zero_sub)
p_jd.frequency = p_jd.frequency.replace(0, jd_zero_sub)
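# (Both substitutions protect the IPF step below: a zero marginal or a zero seed frequency
# would pin the corresponding cell at zero and can make the scaling factors blow up, so
# small positive values are substituted instead of exact zeros.)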
# ipf for households
logger.info("Running ipf for households")
h_constraint, _ = calculate_constraints(h_marg, h_jd.frequency)
h_constraint.index = h_jd.cat_id
logger.debug("Household constraint")
logger.debug(h_constraint)
# ipf for persons
logger.info("Running ipf for persons")
p_constraint, _ = calculate_constraints(p_marg, p_jd.frequency)
# p_constraint.index = p_jd.cat_id
logger.debug("Person constraint")
logger.debug(p_constraint)
# modify person cat ids so they are unique when combined with households
p_starting_cat_id = h_jd['cat_id'].max() + 1
p_jd['cat_id'] += p_starting_cat_id
p_pums['cat_id'] += p_starting_cat_id
p_constraint.index = p_jd.cat_id
# make frequency tables that the ipu expects
household_freq, person_freq = cat.frequency_tables(p_pums, h_pums,
p_jd.cat_id,
h_jd.cat_id)
# do the ipu to match person marginals
logger.info("Running ipu")
import time
t1 = time.time()
best_weights, fit_quality, iterations = household_weights(household_freq,
person_freq,
h_constraint,
p_constraint)
logger.info("Time to run ipu: %.3fs" % (time.time()-t1))
logger.debug("IPU weights:")
logger.debug(best_weights.describe())
logger.debug("Fit quality:")
logger.debug(fit_quality)
logger.debug("Number of iterations:")
logger.debug(iterations)
num_households = int(h_marg.groupby(level=0).sum().mean())
print "Drawing %d households" % num_households
best_chisq = np.inf
return draw.draw_households(
num_households, h_pums, p_pums, household_freq, h_constraint,
p_constraint, best_weights, hh_index_start=hh_index_start)
def synthesize_all(recipe, num_geogs=None, indexes=None,
marginal_zero_sub=.01, jd_zero_sub=.001):
"""
Returns
-------
households, people : pandas.DataFrame
fit_quality : dict of FitQuality
Keys are geographic IDs, values are namedtuples with attributes
``.people_chisq`` and ``.people_p``.
"""
print "Synthesizing at geog level: '{}' (number of geographies is {})".\
format(recipe.get_geography_name(), recipe.get_num_geographies())
if indexes is None:
indexes = recipe.get_available_geography_ids()
hh_list = []
people_list = []
cnt = 0
fit_quality = {}
hh_index_start = 0
# TODO will parallelization work here?
for geog_id in indexes:
print "Synthesizing geog id:\n", geog_id
h_marg = recipe.get_household_marginal_for_geography(geog_id)
logger.debug("Household marginal")
logger.debug(h_marg)
p_marg = recipe.get_person_marginal_for_geography(geog_id)
logger.debug("Person marginal")
logger.debug(p_marg)
h_pums, h_jd = recipe.\
get_household_joint_dist_for_geography(geog_id)
logger.debug("Household joint distribution")
logger.debug(h_jd)
p_pums, p_jd = recipe.get_person_joint_dist_for_geography(geog_id)
logger.debug("Person joint distribution")
logger.debug(p_jd)
households, people, people_chisq, people_p = \
synthesize(
h_marg, p_marg, h_jd, p_jd, h_pums, p_pums,
marginal_zero_sub=marginal_zero_sub, jd_zero_sub=jd_zero_sub,
hh_index_start=hh_index_start)
# Append location identifiers to the synthesized households
for geog_cat in geog_id.keys():
households[geog_cat] = geog_id[geog_cat]
hh_list.append(households)
people_list.append(people)
key = BlockGroupID(
geog_id['state'], geog_id['county'], geog_id['tract'],
geog_id['block group'])
fit_quality[key] = FitQuality(people_chisq, people_p)
cnt += 1
if len(households) > 0:
hh_index_start = households.index.values[-1] + 1
if num_geogs is not None and cnt >= num_geogs:
break
# TODO might want to write this to disk as we go?
all_households = pd.concat(hh_list)
all_persons = pd.concat(people_list, ignore_index=True)
return (all_households, all_persons, fit_quality)
| bsd-3-clause |
evanbiederstedt/RRBSfun | scripts/PDR_methyl_normal_mcell_by_cell1.py | 1 | 2419 |
import glob
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
import os
os.chdir('/Users/evanbiederstedt/Downloads/RRBS_data_files')
mcells = glob.glob("RRBS_NormalBCD19pCD27mcell*") # mcell
newdf1 = pd.DataFrame()
for filename in mcells:
df = pd.read_table(filename)
df['filename'] = str(filename)
df = df.drop(['start', 'strand', 'avgWeightedEnt', 'CpGEntropy', 'tss', 'genes', 'exons', 'introns',
'promoter', 'cgi', 'geneDensity', 'ctcfUpstream', 'ctcfDownstream',
'ctcfDensity', 'geneDistalRegulatoryModules', 'vistaEnhancers', '3PrimeUTR', 'ctcfUpDistance', 'ctcfDownDistance',
'3PrimeUTRDistance', '5PrimeUTR', '5PrimeUTRDistance', 'firstExon',
'geneDistalRegulatoryModulesK562', 'geneDistalRegulatoryModulesK562Distance', 'hypoInHues64','hypoInHues64Distance',
'tssDistance', 'genesDistance', 'exonsDistance', 'intronsDistance', 'promoterDistance', 'cgiDistance',
'ctcf', 'ctcfDistance', 'geneDistalRegulatoryModulesDistance', 'vistaEnhancersDistance', 'firstExonDistance'], axis=1)
chromosomes = ['chr2', 'chr5', 'chr11']
df = df[(df["chr"].isin(chromosomes))]
num_bins = int(np.ceil(df["avgReadCpGs"].max()/1.25))
df["avgReadCpGs_binned"] = pd.cut(df['avgReadCpGs'], num_bins, labels=False)
df["read_stack_ID"] = (df.avgReadCpGs_binned.shift(1) != df.avgReadCpGs_binned).astype(int).cumsum()
df["total_reads"] = df[["methReadCount", "unmethReadCount", "mixedReadCount"]].sum(axis=1)
df = df.groupby(["read_stack_ID"])[["filename", "thisMeth", "thisUnmeth", "methReadCount", "unmethReadCount", "mixedReadCount", "total_reads"]].sum()
df["filename"] = str(filename)
df = df[["filename", "thisMeth", "thisUnmeth", "mixedReadCount", "total_reads"]].sum()
df["PDR_total"] = df["mixedReadCount"]/df["total_reads"]
df["methylation"] = df["thisMeth"]/(df["thisMeth"]+df["thisUnmeth"]) # corrected
newdf1 = newdf1.append(df, ignore_index=True)
newdf1 = newdf1[["filename", "methylation_total", "PDR_total", "thisMeth", "mixedReadCount", "total_reads"]]
# export as .csv
newdf1.to_csv("Meth_PDR_cell_normal_mcell.csv") | mit |
jamesp/shallowwater | beta_plane/exoplanet.py | 1 | 4070 | import sys
import numpy as np
import xarray as xr
from tqdm import tqdm
from shallowwater import PeriodicLinearShallowWater
from plotting import plot_wind_arrows
nx = 128*4
ny = 129
nd = 25 # number of days to run
DAY = 86400
RADIUS = 6371e3
PLOT = False
SNAP_DAYS = 5
# # Radius of deformation: Rd = sqrt(2 c / beta)
Rd = 3000.0e3 # Fix Rd at 3000 km
Lx = 4*np.pi*RADIUS
Ly = Lx//4
beta0=3e-13
# Kelvin/gravity wave speed: c = sqrt(phi0)
phi0 = float(sys.argv[1])
c = np.sqrt(phi0)
delta_phi = phi0*0.1
print('c', c)
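# Sketch of the consistency check implied by the comments above (not used below):
# the deformation radius that would follow from c = sqrt(phi0) is sqrt(2 * c / beta0),
# whereas Rd is simply pinned at 3000 km independent of phi0.
# implied_Rd = np.sqrt(2.0 * c / beta0)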
# cfl = 0.4 # For numerical stability CFL = |u| dt / dx < 1.0
# dx = Lx / nx
# dt = np.floor(cfl * dx / (c*4))
# print('dt', dt)
if c > 32:
dt = 600
else:
dt = 1200
tau_rad = 4.0*DAY
tau_fric = 4.0*DAY
class MatsunoGill(PeriodicLinearShallowWater):
def __init__(self, nx, ny, Lx, Ly, alpha, beta, phi0,
tau_fric, tau_rad, dt=dt, nu=5.0e2, r=1e-4):
super(MatsunoGill, self).__init__(nx, ny, Lx, Ly, beta=beta, g=1.0, H=phi0, f0=0.0, dt=dt, nu=nu, r=r)
self.alpha = alpha
self.phi0 = phi0
#self.phi[:] += phi0
def to_dataset(self):
dataset = super(MatsunoGill, self).to_dataset()
dataset['phi_eq'] = xr.DataArray(self.phi_eq().T.copy(), coords=(dataset.y, dataset.x))
dataset['phi_eq_xi'] = xr.DataArray(self.centre_substellar(self.phi_eq()).T.copy(), coords=(dataset.y, dataset.x))
dataset['phi_xi'] = xr.DataArray(self.centre_substellar(self.phi).T.copy(), coords=(dataset.y, dataset.x))
return dataset
def substellarx(self, t=None):
if t is None:
t = self.t
return np.fmod(t*self.alpha*self.c, self.Lx)
@property
def c(self):
return np.sqrt(self.phi0)
@property
def phixi(self):
subx = self.substellarx()
sx = self.phix - subx
sx[sx < -self.Lx/2] = sx[sx < -self.Lx/2] + self.Lx
sx[sx > self.Lx/2] = sx[sx > self.Lx/2] - self.Lx
return sx
def centre_substellar(self, psi):
subi = np.argmin(self.phixi**2)
return np.roll(psi, self.nx//2 - subi, axis=0)
def phi_eq(self):
return delta_phi*np.exp(-((self.phixi)**2 + self.phiy**2) / (Rd**2))
def rhs(self):
u, v, phi = self.state
# phi rhs
dphi = np.zeros_like(phi)
du, dv = np.zeros_like(self.u), np.zeros_like(self.v)
# Newtonian cooling / Rayleigh Friction
dphi += (self.phi_eq() - phi)/tau_rad
du -= u / tau_fric
dv -= v / tau_fric
return np.array([du, dv, dphi])
alphas = [-2., -1., -.75, -.5, -.25, -.1, 0., .1, .25, .5, .75, 1., 2.]
betas = [1, 3, 10, 30, 100, 300]
#betas = [1., 10., 100.]
#alphas = [0.]
#betas = [1]
odata = []
if PLOT:
import matplotlib.pyplot as plt
plt.ion()
fig, ax = plt.subplots()
for b in tqdm(betas):
beta = b*beta0
bdata = []
for a in tqdm(alphas):
atmos = MatsunoGill(nx, ny, Lx, Ly, beta=beta, alpha=a,
phi0=phi0, tau_fric=tau_fric, tau_rad=tau_rad,
dt=dt, nu=5.0e3)
snapshots = []
def take_snapshot():
dset = atmos.to_dataset()
dset.coords['time'] = atmos.t
snapshots.append(dset)
take_snapshot()
prog = tqdm(range(int(nd*DAY/dt)))
for i in prog:
atmos.step()
if atmos.t % (86400*SNAP_DAYS) == 0:
#print('%.1f\t%.2f' % (atmos.t/DAY, np.max(atmos.u**2)))
take_snapshot()
prog.set_description('u: %.2f' % atmos.u.max())
if PLOT:
plt.clf()
dset.phi.plot.contourf(levels=13)
plt.show()
plt.pause(0.01)
adata = xr.concat(snapshots, dim='time')
adata.coords['alpha'] = a
bdata.append(adata)
data = xr.concat(bdata, dim='alpha')
data.coords['beta'] = b
odata.append(data)
data = xr.concat(odata, dim='beta')
data.to_netcdf('/Users/jp492/Dropbox/data/beta_data_linear_h%.0f.nc' % (phi0)) | mit |
anntzer/scikit-learn | examples/applications/svm_gui.py | 16 | 11351 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
print(__doc__)
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
try:
from matplotlib.backends.backend_tkagg import NavigationToolbar2Tk
except ImportError:
# NavigationToolbar2TkAgg was deprecated in matplotlib 2.2
from matplotlib.backends.backend_tkagg import (
NavigationToolbar2TkAgg as NavigationToolbar2Tk
)
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import sys
import numpy as np
import tkinter as Tk
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model:
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller:
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View:
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
try:
canvas.draw()
except AttributeError:
# support for matplotlib (1.*)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2Tk(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, r"RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, r"Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in range(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar:
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
tawsifkhan/scikit-learn | examples/feature_stacker.py | 246 | 1906 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features were good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
| bsd-3-clause |
hycis/Mozi | example/mnist_dae.py | 1 | 2311 | import os
import theano
import theano.tensor as T
import numpy as np
from mozi.datasets.mnist import Mnist
from mozi.model import Sequential
from mozi.layers.linear import Linear
from mozi.layers.activation import *
from mozi.layers.noise import Dropout, Gaussian
from mozi.log import Log
from mozi.train_object import TrainObject
from mozi.cost import mse, error, entropy
from mozi.learning_method import *
from mozi.weight_init import *
from mozi.env import setenv
from sklearn.metrics import accuracy_score
def train():
# build dataset
data = Mnist(batch_size=64, train_valid_test_ratio=[5,1,1])
# for autoencoder, the output will be equal to input
data.set_train(X=data.get_train().X, y=data.get_train().X)
data.set_valid(X=data.get_valid().X, y=data.get_valid().X)
# build model
model = Sequential(input_var=T.matrix(), output_var=T.matrix())
# build encoder
model.add(Gaussian())
encode_layer1 = Linear(prev_dim=28*28, this_dim=200)
model.add(encode_layer1)
model.add(RELU())
encode_layer2 = Linear(prev_dim=200, this_dim=50)
model.add(encode_layer2)
model.add(Tanh())
# build decoder
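# (the decoder reuses the encoder weight matrices transposed, i.e. tied weights)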
decode_layer1 = Linear(prev_dim=50, this_dim=200, W=encode_layer2.W.T)
model.add(decode_layer1)
model.add(RELU())
decode_layer2 = Linear(prev_dim=200, this_dim=28*28, W=encode_layer1.W.T)
model.add(decode_layer2)
model.add(Sigmoid())
# build learning method
learning_method = AdaGrad(learning_rate=0.01, momentum=0.9,
lr_decay_factor=0.9, decay_batch=10000)
# put everything into the train object
train_object = TrainObject(model = model,
log = None,
dataset = data,
train_cost = entropy,
valid_cost = entropy,
learning_method = learning_method,
stop_criteria = {'max_epoch' : 10,
'epoch_look_back' : 5,
'percent_decrease' : 0.01}
)
# finally run the code
train_object.setup()
train_object.run()
if __name__ == '__main__':
setenv()
train()
| mit |
dandanvidi/in-vivo-enzyme-kinetics | scripts/plot_cu_hists_for_specific_conditions.py | 3 | 3647 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 20 09:03:53 2016
@author: dan
"""
import pandas as pd
from capacity_usage import CAPACITY_USAGE
import matplotlib.pyplot as plt
#import seaborn as sns
import sys, os
import numpy as np
from scipy.stats import ranksums, wilcoxon
import re
cmap = plt.cm.viridis
flux = pd.DataFrame.from_csv("../data/mmol_gCDW_h.csv")
abundance = pd.DataFrame.from_csv("../data/g_gCDW.csv")
cu = CAPACITY_USAGE(flux, abundance)
#%%
conds = cu.conditions.index
color = ['red', '0.7']
colors = dict(zip(conds,color))
#hpad=0.5
fs=15
figsize=(8,6)
b = 10
for c in conds:
capacity_usage = cu.CU.loc[:, c].copy()
capacity_usage[capacity_usage < 0] = np.nan  # mask negative values (Series.replace cannot take a predicate)
capacity_usage.dropna(inplace=True)
out, bins = pd.cut(capacity_usage,b, retbins=True)
out.name = "bins"
df = pd.DataFrame(out)
s = df.groupby("bins")
# E = cu.E.loc[capacity_usage.index, c] / cu.E.loc[capacity_usage.index, c].sum()
cap = {k:cu.E.loc[v, c].sum() for k,v in s.groups.iteritems()}
cap = {k:v/sum(cap.values()) for k,v in cap.iteritems()}
cap = pd.Series(cap)
# df = capacity_usage/2
# print len(df)
# hist, bin_edges = np.histogram(df, bins=50, normed=True)
# bin_edges *= 2
C = [cmap(1-i) for i in bins]
# ymax = hist.max()
# print hist[-1]
# ylim=15
# if ymax < ylim:
#
plt.figure(figsize=figsize)
ax = plt.axes()
# ax = plt.subplot2grid((4, 2), (0, 0), colspan=2, rowspan=4)
# ax.bar(bin_edges[:-1],hist,color=C,width=0.02)
cap.plot(kind='bar', width=0.75, ax=ax, color=C)
ax.set_ylim(0,.40)
ax.tick_params(right=0, top=0, bottom=0, direction='out', labelsize=fs)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_yticks(np.arange(0,.41,.10))
ylabels = []
for i in cap.index:
tmp = re.findall("[-+]?\d*\.\d+|\d+", i)
ylab = "-".join(tmp)
if "-0.001" in ylab:
ylab = '0-0.1'
ylabels.append(ylab)
ax.set_xticklabels(ylabels, rotation=45)
ax.set_xlabel('capacity utilization', size=fs)
ax.set_ylabel('fraction of total enzyme mass', size=fs)
ax.annotate(c, (4, 0.35), size=fs, color='0.5')
plt.tight_layout()
plt.savefig('../res/hist_%s.svg'%c)
# ax.spines['bottom'].set_visible(False)
'''
hpad=None
else:
plt.figure(figsize=figsize)
ax_up = plt.subplot2grid((4, 2), (0, 0), colspan=2, rowspan=1)
ax_low = plt.subplot2grid((4, 2), (1, 0), colspan=2, rowspan=3)
# set upper axis properties
ax = ax_up
ax.set_xticklabels([])
ax.set_ylim(45,60)
ax.set_xlim(0,1)
ax.set_yticks([40, 60])
ax.set_yticklabels(['', 60])
d = .015
kwargs = dict(transform=ax.transAxes, color='k', clip_on=False)
ax.plot((-d,+d),(-d,+d), **kwargs)
ax.bar(bin_edges[:-1],hist,color=C,width=0.02)
ax.set_ylabel('')
ax = ax_low
kwargs = dict(transform=ax.transAxes, color='k', clip_on=False)
ax.plot((-d,+d),(1-d,1+d), **kwargs)
ax.bar(bin_edges[:-1],hist,color=C,width=0.02)
ax.set_ylim(0,ylim)
ax.set_yticks(np.arange(0, ylim-2, 5))
ax.set_yticklabels([0,5,10,''])
# set lower axis properties
ax.set_ylabel('')
ax.tick_params(right=0, top=0, direction='out', labelsize=fs)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_xlim(0,1)
plt.tight_layout(h_pad=hpad)
plt.savefig('../res/hist_%s.svg'%c)
#%%
''' | mit |
DaveBackus/Data_Bootcamp | Code/Lab/BDS_firm_size.py | 1 | 2585 | """
Business Dynamics Statistics (BDS) from the US Census
Links
* http://www.census.gov/ces/dataproducts/bds/data.html
* http://www.census.gov/ces/dataproducts/bds/data_firm.html
* http://www.census.gov/ces/pdf/BDS_2013_Codebook.pdf
* http://fivethirtyeight.com/features/the-next-amazon-or-apple-or-ge-is-probably-failing-right-now/
* https://www.newyorkfed.org/medialibrary/media/research/staff_reports/sr707.pdf
Prepared for Data Bootcamp course at NYU
* http://databootcamp.nyuecon.com/
* https://github.com/DaveBackus/Data_Bootcamp/Code/Lab
Written by Dave Backus, February 2016
Created with Python 3.5
"""
import sys
import pandas as pd
#import matplotlib.pyplot as plt
print('\nPython version: ', sys.version)
print('Pandas version: ', pd.__version__, '\n')
#%%
"""
firm sizes
"""
url = 'http://www2.census.gov/ces/bds/firm/bds_f_sz_release.csv'
raw = pd.read_csv(url)
print('\nDataframe dimensions:', raw.shape)
print('\nVariables and dtypes:\n', raw.dtypes, sep='')
#print('Firm size categories:\n', fsz['fsize'].head(12), sep='')
# clean up size labels
# http://pandas.pydata.org/pandas-docs/stable/text.html#splitting-and-replacing-strings
#raw['fsize'] = raw['fsize'].str.split(n=1).str[1]
#print('\nEdited firm size categories:\n', raw['fsize'].head(12), sep='')
"""
# exam data
years = [2008, 2009, 2010]
d13 = raw[raw['year2'].isin(years)][['fsize', 'Firms', 'Emp']]
d13.to_dict('list')
"""
#%%
"""
year2 = date
fsize = size category
Firms = number of firms in category
firmdeath_firms = number of exits
"""
n1 = 2011
n2 = 2013
years = [n1, n2]
fsz = raw[raw['year2'].isin(years)]
fsz = fsz[['year2', 'fsize', 'Emp']]
fszp = fsz.pivot('fsize', 'year2', 'Emp')
fszp['PctChEmp'] = 100*(fszp[n2]/fszp[n1]-1)
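# e.g. if a size class employs 100 in 2011 and 105 in 2013, PctChEmp = 100*(105/100 - 1) = 5.0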
fszp['PctChEmp'].plot(kind='barh')
#%%
# =============================================================================
"""
firm ages
"""
url = 'http://www2.census.gov/ces/bds/firm/bds_f_age_release.csv'
raw = pd.read_csv(url)
print('\nDataframe dimensions:', raw.shape)
print('\nVariables and dtypes:\n', raw.dtypes, sep='')
#print('Firm size categories:\n', fsz['fsize'].head(12), sep='')
#%%
# clean up size labels
# http://pandas.pydata.org/pandas-docs/stable/text.html#splitting-and-replacing-strings
raw['fsize'] = raw['fsize'].str.split(n=1).str[1]
#print('\nEdited firm size categories:\n', raw['fsize'].head(12), sep='')
#%%
"""
year2 = date
fsize = size category
Firms = number of firms in category
firmdeath_firms = number of exits
"""
fsz = raw[raw['year2'] >= 2012]
fsz = fsz.set_index(['year2', 'fsize'])
fsz = fsz['Firms']/10**6
fsz
| mit |
Gamecredits-Universe/Gamecredits-electrum-client | plugins/plot.py | 1 | 3575 | from PyQt4.QtGui import *
from electrum_gmc.plugins import BasePlugin, hook
from electrum_gmc.i18n import _
import datetime
from electrum_gmc.util import format_satoshis
from electrum_gmc.bitcoin import COIN
try:
import matplotlib.pyplot as plt
import matplotlib.dates as md
from matplotlib.patches import Ellipse
from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, DrawingArea, HPacker
flag_matlib=True
except:
flag_matlib=False
class Plugin(BasePlugin):
def is_available(self):
if flag_matlib:
return True
else:
return False
@hook
def export_history_dialog(self, window, hbox):
wallet = window.wallet
history = wallet.get_history()
if len(history) > 0:
b = QPushButton(_("Preview plot"))
hbox.addWidget(b)
b.clicked.connect(lambda: self.do_plot(wallet, history))
else:
b = QPushButton(_("No history to plot"))
hbox.addWidget(b)
def do_plot(self, wallet, history):
balance_Val=[]
fee_val=[]
value_val=[]
datenums=[]
unknown_trans = 0
pending_trans = 0
counter_trans = 0
balance = 0
for item in history:
tx_hash, confirmations, value, timestamp, balance = item
if confirmations:
if timestamp is not None:
try:
datenums.append(md.date2num(datetime.datetime.fromtimestamp(timestamp)))
balance_Val.append(1.*balance/COIN)
except (RuntimeError, TypeError, NameError) as reason:
unknown_trans += 1
pass
else:
unknown_trans += 1
else:
pending_trans += 1
value_val.append(1.*value/COIN)
if tx_hash:
label, is_default_label = wallet.get_label(tx_hash)
label = label.encode('utf-8')
else:
label = ""
f, axarr = plt.subplots(2, sharex=True)
plt.subplots_adjust(bottom=0.2)
plt.xticks( rotation=25 )
ax=plt.gca()
x=19
test11="Unknown transactions = "+str(unknown_trans)+" Pending transactions = "+str(pending_trans)+" ."
box1 = TextArea(" Test : Number of pending transactions", textprops=dict(color="k"))
box1.set_text(test11)
box = HPacker(children=[box1],
align="center",
pad=0.1, sep=15)
anchored_box = AnchoredOffsetbox(loc=3,
child=box, pad=0.5,
frameon=True,
bbox_to_anchor=(0.5, 1.02),
bbox_transform=ax.transAxes,
borderpad=0.5,
)
ax.add_artist(anchored_box)
plt.ylabel('GMC')
plt.xlabel('Dates')
xfmt = md.DateFormatter('%Y-%m-%d')
ax.xaxis.set_major_formatter(xfmt)
axarr[0].plot(datenums,balance_Val,marker='o',linestyle='-',color='blue',label='Balance')
axarr[0].legend(loc='upper left')
axarr[0].set_title('History Transactions')
xfmt = md.DateFormatter('%Y-%m-%d')
ax.xaxis.set_major_formatter(xfmt)
axarr[1].plot(datenums,value_val,marker='o',linestyle='-',color='green',label='Value')
axarr[1].legend(loc='upper left')
# plt.annotate('unknown transaction = %d \n pending transactions = %d' %(unknown_trans,pending_trans),xy=(0.7,0.05),xycoords='axes fraction',size=12)
plt.show()
| gpl-3.0 |
boada/desCluster | legacy/stats/calc_mass.py | 4 | 1512 | from math import sqrt
from astLib import astStats
from astLib import astCalc
import pandas as pd
from glob import glob
from bootstrap import bootstrap
def calcVD_big(data):
return astStats.biweightScale(data, tuningConstant=9.0)
def calcVD_small(data):
return astStats.gapperEstimator(data)
def calc_mass(data):
if len(data) > 10:
vd = calcVD_big(data['LOSV'].values)
up, low = bootstrap(data['LOSV'].values, astStats.biweightScale,
alpha=0.32, tuningConstant=9.0)
else:
vd = calcVD_small(data['LOSV'].values)
up, low = bootstrap(data['LOSV'].values, astStats.gapperEstimator,
alpha=0.32)
# print vd, abs(vd-up), abs(vd-low),
avgz = astStats.biweightLocation(data['redshift'].values, tuningConstant=6.0)
r200 = sqrt(3) * vd /(10*astCalc.H0 * astCalc.Ez(avgz))
r200up = sqrt(3) * up /(10*astCalc.H0 * astCalc.Ez(avgz))
r200low = sqrt(3) * low /(10*astCalc.H0 * astCalc.Ez(avgz))
# print r200, abs(r200-r200up), abs(r200-r200low),
a = 3 * sqrt(3) * 1000**3 * 3.08E19/(10*astCalc.H0 * astCalc.Ez(avgz) *\
6.67384E-11)
m200 = a * vd**3
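# (Relation being evaluated: M200 = 3*sqrt(3) * sigma^3 / (10 * G * H(z)); the 1000**3
# factor converts sigma from km/s to m/s and the 3.08E19 factor converts H0 from
# km/s/Mpc to 1/s, so m200 comes out in kg and is printed below in solar masses.)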
# propagate errors
m200low = a * vd**2 * 3 * low
m200up = a * vd**2 * 3 * up
print vd, m200/1.9891E30, m200low/1.9891E30, m200up/1.9891E30
return data, m200, vd
files = glob('*members.csv')
for f in files:
data = pd.read_csv(f)
print f.split('_')[0],
mass = calc_mass(data)
#print len(data), mass/1.9891E30
| mit |
phoebe-project/phoebe2-docs | 2.1/tutorials/gravb_bol.py | 1 | 3219 | #!/usr/bin/env python
# coding: utf-8
# Gravity Brightening/Darkening (gravb_bol)
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.1 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.1,<2.2"')
# As always, let's do imports and initialize a logger and a new bundle. See [Building a System](../tutorials/building_a_system.html) for more details.
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
# In[3]:
b.add_dataset('lc', dataset='lc01')
b.add_dataset('mesh', times=[0], columns=['intensities*'])
# Relevant Parameters
# --------------------
#
# The 'gravb_bol' parameter corresponds to the β coefficient for gravity darkening corrections.
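# (For reference: this is the bolometric gravity-darkening exponent in the convention
# T_eff^4 ∝ g^gravb_bol, so 1.0 recovers the classical von Zeipel value for radiative
# envelopes and ~0.32 the Lucy value for convective envelopes, matching the suggested
# ranges listed below.)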
# In[4]:
print(b['gravb_bol'])
# In[5]:
print(b['gravb_bol@primary'])
# If you have a logger enabled, PHOEBE will print a warning if the value of gravb_bol is outside the "suggested" ranges. Note that this is strictly a warning, and will never turn into an error at [b.run_compute()](../api/phoebe.frontend.bundle.Bundle.run_compute.md).
#
# You can also manually call [b.run_checks()](../api/phoebe.frontend.bundle.Bundle.run_checks.md). The first returned item tells whether the system has passed checks: True means it has, False means it has failed, and None means the tests pass but with a warning. The second returned item gives the message of the first warning/error raised by the checks.
#
# The checks use the following "suggested" values:
# * teff 8000+: gravb_bol >= 0.9 (suggest 1.0)
# * teff 6600-8000: gravb_bol 0.32-1.0
# * teff 6600-: grav_bol < 0.9 (suggest 0.32)
# In[6]:
print(b.run_checks())
# In[7]:
b['teff@primary'] = 8500
b['gravb_bol@primary'] = 0.8
print(b.run_checks())
# In[8]:
b['teff@primary'] = 7000
b['gravb_bol@primary'] = 0.2
print(b.run_checks())
# In[9]:
b['teff@primary'] = 6000
b['gravb_bol@primary'] = 1.0
print(b.run_checks())
# Influence on Intensities
# ------------------
# In[26]:
b['teff@primary'] = 6000
b['gravb_bol@primary'] = 0.32
# In[29]:
b.run_compute(model='gravb_bol_32')
# In[30]:
afig, mplfig = b['primary@mesh01@gravb_bol_32'].plot(fc='intensities', ec='None', show=True)
# In[31]:
b['gravb_bol@primary'] = 1.0
# In[32]:
b.run_compute(model='gravb_bol_10')
# In[33]:
afig, mplfig = b['primary@mesh01@gravb_bol_10'].plot(fc='intensities', ec='None', show=True)
# Comparing these two plots, it is essentially impossible to notice any difference between the two models. But if we compare the intensities directly, we can see that there is a subtle difference, with a maximum difference of about 3%.
# In[37]:
np.nanmax((b.get_value('intensities', component='primary', model='gravb_bol_32') - b.get_value('intensities', component='primary', model='gravb_bol_10'))/b.get_value('intensities', component='primary', model='gravb_bol_10'))
# In[ ]:
| gpl-3.0 |
cgrohman/ponies | bet_strategy/bets.py | 1 | 19910 | from stats import Stats
from race import Race
from horse import Horse
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_regression
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_score
import sys
import pdb
import itertools
from utilities import hook
from utilities import lineno
import utilities
SCRIPT_NAME = 'bets.py'
SCRIPT_VERSION = '1.0'
BREAKAGE = 1 # 1 = one digit after period (rounds down to nearest tenth)
MIN_PAYOUT = 0.4 # $2.40 is the minimum payout
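# e.g. with BREAKAGE = 1 a raw $2 payout of $2.47 is truncated to $2.40, which also equals the
# minimum payable amount (the $2 stake plus the MIN_PAYOUT of $0.40).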
#------------------------------------------------------------------------------
def one_two_overall(races,stat,DIFF=1):
'''
exactas: 1/2, All
DEPRECATED - use exactas()
'''
stat.name = "One/Two Overall"
for race in races:
logger.info('Date: {} Track: {} Race: {}'.format(race.date, race.track, race.race_number))
exacta_payout = float(race.exacta['payout'])
exacta_bet= int(race.exacta['bet_amount'])
horses = race.sortedHorseOdds()
if not horses:
logger.warning('No odds for any horses- Date: {} Track: {} Race: {}'.format(race.date, race.track, race.race_number))
continue
if one_two_overall_conditions(horses,DIFF):
cost_of_bet = (len(horses)-1)*2*exacta_bet
WON=False
if (horses[0].finish_position['position'] == '1' or horses[1].finish_position['position'] == '1'):
WON=True
stat.races_bet.append((race,WON))
logger.debug('WON- Date: {} Track: {} Race: {} Net: {}'.format(race.date, race.track, race.race_number,exacta_payout-cost_of_bet))
else:
stat.races_bet.append((race,WON))
logger.warning('LOST- Date: {} Track: {} Race: {}'.format(race.date, race.track, race.race_number))
stat.appendBet([cost_of_bet, exacta_payout, WON])
return()
#------------------------------------------------------------------------------
def one_two_overall_conditions(horses, DIFF):
flag = False
if float(horses[1].odds)*DIFF <= float(horses[2].odds):
flag = True
return(flag)
#------------------------------------------------------------------------------
def two_three_overall(races,stat,DIFF=1):
'''
Exactas: 2/3, All
DEPRECATED - use exactas()
'''
stat.name = "Two/Three Overall"
for race in races:
exacta_payout = float(race.exacta['payout'])
exacta_bet= int(race.exacta['bet_amount'])
horses = race.sortedHorseOdds()
if not horses:
logger.warning('No odds for any horses- Date: {} Track: {} Race: {}'.format(race.date, race.track, race.race_number))
continue
elif len(horses)<4:
logger.warning('Not enough horses for this bet- Date: {} Track: {} Race: {}'.format(race.date, race.track, race.race_number))
continue
if float(horses[2].odds)*DIFF <= float(horses[3].odds):
cost_of_bet = (len(horses)-1)*2*exacta_bet
WON=False
if (horses[1].finish_position['position'] == '1' or horses[2].finish_position['position'] == '1'):
WON=True
stat.races_bet.append((race,WON))
logger.debug('WON - Date: {} Track: {} Race: {} Net: {}'.format(race.date, race.track, race.race_number,exacta_payout-cost_of_bet))
else:
logger.warning('LOST- Date: {} Track: {} Race: {}'.format(race.date, race.track, race.race_number))
stat.appendBet([cost_of_bet, exacta_payout, WON])
return()
#------------------------------------------------------------------------------
def exacta(race, stat, bet_name, first, second='All', DIFF=1):
'''
Exactas ex; first = [1,2] second='All'
'''
stat.name = bet_name
exacta_payout = float(race.exacta['payout'])
exacta_bet= int(race.exacta['bet_amount'])
ordered_horses_odds = race.sortedHorseOdds()
if not ordered_horses_odds:
hook(SCRIPT_NAME, "WARNING", "XXX", lineno(), 'No odds for any horses- Date: {} Track: {} Race: {}'.format(race.date, race.track, race.race_number))
return(0)
elif len(ordered_horses_odds)<4:
hook(SCRIPT_NAME, "WARNING", "XXX", lineno(), 'Not enough horses for this bet- Date: {} Track: {} Race: {}'.format(race.date, race.track, race.race_number))
return(0)
first_horse_list, first = build_horse_list(first, ordered_horses_odds)
second_horse_list, second = build_horse_list(second, ordered_horses_odds)
outcome=0
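# Only place the bet when there is a clear odds gap after the last covered position:
# the odds of the worst covered horse (scaled by DIFF) must not exceed the odds of the
# next horse in the odds-ordered field.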
if float(ordered_horses_odds[max(first + second)-1].odds)*DIFF <= float(ordered_horses_odds[max(first + second)].odds):
cost_of_bet = calculate_bet_cost([first_horse_list, second_horse_list], exacta_bet, True)
outcome-=cost_of_bet
WON=False
first_flag = did_horse_hit(first_horse_list, '1')
second_flag = did_horse_hit(second_horse_list, '2')
if first_flag and second_flag:
WON=True
outcome += exacta_payout
hook(SCRIPT_NAME, "INFO", "LOW", lineno(), 'WON - Date: {} Track: {} Race: {} Net: {}'.format(race.date, race.track, race.race_number,exacta_payout-cost_of_bet))
else:
hook(SCRIPT_NAME, "INFO", "MEDIUM", lineno(), 'LOST- Date: {} Track: {} Race: {} Net: {}'.format(race.date, race.track, race.race_number, -cost_of_bet))
stat.races_bet.append((race, WON))
stat.appendBet([cost_of_bet, exacta_payout, WON])
return(outcome)
#------------------------------------------------------------------------------
def exacta_box(race, stat, first, second='All', DIFF=1):
'''
Exactas ex; first = [1,2] second='All'
'''
stat.name = 'Exacta: {} over {}'.format(first, second)
exacta_payout = float(race.exacta['payout'])
exacta_bet = int(race.exacta['bet_amount'])
ordered_horses_odds = race.sortedHorseOdds()
if not ordered_horses_odds:
hook(SCRIPT_NAME, "WARNING", "XXX", lineno(), 'No odds for any horses- Date: {} Track: {} Race: {}'.format(race.date, race.track, race.race_number))
return(0)
if len(ordered_horses_odds)<5:
hook(SCRIPT_NAME, "WARNING", "XXX", lineno(), 'Not enough horses for this bet- Date: {} Track: {} Race: {}'.format(race.date, race.track, race.race_number))
return(0)
first_horse_list = []
for i in first:
first_horse_list.append(ordered_horses_odds[i-1])
outcome = 0
if float(ordered_horses_odds[max(first)-1].odds)*DIFF <= float(ordered_horses_odds[max(first)].odds):
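# An n-horse exacta box covers n*(n-1) ordered pairs, so the ticket costs n*(n-1) times the base bet.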
cost_of_bet = len(first)*(len(first)-1)*exacta_bet
calc = calculate_bet_cost([first_horse_list, first_horse_list], exacta_bet, True)
if cost_of_bet != calc:
hook(SCRIPT_NAME, "WARNING", "XXX", lineno(), 'Bet costs are not equal: {} calced: {}'.format(cost_of_bet, calc))
outcome -= cost_of_bet
WON = False
first_flag,second_flag = False,False
for h in first_horse_list:
if h.finish_position['position'] == '1':
first_flag = True
elif h.finish_position['position'] == '2':
second_flag = True
if first_flag and second_flag:
WON = True
outcome += exacta_payout
hook(SCRIPT_NAME, "INFO", "LOW", lineno(), 'WON- Date: {} Track: {} Race: {} Net: {}'.format(race.date, race.track, race.race_number,exacta_payout-cost_of_bet))
else:
hook(SCRIPT_NAME, "INFO", "MEDIUM", lineno(), 'LOST- Date: {} Track: {} Race: {}'.format(race.date, race.track, race.race_number))
stat.races_bet.append((race,WON))
stat.appendBet([cost_of_bet, exacta_payout, WON])
return(outcome)
#------------------------------------------------------------------------------
def trifecta(race, stat, bet_name, first, second='All', third='All', DIFF=1, purse_min=0):
'''
Trifectas ex; first = [1,2,3] second = [1,2,3] third = [1,2,3]
'''
stat.name = bet_name
try:
trifecta_payout = float(race.trifecta['payout'])
trifecta_bet = float(race.trifecta['bet_amount'])
except:
hook(SCRIPT_NAME, "WARNING", "XXX", lineno(), 'Race did not contain trifecta data- Date: {} Track: {} Race: {}'.format(race.date, race.track, race.race_number))
return(0)
ordered_horses_odds = race.sortedHorseOdds()
if not ordered_horses_odds:
hook(SCRIPT_NAME, "WARNING", "XXX", lineno(), 'No odds for any horses- Date: {} Track: {} Race: {}'.format(race.date, race.track, race.race_number))
return(0)
if len(ordered_horses_odds)<5:
hook(SCRIPT_NAME, "WARNING", "XXX", lineno(), 'Not enough horses for this bet- Date: {} Track: {} Race: {}'.format(race.date, race.track, race.race_number))
return(0)
# List of horses based on odds
first_horse_list, first = build_horse_list(first, ordered_horses_odds)
second_horse_list, second = build_horse_list(second, ordered_horses_odds)
third_horse_list, third = build_horse_list(third, ordered_horses_odds)
outcome = 0
all_positions = first + second + third
if float(ordered_horses_odds[max(all_positions)-1].odds)*DIFF <= float(ordered_horses_odds[max(all_positions)].odds) and float(race.trifecta['pool']) >= purse_min:
hook(SCRIPT_NAME, "INFO", "HIGH", lineno(), 'Position/odds: {}/{} {}/{}'.format(max(all_positions), ordered_horses_odds[max(all_positions)].odds, max(all_positions)+1, ordered_horses_odds[max(all_positions)+1].odds))
cost_of_bet = calculate_bet_cost([first_horse_list, second_horse_list, third_horse_list], trifecta_bet, True)
outcome -= cost_of_bet
WON = False
first_flag = did_horse_hit(first_horse_list, '1')
second_flag = did_horse_hit(second_horse_list, '2')
third_flag = did_horse_hit(third_horse_list, '3')
if first_flag and second_flag and third_flag:
WON = True
outcome += trifecta_payout
hook(SCRIPT_NAME, "INFO", "LOW", lineno(), 'WON - Date: {} Track: {} Race: {} Net: {}'.format(race.date, race.track, race.race_number, trifecta_payout-cost_of_bet))
else:
hook(SCRIPT_NAME, "INFO", "MEDIUM", lineno(), 'LOST- Date: {} Track: {} Race: {} Net: {}'.format(race.date, race.track, race.race_number, -cost_of_bet))
stat.races_bet.append((race,WON))
stat.appendBet([cost_of_bet, trifecta_payout, WON])
return(outcome)
#------------------------------------------------------------------------------
def superfecta(race, stat, bet_name, first, second='All', third='All', fourth='All', DIFF=1, purse_min = 0):
'''
Superfectas ex; first = [1,2,3] second = [1,2,3] third = [1,2,3] fourth = [1,2,3]
'''
stat.name = bet_name
try:
superfecta_payout = float(race.superfecta['payout'])
superfecta_bet = float(race.superfecta['bet_amount'])
except:
hook(SCRIPT_NAME, "WARNING", "XXX", lineno(), 'Race did not contain superfecta data- Date: {} Track: {} Race: {}'.format(race.date, race.track, race.race_number))
return(0)
if superfecta_bet == 0:
hook(SCRIPT_NAME, "WARNING", "XXX", lineno(), 'Race did not contain superfecta data- Date: {} Track: {} Race: {}'.format(race.date, race.track, race.race_number))
return(0)
ordered_horses_odds = race.sortedHorseOdds()
if not ordered_horses_odds:
hook(SCRIPT_NAME, "WARNING", "XXX", lineno(), 'No odds for any horses- Date: {} Track: {} Race: {}'.format(race.date, race.track, race.race_number))
return(0)
if len(ordered_horses_odds)<6:
hook(SCRIPT_NAME, "WARNING", "XXX", lineno(), 'Not enough horses for this bet- Date: {} Track: {} Race: {}'.format(race.date, race.track, race.race_number))
return(0)
# List of horses based on odds
first_horse_list, first = build_horse_list(first, ordered_horses_odds)
second_horse_list, second = build_horse_list(second, ordered_horses_odds)
third_horse_list, third = build_horse_list(third, ordered_horses_odds)
fourth_horse_list, fourth = build_horse_list(fourth, ordered_horses_odds)
outcome = 0
all_positions = first + second + third + fourth
if float(ordered_horses_odds[max(all_positions)-1].odds)*DIFF <= float(ordered_horses_odds[max(all_positions)].odds):
hook(SCRIPT_NAME, "INFO", "HIGH", lineno(), 'Position/odds: {}/{} {}/{}'.format(max(all_positions), ordered_horses_odds[max(all_positions)-1].odds, max(all_positions)+1, ordered_horses_odds[max(all_positions)].odds))
cost_of_bet = calculate_bet_cost([first_horse_list, second_horse_list, third_horse_list, fourth_horse_list], superfecta_bet, True)
outcome -= cost_of_bet
WON = False
first_flag = did_horse_hit(first_horse_list, '1')
second_flag = did_horse_hit(second_horse_list, '2')
third_flag = did_horse_hit(third_horse_list, '3')
fourth_flag = did_horse_hit(fourth_horse_list, '4')
if first_flag and second_flag and third_flag and fourth_flag:
WON = True
outcome += superfecta_payout
hook(SCRIPT_NAME, "INFO", "LOW", lineno(), 'WON - Date: {} Track: {} Race: {} Net: {}'.format(race.date, race.track, race.race_number, superfecta_payout-cost_of_bet))
else:
hook(SCRIPT_NAME, "INFO", "MEDIUM", lineno(), 'LOST- Date: {} Track: {} Race: {} Net: {}'.format(race.date, race.track, race.race_number, -cost_of_bet))
stat.races_bet.append((race,WON))
stat.appendBet([cost_of_bet, superfecta_payout, WON])
return(outcome)
#------------------------------------------------------------------------------
def straight(race, stat, bet_name, horse_num, finish_wps, DIFF=1, purse_min = 0):
"""
horse_num: index into the odds-ordered field (1 = horse with the best odds to win)
finish_wps: one of 'WIN', 'PLACE' or 'SHOW'; determines which finishing positions count as a hit
"""
if finish_wps not in ['WIN', 'PLACE', 'SHOW']:
hook(SCRIPT_NAME, "FATAL", "XXX", lineno(), "Finish input incorrect, expected ['WIN', 'PLACE', 'SHOW'], got:{} ".format(finish_wps))
sys.exit()
stat.name = bet_name
ordered_horses_odds = race.sortedHorseOdds()
if not ordered_horses_odds:
hook(SCRIPT_NAME, "WARNING", "XXX", lineno(), 'No odds for any horses- Date: {} Track: {} Race: {}'.format(race.date, race.track, race.race_number))
return(0)
if len(ordered_horses_odds)<horse_num+1:
hook(SCRIPT_NAME, "WARNING", "XXX", lineno(), 'Not enough horses for this bet- Date: {} Track: {} Race: {}'.format(race.date, race.track, race.race_number))
return(0)
outcome = 0
horse = ordered_horses_odds[horse_num - 1]
if float(horse.odds)*DIFF <= float(ordered_horses_odds[horse_num].odds):
hook(SCRIPT_NAME, "INFO", "HIGH", lineno(), 'Position/odds: {}/{}'.format(horse_num, horse.odds))
finish = []
payout = 0
if finish_wps == 'WIN':
finish = [1]
payout = float(horse.wps[0])
elif finish_wps == 'PLACE':
finish = [1, 2]
payout = float(horse.wps[1])
elif finish_wps == 'SHOW':
finish = [1, 2, 3]
payout = float(horse.wps[2])
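# Enforce the minimum payout on a $2 ticket: MIN_PAYOUT is the profit over the $2 stake, so the payout is floored at $2.40.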
if payout-2 < MIN_PAYOUT:
payout = MIN_PAYOUT + 2
cost_of_bet = 2
outcome -= cost_of_bet
WON = False
for f in finish:
if did_horse_hit([horse], str(f)):
WON = True
outcome += payout
hook(SCRIPT_NAME, "INFO", "LOW", lineno(), 'WON - Date: {} Track: {} Race: {} Net: {}'.format(race.date, race.track, race.race_number, payout-cost_of_bet))
if not WON:
hook(SCRIPT_NAME, "INFO", "LOW", lineno(), 'LOST- Date: {} Track: {} Race: {} Net: {}'.format(race.date, race.track, race.race_number, -cost_of_bet))
stat.races_bet.append((race,WON))
stat.appendBet([cost_of_bet, payout, WON])
return(outcome)
#------------------------------------------------------------------------------
def clf_wps(race, stat, bet_name, clf_obj, x, sc_dict, min_prob=0.8, DIFF=1, purse_min = 0):
stat.name = bet_name
low_to_high_odds = race.sortedHorseOdds()[::-1]
outcome = 0
labels = ['race_number', 'purse', 'distance', 'class_rating', 'num_in_field', 'h_odds', 'h_age', 'h_weight', 'h_gate_position', 'h_claim_value', 'h_odds_index']
horse = None
for i,horse in enumerate(low_to_high_odds):
h_index = len(low_to_high_odds)-1-i
new = pd.DataFrame([[race.race_number, race.purse, race.distance, race.class_rating, len(low_to_high_odds), horse.odds, horse.age, horse.weight, horse.gate_position, horse.claim_value, h_index]])
new.columns = labels
for col in labels:
new = scale_col(new, col, sc_dict)
clf = clf_obj['clf']
pred = clf.predict(new)
probs = clf.predict_proba(new)
#if probs[0][1] > .8 and float(horse.odds)>DIFF:
if pred == 1:
hook(SCRIPT_NAME, "INFO", "HIGH", lineno(), 'Prob/odds: {0:.2f}/{1}'.format(probs[0][1], horse.odds))
cost_of_bet = 2
outcome -= cost_of_bet
WON = False
#SHOW
finish = [1, 2, 3]
payout = float(horse.wps[2])
if payout-2 < MIN_PAYOUT:
payout = MIN_PAYOUT + 2
for f in finish:
if did_horse_hit([horse], str(f)):
WON = True
outcome += payout
hook(SCRIPT_NAME, "INFO", "LOW", lineno(), 'WON - Date: {} Track: {} Race: {} Net: {}'.format(race.date, race.track, race.race_number, payout-cost_of_bet))
if not WON:
hook(SCRIPT_NAME, "INFO", "LOW", lineno(), 'LOST- Date: {} Track: {} Race: {} Net: {}'.format(race.date, race.track, race.race_number, -cost_of_bet))
stat.races_bet.append((race,WON))
stat.appendBet([cost_of_bet, payout, WON])
return(outcome)
#------------------------------------------------------------------------------
def get_clf():
df = pd.read_csv('results/singleHorse_2017-04-21.csv')
x = df.iloc[:,:-1]
y = df.iloc[:,-1]
columns = list(x.columns)
sc_dict = sc_fit(x,columns)
for col in columns:
x = scale_col(x, col, sc_dict)
#x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
clf = GaussianNB()
nb_filter = SelectKBest(f_regression, k=5)
nb_pipe = Pipeline([('anova',nb_filter), ('nb',clf)])
nb_pipe.fit(x,y)
score = nb_pipe.score(x,y)
obj = {''}
return nb_pipe,x,sc_dict
#------------------------------------------------------------------------------
def sc_fit(df, labels):
sc_dict = {}
for label in labels:
sc = StandardScaler()
col = np.array(df[label]).T
sc.fit(col.reshape(-1,1))
sc_dict[label] = sc
return sc_dict
#------------------------------------------------------------------------------
def scale_col(df, label, sc_dict):
col = np.array(df[label]).T
try:
df[label] = sc_dict[label].transform(col.reshape(-1,1))
except:
pdb.set_trace()
return df
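# Minimal usage sketch (hypothetical columns; assumes one StandardScaler per column as fitted by sc_fit):
#   df = pd.DataFrame({'h_odds': [2.5, 5.0, 10.0], 'purse': [10000., 15000., 8000.]})
#   sc_dict = sc_fit(df, ['h_odds', 'purse'])
#   df = scale_col(df, 'h_odds', sc_dict)   # column replaced by its z-scores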
#------------------------------------------------------------------------------
def odds_to_horses(abs_place, ordered_horses, **params):
horse_list = []
exclude_list = []
if params:
for k,v in params.items():
exclude_list += v
exclude_list = set(exclude_list)
if abs_place == 'All':
for i in range(1, len(ordered_horses)):
if i in exclude_list:
continue
horse_list.append(ordered_horses[i-1])
else:
for i in abs_place:
horse_list.append(ordered_horses[i-1])
return horse_list
#------------------------------------------------------------------------------
def build_horse_list(abs_place, ordered_horses):
horse_list = []
pos_list = []
if abs_place == 'All':
horse_list = ordered_horses
pos_list = [0]
else:
for i in abs_place:
horse_list.append(ordered_horses[i-1])
pos_list = abs_place
return horse_list, pos_list
#------------------------------------------------------------------------------
def did_horse_hit(horses, position):
for h in horses:
if h.finish_position['position'] == position:
return True
return False
#------------------------------------------------------------------------------
def calculate_bet_cost(legs, amount, unique):
num_bets = 0
if unique:
num_bets = len([ tuple(ea) for ea in itertools.product(*legs) if len(ea) == len(set(ea)) ])
else:
num_bets = len([ tuple(ea) for ea in itertools.product(*legs) ])
return num_bets*amount
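# Worked example (hypothetical horses a, b, c and a $2 base bet):
#   calculate_bet_cost([[a, b, c], [a, b, c]], 2, unique=True) -> 12
# itertools.product yields 9 ordered pairs; unique=True drops the 3 same-horse pairs,
# leaving 6 combinations at $2 each.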
| gpl-3.0 |
robbymeals/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 234 | 12267 | # Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Read more in the :ref:`User Guide <dict_feature_extraction>`.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator: string, optional
Separator string used when constructing new features for one-hot
coding.
sparse: boolean, optional.
Whether transform should produce scipy.sparse matrices.
True by default.
sort: boolean, optional.
Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True,
sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
# collect all the possible feature names and build sparse matrix at
# same time
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
if f in vocab:
indices.append(vocab[f])
values.append(dtype(v))
else:
if fitting:
feature_names.append(f)
vocab[f] = len(vocab)
indices.append(vocab[f])
values.append(dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = frombuffer_empty(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in xrange(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X, y=None):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
if self.sparse:
return self._transform(X, fitting=False)
else:
dtype = self.dtype
vocab = self.vocabulary_
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
Returns
-------
self
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names()
['bar', 'baz', 'foo']
>>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
DictVectorizer(dtype=..., separator='=', sort=True,
sparse=True)
>>> v.get_feature_names()
['bar', 'foo']
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
| bsd-3-clause |
satonreb/machine-learning-using-tensorflow | scripts/02_linear_regression.py | 1 | 6206 | import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
# Data Preparation =====================================================================================================
# Synthetic Data
# Define one-dimensional feature vector
feature = 5.0 * np.random.random(size=(1000, 1)) - 1
# Creates random noise with amplitude 0.1, which we add to the target values
noise = 0.1 * np.random.normal(scale=1, size=feature.shape)
# Defines two-dimensional target array
target_1 = 2.0 * feature + 3.0 + noise
target_2 = -1.2 * feature / 6.0 + 1.01 + noise
target = np.concatenate((target_1, target_2), axis=1)
# Split data sets into Training, Validation and Test sets
X_train_val, X_test, Y_train_val, Y_test = train_test_split(feature, target, test_size=0.33, random_state=42)
X_train, X_val, Y_train, Y_val = train_test_split(X_train_val, Y_train_val, test_size=0.33, random_state=42)
# Linear Regression Graph Construction ===============================================================================
# Resets default graph
tf.reset_default_graph()
# Parameters
X_FEATURES = X_train.shape[1]
Y_FEATURES = Y_train.shape[1]
# Hyperparameters
BATCH_SIZE = 10
LEARNING_RATE = 0.01
EPOCHS = 100
# Get list of indices in the training set
idx = list(range(X_train.shape[0]))
# Determine total number of batches
n_batches = int(np.ceil(len(idx) / BATCH_SIZE))
# Define inputs to the model
with tf.variable_scope("inputs"):
# placeholder for input features
x = tf.placeholder(dtype=tf.float32, shape=[None, X_FEATURES], name="predictors")
# placeholder for true values
y_true = tf.placeholder(dtype=tf.float32, shape=[None, Y_FEATURES], name="target")
# Define linear regression model
with tf.variable_scope("linear_regression"):
# Predictions are performed by Y_FEATURES neurons in the output layer
prediction = tf.layers.dense(inputs=x, units=Y_FEATURES, name="prediction")
# Define loss function as mean squared error (MSE)
loss = tf.losses.mean_squared_error(labels=y_true, predictions=prediction)
train_step = tf.train.GradientDescentOptimizer(learning_rate=LEARNING_RATE).minimize(loss=loss)
# Define metric ops
with tf.variable_scope("metrics"):
# Determine total RMSE
_, rmse = tf.metrics.root_mean_squared_error(labels=y_true, predictions=prediction)
# Define total r_squared score as 1 - Residual sum of squares (rss) / Total sum of squares (tss)
y_true_bar = tf.reduce_mean(input_tensor=y_true, axis=0)
tss = tf.reduce_sum(input_tensor=tf.square(x=tf.subtract(x=y_true, y=y_true_bar)), axis=0)
rss = tf.reduce_sum(input_tensor=tf.square(x=tf.subtract(x=y_true, y=prediction)), axis=0)
r_squared = tf.reduce_mean(tf.subtract(x=1.0, y=tf.divide(x=rss, y=tss)))
# Model Training =======================================================================================================
# Attaches graph to session
sess = tf.InteractiveSession()
# Initialises variables in the graph
init_global = tf.global_variables_initializer()
init_local = tf.local_variables_initializer()
sess.run(fetches=[init_global, init_local])
for e in range(1, EPOCHS + 1):
# At the beginning of each epoch the training data set is reshuffled in order to avoid dependence on
# input data order.
np.random.shuffle(idx)
# Creates a batch generator.
batch_generator = (idx[i * BATCH_SIZE:(1 + i) * BATCH_SIZE] for i in range(n_batches))
# Loops through batches.
for _ in range(n_batches):
# Gets a batch of row indices.
id_batch = next(batch_generator)
# Defines input dictionary
feed = {x: X_train[id_batch], y_true: Y_train[id_batch]}
# Executes the graph
sess.run(fetches=train_step, feed_dict=feed)
if e % 10 == 0:
# Evaluate metrics on training and validation data sets
train_loss = loss.eval(feed_dict={x: X_train, y_true: Y_train})
val_loss = loss.eval(feed_dict={x: X_val, y_true: Y_val})
# Prints the loss to the console
msg = ("Epoch: {e}/{epochs}; ".format(e=e, epochs=EPOCHS) +
"Train MSE: {tr_ls}; ".format(tr_ls=train_loss) +
"Validation MSE: {val_ls}; ".format(val_ls=val_loss))
print(msg)
# Model Testing ========================================================================================================
# Evaluate loss (MSE), total RMSE and R2 on test data
test_loss = loss.eval(feed_dict={x: X_test, y_true: Y_test})
rmse = rmse.eval(feed_dict={x: X_test, y_true: Y_test})
r_squared = r_squared.eval(feed_dict={x: X_test, y_true: Y_test})
# Evaluate prediction on Test data
y_pred = prediction.eval(feed_dict={x: X_test})
# Print Test loss (MSE), total RMSE and R2 in console
msg = "\nTest MSE: {test_loss}, RMSE: {rmse} and R2: {r2}".format(test_loss=test_loss, rmse=rmse, r2=r_squared)
print(msg)
# Calculates RMSE and R2 metrics using sklearn
sk_rmse = np.sqrt(mean_squared_error(y_true=Y_test, y_pred=y_pred))
sk_r2 = r2_score(y_true=Y_test, y_pred=y_pred)
print("Test sklearn RMSE: {rmse} and R2: {r2}".format(rmse=sk_rmse, r2=sk_r2))
# Comparison ===========================================================================================================
# Create array where values are sorted by feature axis.
dpoints = np.asarray(a=sorted(np.concatenate([X_test, y_pred], axis=1), key=lambda s: s[0]))
# Create figure
fig = plt.figure()
fig.suptitle(t="Prediction vs. Ground truth", fontsize=14, fontweight="bold")
# Plot comparison of predicted to ground truth values in the fist column
plt.subplot(211)
plt.plot(dpoints[:, 0], dpoints[:, 1], color="orange", linewidth=2, label="prediction")
plt.scatter(x=X_test, y=Y_test[:, 0], c="black", s=2, label="ground truth")
plt.legend()
plt.ylabel(s="target 1")
# Plot comparison of predicted to ground truth values in the second column
plt.subplot(212)
plt.plot(dpoints[:, 0], dpoints[:, 2], color="orange", linewidth=2, label="prediction")
plt.scatter(x=X_test, y=Y_test[:, 1], c="black", s=2, label="ground truth")
plt.legend()
plt.xlabel(s="feature")
plt.ylabel(s="target 2")
fig.show()
| gpl-3.0 |
sragain/pcmc-nips | infer.py | 1 | 3056 | import numpy as np
import matplotlib.pyplot as plt
from random import random
import lib.mnl_utils,lib.mmnl_utils,lib.pcmc_utils
import pickle
import os,sys
def split_samples(samples,nep,split=.25,alpha=.1):
"""splits a list of samples into nep dictionaries containing their summary
statistics
Arguments:
samples- list of (Set, choice) tuples
nep- number of ways to split input data
split- proportion of data assigned as test data
alpha- amount of additive smoothing applied to data
"""
Ctest = {}
splitidx = int((1-split)*len(samples))
testsamples = samples[splitidx:]
for (S,choice) in testsamples:
if S not in Ctest:
Ctest[S]=np.zeros(len(S))
Ctest[S][choice]+=1
trainsamples = samples[:splitidx]
trainlist = [{} for i in range(nep)]
a = len(trainsamples)/nep
for i in range(nep):
for (S,choice) in trainsamples[i*a:(i+1)*a]:
if S not in trainlist[i]:
trainlist[i][S]=np.ones(len(S))*alpha
trainlist[i][S][choice]+=1
return trainlist,Ctest
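# Minimal usage sketch (hypothetical data): each sample is a (choice set, winner index) pair
# and the choice sets must be hashable (e.g. tuples or frozensets):
#   samples = [((0, 1, 2), 1), ((0, 1), 0), ((0, 1, 2), 2)]
#   trainlist, Ctest = split_samples(samples, nep=2, split=.25, alpha=.1)
# trainlist[i] maps each training choice set S to an additively smoothed count vector over S;
# Ctest holds the raw counts for the held-out samples.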
def run_sims(samples,n=6,nsim=10,nep=5,maxiter=25,split=.25,alpha=.1):
"""
computes learning error of the input models on the input data over nsim simulations,
each consisting of training and computing test error on nep splits of the data
Arguments:
samples- list of samples
n- number of choices in union of choice sets
nsim- number of simulations to run
nep- number of episodes per simulation
maxiter- iterations allowed to scipy.minimize when performing MLE
split- proportion of samples used for testing
alpha- amount of additive smoothing applied
"""
mnl_errors=np.empty((nsim,nep))
mmnl_errors=np.empty((nsim,nep))
pcmc_errors=np.empty((nsim,nep))
for sim in range(nsim):
print 'sim number %d' %(sim+1)
np.random.shuffle(samples)
#throw away any inferred parameters
mnl_params = None;pcmc_params = None;mmnl_params = None
#split data
trainlist,Ctest = split_samples(samples,nep,split=split,alpha=alpha)
Ctrain={}
for ep in range(nep):
#add new training data
for S in trainlist[ep]:
if S not in Ctrain:
Ctrain[S]=trainlist[ep][S]
else:
Ctrain[S]+=trainlist[ep][S]
#infer parameters
mnl_params = lib.mnl_utils.ILSR(C=Ctrain,n=n)
mmnl_params = lib.mmnl_utils.infer(C=Ctrain,n=n,x=mmnl_params,maxiter=maxiter)
pcmc_params = lib.pcmc_utils.infer(C=Ctrain,x=pcmc_params,n=n,maxiter=maxiter,delta=1)
#track errors
mnl_errors[sim,ep]=lib.mnl_utils.comp_error(x=mnl_params,C=Ctest)
mmnl_errors[sim,ep]=lib.mmnl_utils.comp_error(x=mmnl_params,C=Ctest,n=n)
pcmc_errors[sim,ep]=lib.pcmc_utils.comp_error(x=pcmc_params,C=Ctest)
np.save('mnl_errors.npy',mnl_errors)
np.save('mmnl_errors.npy',mmnl_errors)
np.save('pcmc_errors.npy',pcmc_errors)
np.save('pcmc_params.npy',lib.pcmc_utils.comp_Q(pcmc_params))
if __name__=='__main__':
nsim=100;nep=15;n=6;alpha=.1;samples=pickle.load(open('worklist.p','rb'));split=.25
#nsim=100;nep=15;n=8;alpha=5;samples=pickle.load(open('shoplist.p','rb'));split=.25
run_sims(samples=samples,n=n,nsim=nsim,nep=nep,split=split,alpha=alpha)
| mit |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/quandl/get_table.py | 1 | 1132 | from quandl.model.datatable import Datatable
from quandl.errors.quandl_error import LimitExceededError
from .api_config import ApiConfig
from .message import Message
import warnings
import copy
def get_table(datatable_code, **options):
if 'paginate' in options.keys():
paginate = options.pop('paginate')
else:
paginate = None
data = None
page_count = 0
while True:
next_options = copy.deepcopy(options)
next_data = Datatable(datatable_code).data(params=next_options)
if data is None:
data = next_data
else:
data.extend(next_data)
if page_count >= ApiConfig.page_limit:
raise LimitExceededError(Message.WARN_DATA_LIMIT_EXCEEDED)
next_cursor_id = next_data.meta['next_cursor_id']
if next_cursor_id is None:
break
elif paginate is not True and next_cursor_id is not None:
warnings.warn(Message.WARN_PAGE_LIMIT_EXCEEDED, UserWarning)
break
page_count = page_count + 1
options['qopts.cursor_id'] = next_cursor_id
return data.to_pandas()
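# Minimal usage sketch (the datatable code and filter below are placeholders, not real endpoints):
#   import quandl
#   df = quandl.get_table('VENDOR/TABLE', ticker='AAPL', paginate=True)
# With paginate=True every cursor page is fetched (bounded by ApiConfig.page_limit);
# without it only the first page is returned and a warning is issued if more data exists.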
| mit |
vermouthmjl/scikit-learn | sklearn/tests/test_naive_bayes.py | 72 | 19944 | import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_gnb_neg_priors():
"""Test whether an error is raised in case of negative priors"""
clf = GaussianNB(priors=np.array([-1., 2.]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_priors():
"""Test whether the class prior override is properly used"""
clf = GaussianNB(priors=np.array([0.3, 0.7])).fit(X, y)
assert_array_almost_equal(clf.predict_proba([[-0.1, -0.1]]),
np.array([[0.825303662161683,
0.174696337838317]]), 8)
assert_array_equal(clf.class_prior_, np.array([0.3, 0.7]))
def test_gnb_wrong_nb_priors():
""" Test whether an error is raised if the number of prior is different
from the number of class"""
clf = GaussianNB(priors=np.array([.25, .25, .25, .25]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_prior_greater_one():
"""Test if an error is raised if the sum of prior greater than one"""
clf = GaussianNB(priors=np.array([2., 1.]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_prior_large_bias():
"""Test if good prediction when class prior favor largely one class"""
clf = GaussianNB(priors=np.array([0.01, 0.99]))
clf.fit(X, y)
assert_equal(clf.predict([[-0.1, -0.1]]), np.array([2]))
def test_check_update_with_no_data():
""" Test when the partial fit is called without any data"""
# Create an empty array
prev_points = 100
mean = 0.
var = 1.
x_empty = np.empty((0, X.shape[1]))
tmean, tvar = GaussianNB._update_mean_variance(prev_points, mean,
var, x_empty)
assert_equal(tmean, mean)
assert_equal(tvar, var)
def test_gnb_pfit_wrong_nb_features():
"""Test whether an error is raised when the number of feature changes
between two partial fit"""
clf = GaussianNB()
# Fit for the first time the GNB
clf.fit(X, y)
# Partial fit a second time with an incoherent X
assert_raises(ValueError, clf.partial_fit, np.hstack((X, X)), y)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1:]), 2)
assert_equal(clf.predict_proba([X[0]]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0:1]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float64)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([[1, 0]]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
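# With alpha=1 the smoothed estimate is P(x_i = 1 | c) = (count(x_i = 1, c) + 1) / (N_c + 2);
# feature_log_prob_ stores its logarithm, which the manual num/denom check below reconstructs.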
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
# Fit BernoulliBN w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([[0, 1, 1, 0, 0, 1]])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
def test_naive_bayes_scale_invariance():
# Scaling the data should not change the prediction results
iris = load_iris()
X, y = iris.data, iris.target
labels = [GaussianNB().fit(f * X, y).predict(f * X)
for f in [1E-10, 1, 1E10]]
assert_array_equal(labels[0], labels[1])
assert_array_equal(labels[1], labels[2])
| bsd-3-clause |
robin-lai/scikit-learn | sklearn/preprocessing/tests/test_data.py | 71 | 38516 | import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.utils.validation import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
@ignore_warnings
def test_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X = np.ones(5)
assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
"""Test numerical stability of scaling"""
    # np.log(1e-5) is taken because its floating point representation
# was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
    # with 2 more samples, the std computation runs into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
    X[:, 0] = 1.0  # first feature is a constant, non-zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
@ignore_warnings
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# Constant feature.
X = np.zeros(5)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_input_finiteness_validation():
    # Check that non-finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_robust_scaler_2d_arrays():
"""Test robust scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
"""Check RobustScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
"""Check MaxAbsScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# sparse data
X_csr = sparse.csr_matrix(X)
X_trans = scaler.fit_transform(X_csr)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans.A, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv.A)
def test_maxabs_scaler_large_negative_value():
"""Check MaxAbsScaler on toy data with a large negative value"""
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l1', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
        row_sums = np.abs(X_norm).sum(axis=1)
        for i in range(3):
            assert_almost_equal(row_sums[i], 1.0)
        assert_almost_equal(row_sums[3], 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='max', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
    # check that transforming with larger (but in-range) feature values works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]])
)
    # Raise error if handle_unknown is neither 'ignore' nor 'error'.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
| bsd-3-clause |
IntersectAustralia/asvo-tao | core/sageimport_mpi_HDF/ApplyToAllTables.py | 1 | 2341 | import pg
import getpass
import math
import string
import sys
import settingReader
import numpy
import matplotlib.pyplot as plt
import DBConnection
import logging
class ProcessTables(object):
def __init__(self,Options):
'''
Constructor
'''
self.Options=Options
self.DBConnection=DBConnection.DBConnection(Options)
logging.info('Connection to DB is open...')
def CloseConnections(self):
self.DBConnection.CloseConnections()
logging.info('Connection to DB is Closed...')
def ApplyQueryToTable(self,TableName,ServerIndex):
logging.info(TableName)
SQLStat="INSERT INTO GalaxyMapping SELECT globalindex,'@TABLENAME' from @TABLENAME;"
SQLStat= string.replace(SQLStat,"@TABLENAME",TableName)
self.DBConnection.ExecuteNoQuerySQLStatment(SQLStat,ServerIndex)
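    # Hedged illustration (the table name below is an assumption, not from the
    # original script): for TableName='tree_1' the statement sent to the server
    # becomes
    #   INSERT INTO GalaxyMapping SELECT globalindex,'tree_1' from tree_1;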
def ExecuteQueries(self,ServerIndex):
CreateSQLSt="DROP TABLE IF EXISTS GalaxyMapping; CREATE Table GalaxyMapping (globalindex bigint,tablename varchar(500));"
self.DBConnection.ExecuteNoQuerySQLStatment(CreateSQLSt,ServerIndex)
GetTablesListSt="select table_name from information_schema.tables where table_schema='public';"
TablesList=self.DBConnection.ExecuteQuerySQLStatment(GetTablesListSt,ServerIndex)
for Table in TablesList:
TableName=Table[0]
if string.find(TableName,self.Options['PGDB:TreeTablePrefix'])==0:
self.ApplyQueryToTable(TableName,ServerIndex)
CreateSQLSt="ALTER TABLE GalaxyMapping ADD PRIMARY KEY (globalindex); "
self.DBConnection.ExecuteNoQuerySQLStatment(CreateSQLSt,ServerIndex)
if __name__ == '__main__':
logging.basicConfig(filename='ApplytoAllTables.log',level=logging.DEBUG,format='%(asctime)s %(message)s')
logging.info('Starting DB processing')
[CurrentSAGEStruct,Options]=settingReader.ParseParams("settings.xml")
ProcessTablesObj=ProcessTables(Options)
for i in range(0,int(Options['PGDB:ServersCount'])):
ProcessTablesObj.ExecuteQueries(i)
| gpl-3.0 |
RK900/Flu-Prediction | Flu-Models/Flu-Tree.py | 1 | 1428 | '''
Author: Rohan Koodli
A general Flu Prediction algorithm that can be used with H1N1 & H3N2 HA & NA proteins
'''
from Bio import SeqIO
def predictFluSeq(seqs):  # seqs is a list of SeqRecord objects parsed from your FASTA file
#returns cross-val scores and MSE
X0 = []
# adding to X and y
for i in range(0, len(seqs) - 1):
X0.append(seqs[i].seq)
y0 = []
    for j in range(1, len(seqs)):
        y0.append(seqs[j].seq)
from Encoding_v2 import encoding
# Encoding letters into numbers
X = []
for k in range(len(X0)):
encoded_X = encoding(X0[k])
X.append(encoded_X)
y = []
for l in range(len(y0)):
encoded_y = encoding(y0[l])
y.append(encoded_y)
from sklearn import ensemble, cross_validation, metrics
# Cross-Validation
rfr = ensemble.RandomForestRegressor()
rfrscores = cross_validation.cross_val_score(rfr, X, y, cv=2)
cv_score = ("Random Forests cross-validation score", rfrscores)
avg_cv_score = ("Average Cross-Val Accuracy: %0.2f (+/- %0.2f)" % (rfrscores.mean()*100, rfrscores.std() *100))
# Mean Squared Error
X_train,X_test,y_train,y_test = cross_validation.train_test_split(X,y,test_size=0.5,random_state=50)
rfr.fit(X_train,y_train)
y_predicted = rfr.predict(X_test)
mse_score = ('Random Forests MSE:', metrics.mean_squared_error(y_test,y_predicted))
return cv_score, avg_cv_score, mse_score
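# Hedged usage sketch (the file name and FASTA parsing below are assumptions,
# not part of the original script):
#
#     records = list(SeqIO.parse('H3N2_HA.fasta', 'fasta'))
#     cv_score, avg_cv_score, mse_score = predictFluSeq(records)
#     print(avg_cv_score)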
| gpl-3.0 |
pkruskal/scikit-learn | sklearn/lda.py | 72 | 17751 | """
Linear Discriminant Analysis (LDA)
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .base import BaseEstimator, TransformerMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .preprocessing import StandardScaler
__all__ = ['LDA']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
s = sc.std_[:, np.newaxis] * s * sc.std_[np.newaxis, :] # rescale
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be of string or int type')
return s
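# A minimal sketch of the three shrinkage modes accepted above; the random
# data below is an assumption for illustration only:
#
#     rng = np.random.RandomState(0)
#     X_demo = rng.randn(20, 3)
#     _cov(X_demo)                    # empirical covariance (no shrinkage)
#     _cov(X_demo, shrinkage='auto')  # Ledoit-Wolf shrinkage
#     _cov(X_demo, shrinkage=0.5)     # fixed shrinkage in [0, 1]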
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
    means : array-like, shape (n_classes, n_features)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LDA(BaseEstimator, LinearClassifierMixin, TransformerMixin):
"""Linear Discriminant Analysis (LDA).
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default). Does not compute the
covariance matrix, therefore this solver is recommended for
data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation in SVD solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : array, shape (n_features,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.qda.QDA: Quadratic discriminant analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.lda import LDA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LDA()
>>> clf.fit(X, y)
LDA(n_components=None, priors=None, shrinkage=None, solver='svd',
store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_svd(self, X, y, store_covariance=False, tol=1.0e-4):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
        # 1) within (univariate) scaling by within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y, store_covariance=False, tol=1.0e-4):
"""Fit LDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("'store_covariance' was moved to the __init__()"
"method in version 0.16 and will be removed from"
"fit() in version 0.18.", DeprecationWarning)
else:
store_covariance = self.store_covariance
if tol != 1.0e-4:
warnings.warn("'tol' was moved to __init__() method in version"
" 0.16 and will be removed from fit() in 0.18",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = self.priors
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y, store_covariance=store_covariance, tol=tol)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
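# Hedged sketch of using the transform for dimensionality reduction, reusing
# the X, y arrays from the class docstring example (an assumption made here
# for illustration, not part of the original module):
#
#     lda = LDA(solver='svd', n_components=1).fit(X, y)
#     X_proj = lda.transform(X)   # shape (n_samples, 1)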
| bsd-3-clause |
meghana1995/sympy | sympy/physics/quantum/tensorproduct.py | 64 | 13572 | """Abstract tensor product."""
from __future__ import print_function, division
from sympy import Expr, Add, Mul, Matrix, Pow, sympify
from sympy.core.compatibility import u, range
from sympy.core.trace import Tr
from sympy.printing.pretty.stringpict import prettyForm
from sympy.physics.quantum.qexpr import QuantumError
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.commutator import Commutator
from sympy.physics.quantum.anticommutator import AntiCommutator
from sympy.physics.quantum.state import Ket, Bra
from sympy.physics.quantum.matrixutils import (
numpy_ndarray,
scipy_sparse_matrix,
matrix_tensor_product
)
__all__ = [
'TensorProduct',
'tensor_product_simp'
]
#-----------------------------------------------------------------------------
# Tensor product
#-----------------------------------------------------------------------------
_combined_printing = False
def combined_tensor_printing(combined):
"""Set flag controlling whether tensor products of states should be
printed as a combined bra/ket or as an explicit tensor product of different
bra/kets. This is a global setting for all TensorProduct class instances.
Parameters
----------
    combined : bool
When true, tensor product states are combined into one ket/bra, and
when false explicit tensor product notation is used between each
ket/bra.
"""
global _combined_printing
_combined_printing = combined
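# Hedged usage sketch (the kets below are assumptions for illustration; the
# flag only affects the pretty/LaTeX printers defined further down):
#
#     from sympy import pprint
#     from sympy.physics.quantum import Ket
#     combined_tensor_printing(True)    # pretty-print as one combined ket
#     pprint(TensorProduct(Ket('a'), Ket('b')))
#     combined_tensor_printing(False)   # back to an explicit tensor product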
class TensorProduct(Expr):
"""The tensor product of two or more arguments.
For matrices, this uses ``matrix_tensor_product`` to compute the Kronecker
or tensor product matrix. For other objects a symbolic ``TensorProduct``
instance is returned. The tensor product is a non-commutative
multiplication that is used primarily with operators and states in quantum
mechanics.
Currently, the tensor product distinguishes between commutative and non-
commutative arguments. Commutative arguments are assumed to be scalars and
are pulled out in front of the ``TensorProduct``. Non-commutative arguments
remain in the resulting ``TensorProduct``.
Parameters
==========
args : tuple
A sequence of the objects to take the tensor product of.
Examples
========
Start with a simple tensor product of sympy matrices::
>>> from sympy import I, Matrix, symbols
>>> from sympy.physics.quantum import TensorProduct
>>> m1 = Matrix([[1,2],[3,4]])
>>> m2 = Matrix([[1,0],[0,1]])
>>> TensorProduct(m1, m2)
Matrix([
[1, 0, 2, 0],
[0, 1, 0, 2],
[3, 0, 4, 0],
[0, 3, 0, 4]])
>>> TensorProduct(m2, m1)
Matrix([
[1, 2, 0, 0],
[3, 4, 0, 0],
[0, 0, 1, 2],
[0, 0, 3, 4]])
We can also construct tensor products of non-commutative symbols:
>>> from sympy import Symbol
>>> A = Symbol('A',commutative=False)
>>> B = Symbol('B',commutative=False)
>>> tp = TensorProduct(A, B)
>>> tp
AxB
We can take the dagger of a tensor product (note the order does NOT reverse
like the dagger of a normal product):
>>> from sympy.physics.quantum import Dagger
>>> Dagger(tp)
Dagger(A)xDagger(B)
Expand can be used to distribute a tensor product across addition:
>>> C = Symbol('C',commutative=False)
>>> tp = TensorProduct(A+B,C)
>>> tp
(A + B)xC
>>> tp.expand(tensorproduct=True)
AxC + BxC
"""
is_commutative = False
def __new__(cls, *args):
if isinstance(args[0], (Matrix, numpy_ndarray, scipy_sparse_matrix)):
return matrix_tensor_product(*args)
c_part, new_args = cls.flatten(sympify(args))
c_part = Mul(*c_part)
if len(new_args) == 0:
return c_part
elif len(new_args) == 1:
return c_part * new_args[0]
else:
tp = Expr.__new__(cls, *new_args)
return c_part * tp
@classmethod
def flatten(cls, args):
# TODO: disallow nested TensorProducts.
c_part = []
nc_parts = []
for arg in args:
cp, ncp = arg.args_cnc()
c_part.extend(list(cp))
nc_parts.append(Mul._from_args(ncp))
return c_part, nc_parts
def _eval_adjoint(self):
return TensorProduct(*[Dagger(i) for i in self.args])
def _eval_rewrite(self, pattern, rule, **hints):
sargs = self.args
terms = [t._eval_rewrite(pattern, rule, **hints) for t in sargs]
return TensorProduct(*terms).expand(tensorproduct=True)
def _sympystr(self, printer, *args):
from sympy.printing.str import sstr
length = len(self.args)
s = ''
for i in range(length):
if isinstance(self.args[i], (Add, Pow, Mul)):
s = s + '('
s = s + sstr(self.args[i])
if isinstance(self.args[i], (Add, Pow, Mul)):
s = s + ')'
if i != length - 1:
s = s + 'x'
return s
def _pretty(self, printer, *args):
if (_combined_printing and
(all([isinstance(arg, Ket) for arg in self.args]) or
all([isinstance(arg, Bra) for arg in self.args]))):
length = len(self.args)
pform = printer._print('', *args)
for i in range(length):
next_pform = printer._print('', *args)
length_i = len(self.args[i].args)
for j in range(length_i):
part_pform = printer._print(self.args[i].args[j], *args)
next_pform = prettyForm(*next_pform.right(part_pform))
if j != length_i - 1:
next_pform = prettyForm(*next_pform.right(', '))
if len(self.args[i].args) > 1:
next_pform = prettyForm(
*next_pform.parens(left='{', right='}'))
pform = prettyForm(*pform.right(next_pform))
if i != length - 1:
pform = prettyForm(*pform.right(',' + ' '))
pform = prettyForm(*pform.left(self.args[0].lbracket))
pform = prettyForm(*pform.right(self.args[0].rbracket))
return pform
length = len(self.args)
pform = printer._print('', *args)
for i in range(length):
next_pform = printer._print(self.args[i], *args)
if isinstance(self.args[i], (Add, Mul)):
next_pform = prettyForm(
*next_pform.parens(left='(', right=')')
)
pform = prettyForm(*pform.right(next_pform))
if i != length - 1:
if printer._use_unicode:
pform = prettyForm(*pform.right(u('\N{N-ARY CIRCLED TIMES OPERATOR}') + u(' ')))
else:
pform = prettyForm(*pform.right('x' + ' '))
return pform
def _latex(self, printer, *args):
if (_combined_printing and
(all([isinstance(arg, Ket) for arg in self.args]) or
all([isinstance(arg, Bra) for arg in self.args]))):
def _label_wrap(label, nlabels):
return label if nlabels == 1 else r"\left\{%s\right\}" % label
s = r", ".join([_label_wrap(arg._print_label_latex(printer, *args),
len(arg.args)) for arg in self.args])
return r"{%s%s%s}" % (self.args[0].lbracket_latex, s,
self.args[0].rbracket_latex)
length = len(self.args)
s = ''
for i in range(length):
if isinstance(self.args[i], (Add, Mul)):
s = s + '\\left('
# The extra {} brackets are needed to get matplotlib's latex
# rendered to render this properly.
s = s + '{' + printer._print(self.args[i], *args) + '}'
if isinstance(self.args[i], (Add, Mul)):
s = s + '\\right)'
if i != length - 1:
s = s + '\\otimes '
return s
def doit(self, **hints):
return TensorProduct(*[item.doit(**hints) for item in self.args])
def _eval_expand_tensorproduct(self, **hints):
"""Distribute TensorProducts across addition."""
args = self.args
add_args = []
stop = False
for i in range(len(args)):
if isinstance(args[i], Add):
for aa in args[i].args:
tp = TensorProduct(*args[:i] + (aa,) + args[i + 1:])
if isinstance(tp, TensorProduct):
tp = tp._eval_expand_tensorproduct()
add_args.append(tp)
break
if add_args:
return Add(*add_args)
else:
return self
def _eval_trace(self, **kwargs):
indices = kwargs.get('indices', None)
exp = tensor_product_simp(self)
if indices is None or len(indices) == 0:
return Mul(*[Tr(arg).doit() for arg in exp.args])
else:
return Mul(*[Tr(value).doit() if idx in indices else value
for idx, value in enumerate(exp.args)])
def tensor_product_simp_Mul(e):
"""Simplify a Mul with TensorProducts.
    Currently the main use of this is to simplify a ``Mul`` of ``TensorProduct``s
to a ``TensorProduct`` of ``Muls``. It currently only works for relatively
simple cases where the initial ``Mul`` only has scalars and raw
``TensorProduct``s, not ``Add``, ``Pow``, ``Commutator``s of
``TensorProduct``s.
Parameters
==========
e : Expr
A ``Mul`` of ``TensorProduct``s to be simplified.
Returns
=======
e : Expr
A ``TensorProduct`` of ``Mul``s.
Examples
========
This is an example of the type of simplification that this function
performs::
>>> from sympy.physics.quantum.tensorproduct import \
tensor_product_simp_Mul, TensorProduct
>>> from sympy import Symbol
>>> A = Symbol('A',commutative=False)
>>> B = Symbol('B',commutative=False)
>>> C = Symbol('C',commutative=False)
>>> D = Symbol('D',commutative=False)
>>> e = TensorProduct(A,B)*TensorProduct(C,D)
>>> e
AxB*CxD
>>> tensor_product_simp_Mul(e)
(A*C)x(B*D)
"""
# TODO: This won't work with Muls that have other composites of
# TensorProducts, like an Add, Pow, Commutator, etc.
# TODO: This only works for the equivalent of single Qbit gates.
if not isinstance(e, Mul):
return e
c_part, nc_part = e.args_cnc()
n_nc = len(nc_part)
if n_nc == 0 or n_nc == 1:
return e
elif e.has(TensorProduct):
current = nc_part[0]
if not isinstance(current, TensorProduct):
raise TypeError('TensorProduct expected, got: %r' % current)
n_terms = len(current.args)
new_args = list(current.args)
for next in nc_part[1:]:
# TODO: check the hilbert spaces of next and current here.
if isinstance(next, TensorProduct):
if n_terms != len(next.args):
raise QuantumError(
'TensorProducts of different lengths: %r and %r' %
(current, next)
)
for i in range(len(new_args)):
new_args[i] = new_args[i] * next.args[i]
else:
# this won't quite work as we don't want next in the
# TensorProduct
for i in range(len(new_args)):
new_args[i] = new_args[i] * next
current = next
return Mul(*c_part) * TensorProduct(*new_args)
else:
return e
def tensor_product_simp(e, **hints):
"""Try to simplify and combine TensorProducts.
In general this will try to pull expressions inside of ``TensorProducts``.
    It currently only works for relatively simple cases where the products contain
    only scalars and raw ``TensorProducts``, not ``Add``, ``Pow``, or ``Commutators``
    of ``TensorProducts``. It is best to see what it does by showing examples.
Examples
========
>>> from sympy.physics.quantum import tensor_product_simp
>>> from sympy.physics.quantum import TensorProduct
>>> from sympy import Symbol
>>> A = Symbol('A',commutative=False)
>>> B = Symbol('B',commutative=False)
>>> C = Symbol('C',commutative=False)
>>> D = Symbol('D',commutative=False)
First see what happens to products of tensor products:
>>> e = TensorProduct(A,B)*TensorProduct(C,D)
>>> e
AxB*CxD
>>> tensor_product_simp(e)
(A*C)x(B*D)
    This is the core logic of this function, and it works inside powers, sums,
    commutators and anticommutators as well:
>>> tensor_product_simp(e**2)
(A*C)x(B*D)**2
"""
if isinstance(e, Add):
return Add(*[tensor_product_simp(arg) for arg in e.args])
elif isinstance(e, Pow):
return tensor_product_simp(e.base) ** e.exp
elif isinstance(e, Mul):
return tensor_product_simp_Mul(e)
elif isinstance(e, Commutator):
return Commutator(*[tensor_product_simp(arg) for arg in e.args])
elif isinstance(e, AntiCommutator):
return AntiCommutator(*[tensor_product_simp(arg) for arg in e.args])
else:
return e
| bsd-3-clause |
whatbeg/Data-Analysis | MultiClassifier_for_Census_classification/main.py | 2 | 2282 |
from optparse import OptionParser
import os
import sys
import copy
import numpy as np
import pandas as pd
import scipy as sp
def get_data():
train_data = np.loadtxt('train_tensor.data', delimiter=',')
test_data = np.loadtxt('test_tensor.data', delimiter=',')
train_label = np.loadtxt('train_label.data')
test_label = np.loadtxt('test_label.data')
return train_data, train_label, test_data, test_label
def train(Model, model_name):
train_data, train_label, test_data, test_label = get_data()
model = Model
model.fit(train_data, train_label)
predict = model.predict(test_data)
count = 0
for left, right in zip(predict, test_label):
if int(left) == int(right):
count += 1
print("{} accuracy : {}".format(model_name, float(count) / len(test_label)))
def get_model_from_name(model_name):
if model_name == "MultinomialNB":
from sklearn.naive_bayes import MultinomialNB
return MultinomialNB(alpha=1.0), model_name
elif model_name == "BernoulliNB":
from sklearn.naive_bayes import BernoulliNB
return BernoulliNB(alpha=0.1), model_name
elif model_name == "RandomForest":
from sklearn.ensemble import RandomForestClassifier
return RandomForestClassifier(n_estimators=2), model_name
elif model_name == "DecisionTree":
from sklearn import tree
return tree.DecisionTreeClassifier(), model_name
elif model_name == "LogisticRegression":
        from sklearn.linear_model import LogisticRegression
return LogisticRegression(), model_name
elif model_name == "KNN":
from sklearn.neighbors import KNeighborsClassifier
return KNeighborsClassifier(n_neighbors=2), model_name
elif model_name == "NeuralNetwork":
from sklearn.neural_network import MLPClassifier
return MLPClassifier(solver='adam', alpha=1e-2, hidden_layer_sizes=(100, 50), random_state=1), model_name
else:
raise ValueError("Not valid model")
def family():
for MODELs in ["MultinomialNB", "BernoulliNB", "RandomForest", "DecisionTree", "LogisticRegression",
"KNN", "NeuralNetwork"]:
Model, model_name = get_model_from_name(MODELs)
train(Model, model_name)
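# Illustrative sketch (not in the original script): the unused OptionParser
# import above hints at a command-line entry point; a hedged possibility is
# training a single model chosen by name instead of the whole family, e.g.
#
#   parser = OptionParser()
#   parser.add_option("-m", "--model", dest="model_name", default="RandomForest")
#   (options, args) = parser.parse_args()
#   Model, model_name = get_model_from_name(options.model_name)
#   train(Model, model_name)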
family()
| apache-2.0 |
zhangmianhongni/MyPractice | Python/MachineLearning/ud120-projects-master/tools/email_preprocess.py | 10 | 2628 | #!/usr/bin/python
import pickle
import cPickle
import numpy
from sklearn import cross_validation
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectPercentile, f_classif
def preprocess(words_file = "../tools/word_data.pkl", authors_file="../tools/email_authors.pkl"):
"""
this function takes a pre-made list of email texts (by default word_data.pkl)
and the corresponding authors (by default email_authors.pkl) and performs
a number of preprocessing steps:
-- splits into training/testing sets (10% testing)
-- vectorizes into tfidf matrix
-- selects/keeps most helpful features
        after this, the features and labels are put into numpy arrays, which play nice with sklearn functions
4 objects are returned:
-- training/testing features
-- training/testing labels
"""
### the words (features) and authors (labels), already largely preprocessed
### this preprocessing will be repeated in the text learning mini-project
authors_file_handler = open(authors_file, "r")
authors = pickle.load(authors_file_handler)
authors_file_handler.close()
words_file_handler = open(words_file, "r")
word_data = cPickle.load(words_file_handler)
words_file_handler.close()
### test_size is the percentage of events assigned to the test set
### (remainder go into training)
features_train, features_test, labels_train, labels_test = cross_validation.train_test_split(word_data, authors, test_size=0.1, random_state=42)
### text vectorization--go from strings to lists of numbers
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
features_train_transformed = vectorizer.fit_transform(features_train)
features_test_transformed = vectorizer.transform(features_test)
### feature selection, because text is super high dimensional and
### can be really computationally chewy as a result
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(features_train_transformed, labels_train)
features_train_transformed = selector.transform(features_train_transformed).toarray()
features_test_transformed = selector.transform(features_test_transformed).toarray()
### info on the data
print "no. of Chris training emails:", sum(labels_train)
print "no. of Sara training emails:", len(labels_train)-sum(labels_train)
return features_train_transformed, features_test_transformed, labels_train, labels_test
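### Illustrative usage sketch (not part of the original file); it assumes the
### default pickle files exist at the paths above:
###
###   features_train, features_test, labels_train, labels_test = preprocess()
###   print "training matrix shape:", features_train.shape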
| apache-2.0 |
sayan2207/Otaku-senpai | generate-clean-ratings.py | 1 | 1492 | import pandas as pd
import os
from zipfile import ZipFile
import json
ZIP_FILE = "anime-recommendations-database"
DATA_FILE = "rating.csv"
NEW_DATA_FILE = "rating_cleaned.json"
DIR = "DATABASE"
def open_zip(zipfile, datafile):
zipfile = '{0}.zip'.format(zipfile)
print("Extracting " + zipfile)
with ZipFile(zipfile, 'r') as myzip:
myzip.extract(datafile)
def create_rated_database(datafile):
df = pd.read_csv(datafile, encoding = 'utf8')
df = clean_database(df)
return df
def clean_database(df):
data = {}
for i in range( len(df) ):
percent = int((i/len(df))*10000)/100
print(str(percent) + "% done")
r = int(df.loc[i, "rating"])
u = str(df.loc[i,"user_id"])
a = str(df.loc[i,"anime_id"])
if r != -1:
if u not in data:
data[u] = {}
data[u][a] = r
print("100% done")
return data
def generate_rated_database(df, datafile, dirc):
if not os.path.exists(dirc):
os.makedirs(dirc)
    datafile = os.path.join(dirc, datafile)
fp= open(datafile, "w")
json.dump(df, fp, indent=4)
fp.close()
print("Database created in " + datafile)
def clean_traces(datafiles):
for datafile in datafiles:
os.remove(datafile, dir_fd=None)
open_zip(ZIP_FILE, DATA_FILE)
df = create_rated_database(DATA_FILE)
generate_rated_database(df, NEW_DATA_FILE, DIR)
clean_traces([DATA_FILE])
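# Illustrative sketch (not in the original script): the cleaned JSON maps
# user_id -> {anime_id: rating}, so it can be reloaded and queried like:
#
#   with open(os.path.join(DIR, NEW_DATA_FILE)) as fp:
#       ratings = json.load(fp)
#   print(ratings.get("1", {}))  # all non -1 ratings given by user "1", if any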
| mit |
av8ramit/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimators_test.py | 46 | 6682 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom optimizer tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.python.training import training_util
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"transformed_x": constant_op.constant([9.])
}, {
"transformed_y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["transformed_x"]
loss = constant_op.constant([2.])
update_global_step = training_util.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
metrics = estimator.evaluate(
input_fn=input_fn,
steps=1,
metrics={
"label": metric_spec.MetricSpec(lambda predictions, labels: labels)
})
# labels = transformed_y (99)
self.assertEqual(99., metrics["label"])
def testFeatureEngineeringFnWithSameName(self):
def input_fn():
return {
"x": constant_op.constant(["9."])
}, {
"y": constant_op.constant(["99."])
}
def feature_engineering_fn(features, labels):
# Github #12205: raise a TypeError if called twice.
_ = string_ops.string_split(features["x"])
features["x"] = constant_op.constant([9.])
labels["y"] = constant_op.constant([99.])
return features, labels
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["x"]
loss = constant_op.constant([2.])
update_global_step = training_util.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
metrics = estimator.evaluate(
input_fn=input_fn,
steps=1,
metrics={
"label": metric_spec.MetricSpec(lambda predictions, labels: labels)
})
# labels = transformed_y (99)
self.assertEqual(99., metrics["label"])
def testNoneFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"x": constant_op.constant([9.])
}, {
"y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["x"]
loss = constant_op.constant([2.])
update_global_step = training_util.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator_with_fe_fn = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=1)
estimator_without_fe_fn = estimator_lib.Estimator(model_fn=model_fn)
estimator_without_fe_fn.fit(input_fn=input_fn, steps=1)
# predictions = x
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict(input_fn=input_fn, as_iterable=True))
self.assertEqual(9., prediction_with_fe_fn)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict(input_fn=input_fn, as_iterable=True))
self.assertEqual(1., prediction_without_fe_fn)
class CustomOptimizer(test.TestCase):
"""Custom optimizer tests."""
def testIrisMomentum(self):
random.seed(42)
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
def custom_optimizer():
return momentum_lib.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
classifier = learn.DNNClassifier(
hidden_units=[10, 20, 10],
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
n_classes=3,
optimizer=custom_optimizer,
config=learn.RunConfig(tf_random_seed=1))
classifier.fit(x_train, y_train, steps=400)
predictions = np.array(list(classifier.predict_classes(x_test)))
score = accuracy_score(y_test, predictions)
self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
matthiascy/panda3d | direct/src/ffi/jGenPyCode.py | 8 | 3101 | ##############################################################
#
# This module should be invoked by a shell-script that says:
#
# python -c "import direct.ffi.jGenPyCode" <arguments>
#
# Before invoking python, the shell-script may need to set
# these environment variables, to make sure that everything
# can be located appropriately.
#
# PYTHONPATH
# PATH
# LD_LIBRARY_PATH
#
##############################################################
import sys, os
##############################################################
#
# Locate the 'direct' tree and the 'pandac' tree.
#
##############################################################
DIRECT=None
PANDAC=None
for dir in sys.path:
if (DIRECT is None):
if (dir != "") and os.path.exists(os.path.join(dir,"direct")):
DIRECT=os.path.join(dir,"direct")
if (PANDAC is None):
if (dir != "") and (os.path.exists(os.path.join(dir,"pandac"))):
PANDAC=os.path.join(dir,"pandac")
if (DIRECT is None):
sys.exit("Could not locate the 'direct' python modules")
if (PANDAC is None):
sys.exit("Could not locate the 'pandac' python modules")
##############################################################
#
# Locate direct/src/extensions.
#
# It could be inside the direct tree. It may be underneath
# a 'src' subdirectory. Or, the direct tree may actually be
# a stub that points to the source tree.
#
##############################################################
EXTENSIONS=None
if (EXTENSIONS is None):
if os.path.isdir(os.path.join(DIRECT,"src","extensions_native")):
EXTENSIONS=os.path.join(DIRECT,"src","extensions_native")
if (EXTENSIONS is None):
if os.path.isdir(os.path.join(DIRECT,"extensions_native")):
EXTENSIONS=os.path.join(DIRECT,"extensions_native")
if (EXTENSIONS is None):
if os.path.isdir(os.path.join(DIRECT,"..","..","direct","src","extensions_native")):
EXTENSIONS=os.path.join(DIRECT,"..","..","direct","src","extensions_native")
if (EXTENSIONS is None):
sys.exit("Could not locate direct/src/extensions_native")
##############################################################
#
# Call genpycode with default paths.
#
##############################################################
from direct.ffi import DoGenPyCode
DoGenPyCode.outputCodeDir = PANDAC
DoGenPyCode.outputHTMLDir = os.path.join(PANDAC,"..","doc")
DoGenPyCode.directDir = DIRECT
DoGenPyCode.extensionsDir = EXTENSIONS
DoGenPyCode.interrogateLib = r'libdtoolconfig'
DoGenPyCode.codeLibs = ['libpandaexpress','libpanda','libpandaphysics','libpandafx','libp3direct','libpandaskel','libpandaegg','libpandaode']
DoGenPyCode.etcPath = [os.path.join(PANDAC,"input")]
DoGenPyCode.pythonSourcePath = [DIRECT]
DoGenPyCode.native = 1
#print "outputDir = ", DoGenPyCode.outputDir
#print "directDir = ", DoGenPyCode.directDir
#print "extensionsDir = ", DoGenPyCode.extensionsDir
#print "interrogateLib = ", DoGenPyCode.interrogateLib
#print "codeLibs = ", DoGenPyCode.codeLibs
#print "etcPath = ", DoGenPyCode.etcPath
#print "native = ", DoGenPyCode.native
DoGenPyCode.run()
os._exit(0)
| bsd-3-clause |
SzymonPrajs/astrotools | astrotools/reader.py | 1 | 6978 | import os
import json
import numpy as np
import pandas as pd
from .filters import zero_point, mask_present_filters, mask_present_instrument
__all__ = ['read_slap',
'read_array',
'slice_band',
'slice_band_generator',
'normalize_lc',
'read_osc',
'rename_data_columns']
def read_slap(file_name: str) -> pd.DataFrame:
"""
Read light curve data files as originally formatted for SLAP.
This is a basic format of: mjd, flux, flux_err, band
Parameters
----------
file_name : str
Path to the data file
Returns
-------
data : `pandas.DataFrame`
DataFrame object containing a light curve in the data structure
throughout this repository
"""
if not os.path.exists(file_name):
        raise ValueError('Path does not exist: ' + file_name)
data = pd.read_csv(file_name, header=None, delim_whitespace=True, comment='#')
data.columns = ['mjd', 'flux', 'flux_err', 'band']
return data
def read_array(mjd=None, flux=None, flux_err=None, band=None) -> pd.DataFrame:
"""
Convert arrays of MJD, flux, flux_err and band into a common format
pandas DataFrame object.
Parameters
----------
mjd : array-like
Numerical array-like object of MJD values.
Must be specified.
flux : array-like
        Numerical array-like object of flux values.
Must be specified.
flux_err : numerical or array-like
Either a single numerical or array-like object of flux_err.
If array-like then the shape of this array must match that of `flux`.
band : str or array-like
Either a single string or array-like object of bands.
If array-like then the shape of this array must match that of `flux`.
Returns
-------
df : `pandas.DataFrame`
DataFrame object containing light curve data that can be used
in most routines throughout this repository.
"""
df = pd.DataFrame()
if mjd is None or flux is None or mjd.shape != flux.shape:
raise ValueError('Data must contain both MJD and flux values and be of the same dimension')
try:
hasattr(mjd, '__iter__')
hasattr(flux, '__iter__')
except TypeError:
print('`mjd` and `flux` must be array-like')
try:
df['mjd'] = np.array(mjd).astype(float)
df['flux'] = np.array(flux).astype(float)
except ValueError:
print('`mjd` and `flux` must be numerical')
if hasattr(flux_err, '__iter__') and flux.shape == flux_err.shape:
df['flux_err'] = np.array(flux_err).astype(float)
else:
df['flux_err'] = float(flux_err)
df['band'] = band
return df
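# Illustrative usage sketch (not part of the original module):
#
#   import numpy as np
#   lc = read_array(mjd=np.array([57000.0, 57001.0, 57002.0]),
#                   flux=np.array([1.0, 2.5, 1.8]),
#                   flux_err=0.1, band='r')
#   # -> DataFrame with columns: mjd, flux, flux_err, band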
def read_osc(json_file_path):
"""
Read light curves from Open Supernova Catalogue (OSC)
JSON files and parse into the common DataFrame format.
Parameters
----------
json_file_path : str
Path to the OSC JSON file
Returns
-------
data : `pandas.DataFrame`
DataFrame object in the common format
"""
data = None
if not os.path.exists(json_file_path):
        raise ValueError('File does not exist: ' + json_file_path)
with open(json_file_path) as json_file:
json_data = json.load(json_file)
object_name = list(json_data.keys())[0]
if 'photometry' in json_data[object_name]:
data = pd.DataFrame(json_data[object_name]['photometry'])
else:
raise ValueError('No photometry found in the JSON file')
data = rename_data_columns(data)
data = data[mask_present_filters(data['band'])]
data = data[mask_present_instrument(data['telescope'])]
data['mjd'] = data['mjd'].astype(float)
data['zp'] = data.apply(lambda x: zero_point(x['band']), axis=1)
data['flux'] = 10 ** (-0.4 * (data['mag'].astype(float) + data['zp']))
data['flux_err'] = data['mag_err'].astype(float) * 0.921034 * data['flux']
return data
def slice_band(data, band=None):
"""
Return a slice of the input DataFrame or a dictionary
of DataFrames indexed by the filter name.
Parameters
----------
data : `pandas.DataFrame`
DataFrame object in the common format containing:
MJD, flux, flux_err, band
band : str or array-like, optional
If a single band is provided a single DataFrame object
will be returned, with more than one filter resulting
in a dictionary of DataFrame objects.
Returns
-------
data_dict : `pandas.DataFrame` or dict
"""
data_list = list(slice_band_generator(data, band=band))
if len(data_list) == 1:
return data_list[0]
else:
data_dict = {}
for data in data_list:
band_name = data['band'].unique()[0]
data_dict[band_name] = data
return data_dict
def slice_band_generator(data, band=None):
"""
    Generator returning a series of DataFrame objects,
each containing the light curve for just one, unique band.
Parameters
----------
band : str or array-like, optional
        If band is specified, a generator yielding DataFrame objects
        for only that band (or bands) is returned.
data : `pandas.DataFrame`
DataFrame object in the common format containing:
MJD, flux, flux_err, band
Yields
------
data : `pandas.DataFrame`
Object containing the light curve for one, unique band.
"""
if band is None:
unique_bands = data['band'].unique()
elif hasattr(band, '__iter__'):
unique_bands = band
else:
unique_bands = [band]
for band_iter in unique_bands:
yield data.query('band == "{}"'.format(band_iter))
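# Illustrative usage sketch (not part of the original module):
#
#   for band_lc in slice_band_generator(data):
#       print(band_lc['band'].unique()[0], len(band_lc))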
def normalize_lc(data):
"""
Normalise light curve flux and flux_err
Parameters
----------
data : pandas.DataFrame
DataFrame object to be normalised. Must contain:
flux, flux_err
"""
norm_factor = data['flux'].max()
data['flux'] /= norm_factor
data['flux_err'] /= norm_factor
data['norm_factor'] = norm_factor
return data
def rename_data_columns(data):
# TODO: docstring
for col_name in data.columns:
col_name_lower = col_name.lower()
new_col_name = col_name_lower
if col_name_lower in ['time', 'jd', 't']:
new_col_name = 'mjd'
elif col_name_lower in ['fluxerr', 'err', 'error', 'flux_error']:
new_col_name = 'flux_err'
elif col_name_lower in ['filter', 'flt']:
new_col_name = 'band'
elif col_name_lower in ['magnitude']:
new_col_name = 'mag'
elif col_name_lower in ['magerr', 'e_magnitude', 'magnitude_error', 'mag_error']:
new_col_name = 'mag_err'
elif col_name_lower in ['zeropoint', 'zero_point']:
new_col_name = 'zp'
data.rename(index=str, columns={col_name: new_col_name}, inplace=True)
return data
| mit |
platinhom/ManualHom | Coding/Python/scipy-html-0.16.1/generated/scipy-signal-welch-1.py | 1 | 1135 | from scipy import signal
import numpy as np
import matplotlib.pyplot as plt
# Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
# 0.001 V**2/Hz of white noise sampled at 10 kHz.
fs = 10e3
N = 1e5
amp = 2*np.sqrt(2)
freq = 1234.0
noise_power = 0.001 * fs / 2
time = np.arange(N) / fs
x = amp*np.sin(2*np.pi*freq*time)
x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
# Compute and plot the power spectral density.
f, Pxx_den = signal.welch(x, fs, nperseg=1024)
plt.semilogy(f, Pxx_den)
plt.ylim([0.5e-3, 1])
plt.xlabel('frequency [Hz]')
plt.ylabel('PSD [V**2/Hz]')
plt.show()
# If we average the last half of the spectral density, to exclude the
# peak, we can recover the noise power on the signal.
np.mean(Pxx_den[256:])
# 0.0009924865443739191
# Now compute and plot the power spectrum.
f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
plt.figure()
plt.semilogy(f, np.sqrt(Pxx_spec))
plt.xlabel('frequency [Hz]')
plt.ylabel('Linear spectrum [V RMS]')
plt.show()
# The peak height in the power spectrum is an estimate of the RMS amplitude.
np.sqrt(Pxx_spec.max())
# 2.0077340678640727
| gpl-2.0 |
fierval/KaggleMalware | Learning/predict.py | 1 | 4473 | from tr_utils import write_to_csv, time_now_str, vote_reduce, vote
from SupervisedLearning import SKSupervisedLearning
from train_files import TrainFiles
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.svm import SVC
from os import path
from sklearn.metrics import log_loss
from sklearn.decomposition.pca import PCA
from sklearn.calibration import CalibratedClassifierCV
from train_nn import predict_nn, _createDataSet, _createUnsupervisedDataSet
import numpy as np
import matplotlib.pyplot as plt
prediction = True
doTrees = True
def predict():
tf = TrainFiles('/kaggle/malware/train/mix_lbp', val_path = '/kaggle/malware/test/mix_lbp', labels_file = "/kaggle/malware/trainLabels.csv")
X_train, Y_train, X_test, Y_test = tf.prepare_inputs()
sl_svm = SKSupervisedLearning(SVC, X_train, Y_train, X_test, Y_test)
sl_svm.fit_standard_scaler()
sl_svm.train_params = {'C': 100, 'gamma': 0.01, 'probability': True}
print "Starting SVM: ", time_now_str()
_, ll_svm = sl_svm.fit_and_validate()
print "SVM score: {0:.4f}".format(ll_svm if not prediction else _)
print "Finished training SVM: ", time_now_str()
# neural net
print "Starting NN: ", time_now_str()
trndata = _createDataSet(sl_svm.X_train_scaled, Y_train, one_based = True)
tstdata = _createUnsupervisedDataSet(sl_svm.X_test_scaled)
fnn = predict_nn(trndata)
proba_nn = fnn.activateOnDataset(tstdata)
print "Finished training NN: ", time_now_str()
# no validation labels on actual prediction
if doTrees:
# random forest
sl_ccrf = SKSupervisedLearning(CalibratedClassifierCV, X_train, Y_train, X_test, Y_test)
sl_ccrf.train_params = \
{'base_estimator': RandomForestClassifier(**{'n_estimators' : 7500, 'max_depth' : 200}), 'cv': 10}
sl_ccrf.fit_standard_scaler()
print "Starting on RF: ", time_now_str()
ll_ccrf_trn, ll_ccrf_tst = sl_ccrf.fit_and_validate()
print "RF score: {0:.4f}".format(ll_ccrf_tst if not prediction else ll_ccrf_trn)
sl_ccrf.proba_test.tofile("/temp/sl_ccrf.prob")
sl_svm.proba_test.tofile("/temp/sl_svm.prob")
proba_nn.tofile("/temp/nn.prob")
print "Finished training RF: ", time_now_str()
if prediction:
proba = vote([sl_svm.proba_test, sl_ccrf.proba_test, proba_nn], [2./3., 1./6., 1./3.])
out_labels = "/kaggle/malware/submission33.csv"
task_labels = "/kaggle/malware/testLabels.csv"
labels = [path.splitext(t)[0] for t in tf.get_val_inputs()]
out = write_to_csv(task_labels, labels, proba, out_labels)
else:
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=2).fit(sl_svm.X_train_scaled)
X = pca.transform(sl_svm.X_train_scaled)
        x = np.arange(X[:, 0].min() - 1, X[:, 0].max() + 1, 1)
y = np.arange(X[:, 1].min() - 1, X[:, 1].max() + 1, 1)
xx, yy = np.meshgrid(x, y)
# title for the plots
titles = ['SVC with rbf kernel',
'Random Forest \n'
'n_components=7500',
'Decision Trees \n'
'n_components=7500']
#plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((sl_svm.clf, sl_rfc.clf, sl_trees.clf)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(1, 3, i + 1)
clf.fit(X, Y_train)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
def write_canned_predictions(tf, task_labels, out_labels):
proba_svm = np.fromfile('/kaggle/malware/results/sl_svm.prob').reshape(-1, 9, order = 'C')
proba_rf = np.fromfile('/kaggle/malware/results/sl_ccrf.prob').reshape(-1, 9, order = 'C')
proba = vote([proba_svm, proba_rf], [4./5., 1./3.])
labels = [path.splitext(t)[0] for t in tf.get_val_inputs()]
out = write_to_csv(task_labels, labels, proba, out_labels)
| mit |
gclenaghan/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 26 | 8544 | import numpy as np
from scipy import linalg
from sklearn.decomposition import (NMF, ProjectedGradientNMF,
non_negative_factorization)
from sklearn.decomposition import nmf # For testing internals
from scipy.sparse import csc_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
from sklearn.base import clone
random_state = np.random.mtrand.RandomState(0)
def test_initialize_nn_output():
# Test that initialization does not return negative values
data = np.abs(random_state.randn(10, 10))
for init in ('random', 'nndsvd', 'nndsvda', 'nndsvdar'):
W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
@ignore_warnings
def test_parameter_checking():
A = np.ones((2, 2))
name = 'spam'
msg = "Invalid solver parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(solver=name).fit, A)
msg = "Invalid init parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(init=name).fit, A)
msg = "Invalid sparseness parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(sparseness=name).fit, A)
msg = "Negative values in data passed to"
assert_raise_message(ValueError, msg, NMF().fit, -A)
assert_raise_message(ValueError, msg, nmf._initialize_nmf, -A,
2, 'nndsvd')
clf = NMF(2, tol=0.1).fit(A)
assert_raise_message(ValueError, msg, clf.transform, -A)
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10, init='nndsvd')
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'nndsvda' and 'nndsvdar' differ from basic
# 'nndsvd' only where the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, init='nndsvd')
Wa, Ha = nmf._initialize_nmf(data, 10, init='nndsvda')
War, Har = nmf._initialize_nmf(data, 10, init='nndsvdar',
random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@ignore_warnings
def test_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for solver in ('pg', 'cd'):
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = NMF(n_components=2, solver=solver, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
@ignore_warnings
def test_nmf_fit_close():
# Test that the fit is not too far away
for solver in ('pg', 'cd'):
pnmf = NMF(5, solver=solver, init='nndsvd', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
@ignore_warnings
def test_nmf_transform():
# Test that NMF.transform returns close values
A = np.abs(random_state.randn(6, 5))
for solver in ('pg', 'cd'):
m = NMF(solver=solver, n_components=4, init='nndsvd', random_state=0)
ft = m.fit_transform(A)
t = m.transform(A)
assert_array_almost_equal(ft, t, decimal=2)
@ignore_warnings
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
NMF(n_components=15, random_state=0, tol=1e-2).fit(A)
@ignore_warnings
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
tol = 1e-2
A = np.abs(random_state.randn(10, 10))
m = ProjectedGradientNMF(n_components=5, random_state=0, tol=tol).fit(A)
data_sp = ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0,
tol=tol).fit(A).data_sparseness_
comp_sp = ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0,
tol=tol).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
@ignore_warnings
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
A_sparse = csc_matrix(A)
for solver in ('pg', 'cd'):
est1 = NMF(solver=solver, n_components=5, init='random',
random_state=0, tol=1e-2)
est2 = clone(est1)
W1 = est1.fit_transform(A)
W2 = est2.fit_transform(A_sparse)
H1 = est1.components_
H2 = est2.components_
assert_array_almost_equal(W1, W2)
assert_array_almost_equal(H1, H2)
@ignore_warnings
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
A = np.abs(random_state.randn(3, 2))
A[A > 1.0] = 0
A = csc_matrix(A)
for solver in ('pg', 'cd'):
model = NMF(solver=solver, random_state=0, tol=1e-4, n_components=2)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
assert_array_almost_equal(A_fit_tr, A_tr, decimal=1)
@ignore_warnings
def test_non_negative_factorization_consistency():
# Test that the function is called in the same way, either directly
# or through the NMF class
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
for solver in ('pg', 'cd'):
W_nmf, H, _ = non_negative_factorization(
A, solver=solver, random_state=1, tol=1e-2)
W_nmf_2, _, _ = non_negative_factorization(
A, H=H, update_H=False, solver=solver, random_state=1, tol=1e-2)
model_class = NMF(solver=solver, random_state=1, tol=1e-2)
W_cls = model_class.fit_transform(A)
W_cls_2 = model_class.transform(A)
assert_array_almost_equal(W_nmf, W_cls, decimal=10)
assert_array_almost_equal(W_nmf_2, W_cls_2, decimal=10)
@ignore_warnings
def test_non_negative_factorization_checking():
A = np.ones((2, 2))
# Test parameters checking is public function
nnmf = non_negative_factorization
msg = "Number of components must be positive; got (n_components='2')"
assert_raise_message(ValueError, msg, nnmf, A, A, A, '2')
msg = "Negative values in data passed to NMF (input H)"
assert_raise_message(ValueError, msg, nnmf, A, A, -A, 2, 'custom')
msg = "Negative values in data passed to NMF (input W)"
assert_raise_message(ValueError, msg, nnmf, A, -A, A, 2, 'custom')
msg = "Array passed to NMF (input H) is full of zeros"
assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom')
def test_safe_compute_error():
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
A_sparse = csc_matrix(A)
W, H = nmf._initialize_nmf(A, 5, init='random', random_state=0)
error = nmf._safe_compute_error(A, W, H)
error_sparse = nmf._safe_compute_error(A_sparse, W, H)
assert_almost_equal(error, error_sparse)
| bsd-3-clause |
SparkFreedom/DataDive | getting_started_with_d3/cleaning_code/parse_waittime.py | 1 | 3438 | import xml.dom.minidom
import json
import pandas
import datetime
from operator import itemgetter
import time
# For this example we had to load and parse the NYCT Performance XML file.
# Essentially, we need to parse the XML, pull out the indicators we want, and
# then save them to a JSON which is much easier to play with in javascript.
# For this particular example, we are actually using two JSON files, which will
# be loaded separately. The second JSON is the mean of data in the first, so we
# need only parse the XML once.
# data available at http://www.mta.info/developers/data/Performance_XML_Data.zip
# use the minidom to parse the XML.
dom = xml.dom.minidom.parse('Performance_NYCT.xml')
# pull out all the indicators in the XML
indicators = dom.documentElement.getElementsByTagName('INDICATOR')
# this is a little function that just gets the data out of a particular indicator.
# it just saves us a bit of typing...
def get(indicator, name):
return indicator.getElementsByTagName(name)[0].childNodes[0].data
# we only want those wait assessments associated with a specific line, so
# we include that extra "-" which doesn't appear in other indicator names
to_pull = 'Subway Wait Assessment -'
# initialise the list that we will dump to a JSON
out = []
for indicator in indicators:
# if this is the right sort of indicator...
if to_pull in indicator.getElementsByTagName('INDICATOR_NAME')[0].childNodes[0].data:
try:
# we get the name first as we need to use it for display, but reverse
# it for the #id
name = get(indicator, 'INDICATOR_NAME').split('-')[1].strip()
# we can't use CSS selectors that start with a number! So we gotta
# make something like line_2 instead of 2_line.
line_id = name.split(' ')
line_id.reverse()
line_id = '_'.join(line_id)
# the time index here is month and year, which are in separate tags for
            # some reason, making our lives unnecessarily complicated
month = get(indicator, 'PERIOD_MONTH')
year = get(indicator, 'PERIOD_YEAR')
# note that the timestamp is in microseconds for javascript
timestamp = int(time.mktime(
datetime.datetime.strptime(month+year,"%m%Y").timetuple()
)) * 1000
out.append({
"line_name": name,
"line_id": line_id,
"late_percent": float(get(indicator, 'MONTHLY_ACTUAL')),
"time": timestamp,
})
except IndexError:
# sometimes a tag is empty, so we just chuck out that month
pass
# filter out zero entries
out = [
o for o in out if o['late_percent']
if 'S' not in o['line_name']
if 'V' not in o['line_name']
if 'W' not in o['line_name']
]
# dump the data
json.dump(out, open('../viz/data/subway_wait.json','w'))
# compute the mean per line (easy with pandas!)
# build the data frame
df = pandas.DataFrame(out)
# groupby line and take the mean
df_mean = df.groupby('line_name').mean()['late_percent']
# build up the JSON object (one day pandas will have .to_json())
out = [
{"line_id":'_'.join(reversed(d[0].split(' '))), "line_name":d[0], "mean": d[1]}
for d in df_mean.to_dict().items()
]
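# Hedged aside (not in the original script): pandas Series now expose
# to_json(), so json.loads(df_mean.to_json()) would give {line_name: mean, ...};
# the explicit dict-of-records above is kept because the visualization expects
# line_id/line_name/mean keys.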
out.sort(key=itemgetter('line_name'))
# dump the data
json.dump(out, open('../viz/data/subway_wait_mean.json','w')) | mit |
f3r/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 73 | 1232 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
lw = 2
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], color='gold', lw=lw,
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), color='teal', lw=lw,
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), color='yellowgreen', lw=lw,
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), color='cornflowerblue', lw=lw,
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, color='orange', lw=lw,
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), color='darkorchid', lw=lw,
linestyle='--', label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
cwu2011/scikit-learn | sklearn/linear_model/tests/test_omp.py | 272 | 7752 | # Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
| bsd-3-clause |
Caoimhinmg/PmagPy | programs/quick_hyst.py | 1 | 8596 | #!/usr/bin/env python
from __future__ import print_function
from builtins import input
from builtins import range
import sys
import os
import numpy
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import pmagpy.pmag as pmag
import pmagpy.pmagplotlib as pmagplotlib
import pmagpy.new_builder as nb
def main():
"""
NAME
quick_hyst.py
DESCRIPTION
makes plots of hysteresis data
SYNTAX
quick_hyst.py [command line options]
OPTIONS
-h prints help message and quits
-f: specify input file, default is measurements.txt
-spc SPEC: specify specimen name to plot and quit
-sav save all plots and quit
-fmt [png,svg,eps,jpg]
"""
args = sys.argv
if "-h" in args:
print(main.__doc__)
sys.exit()
plots = 0
pltspec = ""
verbose = pmagplotlib.verbose
#version_num = pmag.get_version()
dir_path = pmag.get_named_arg_from_sys('-WD', '.')
dir_path = os.path.realpath(dir_path)
meas_file = pmag.get_named_arg_from_sys('-f', 'measurements.txt')
fmt = pmag.get_named_arg_from_sys('-fmt', 'png')
if '-sav' in args:
verbose = 0
plots = 1
if '-spc' in args:
ind = args.index("-spc")
pltspec = args[ind+1]
verbose = 0
plots = 1
#
con = nb.Contribution(dir_path, read_tables=['measurements'],
custom_filenames={'measurements': meas_file})
# get as much name data as possible (used for naming plots)
if not 'measurements' in con.tables:
print("-W- No measurement file found")
return
con.propagate_name_down('location', 'measurements')
if 'measurements' not in con.tables:
print(main.__doc__)
print('bad file')
sys.exit()
meas_container = con.tables['measurements']
#meas_df = meas_container.df
#
# initialize some variables
# define figure numbers for hyst,deltaM,DdeltaM curves
HystRecs = []
HDD = {}
HDD['hyst'] = 1
pmagplotlib.plot_init(HDD['hyst'], 5, 5)
#
# get list of unique experiment names and specimen names
#
sids = []
hyst_data = meas_container.get_records_for_code('LP-HYS')
#experiment_names = hyst_data['experiment_name'].unique()
if not len(hyst_data):
print("-W- No hysteresis data found")
return
sids = hyst_data['specimen'].unique()
# if 'treat_temp' is provided, use that value, otherwise assume 300
hyst_data['treat_temp'].where(hyst_data['treat_temp'], '300', inplace=True)
# start at first specimen, or at provided specimen ('-spc')
k = 0
if pltspec != "":
try:
print(sids)
k = list(sids).index(pltspec)
except ValueError:
print('-W- No specimen named: {}.'.format(pltspec))
print('-W- Please provide a valid specimen name')
return
intlist = ['magn_moment', 'magn_volume', 'magn_mass']
while k < len(sids):
locname, site, sample, synth = '', '', '', ''
s = sids[k]
if verbose:
print(s, k + 1, 'out of ', len(sids))
# B, M for hysteresis, Bdcd,Mdcd for irm-dcd data
B, M = [], []
# get all measurements for this specimen
spec = hyst_data[hyst_data['specimen'] == s]
# get names
if 'location' in spec:
locname = spec['location'][0]
if 'site' in spec:
site = spec['sample'][0]
if 'sample' in spec:
sample = spec['sample'][0]
# get all records with non-blank values in any intlist column
# find intensity data
for int_column in intlist:
if int_column in spec.columns:
int_col = int_column
break
meas_data = spec[spec[int_column].notnull()]
if len(meas_data) == 0:
break
#
c = ['k-', 'b-', 'c-', 'g-', 'm-', 'r-', 'y-']
cnum = 0
Temps = []
xlab, ylab, title = '', '', ''
Temps = meas_data['treat_temp'].unique()
for t in Temps:
print('working on t: ', t)
t_data = meas_data[meas_data['treat_temp'] == t]
m = int_col
B = t_data['meas_field_dc'].astype(float).values
M = t_data[m].astype(float).values
# now plot the hysteresis curve(s)
#
if len(B) > 0:
B = numpy.array(B)
M = numpy.array(M)
if t == Temps[-1]:
xlab = 'Field (T)'
ylab = m
title = 'Hysteresis: ' + s
if t == Temps[0]:
pmagplotlib.clearFIG(HDD['hyst'])
pmagplotlib.plotXY(HDD['hyst'],B,M,sym=c[cnum],xlab=xlab,ylab=ylab,title=title)
pmagplotlib.plotXY(HDD['hyst'],[1.1*B.min(),1.1*B.max()],[0,0],sym='k-',xlab=xlab,ylab=ylab,title=title)
pmagplotlib.plotXY(HDD['hyst'],[0,0],[1.1*M.min(),1.1*M.max()],sym='k-',xlab=xlab,ylab=ylab,title=title)
if verbose:
pmagplotlib.drawFIGS(HDD)
cnum += 1
if cnum == len(c):
cnum = 0
#
files = {}
if plots:
if pltspec != "":
s = pltspec
for key in list(HDD.keys()):
if pmagplotlib.isServer:
if synth == '':
files[key] = "LO:_"+locname+'_SI:_'+site+'_SA:_'+sample+'_SP:_'+s+'_TY:_'+key+'_.'+fmt
else:
files[key] = 'SY:_'+synth+'_TY:_'+key+'_.'+fmt
else:
if synth == '':
filename = ''
for item in [locname, site, sample, s, key]:
if item:
item = item.replace(' ', '_')
filename += item + '_'
if filename.endswith('_'):
filename = filename[:-1]
filename += ".{}".format(fmt)
files[key] = filename
else:
files[key] = "{}_{}.{}".format(synth, key, fmt)
pmagplotlib.saveP(HDD, files)
if pltspec != "":
sys.exit()
if verbose:
pmagplotlib.drawFIGS(HDD)
ans = input("S[a]ve plots, [s]pecimen name, [q]uit, <return> to continue\n ")
if ans == "a":
files = {}
for key in list(HDD.keys()):
if pmagplotlib.isServer: # use server plot naming convention
files[key] = "LO:_"+locname+'_SI:_'+site+'_SA:_'+sample+'_SP:_'+s+'_TY:_'+key+'_.'+fmt
else: # use more readable plot naming convention
filename = ''
for item in [locname, site, sample, s, key]:
if item:
item = item.replace(' ', '_')
filename += item + '_'
if filename.endswith('_'):
filename = filename[:-1]
filename += ".{}".format(fmt)
files[key] = filename
pmagplotlib.saveP(HDD, files)
if ans == '':
k += 1
if ans == "p":
del HystRecs[-1]
k -= 1
if ans == 'q':
print("Good bye")
sys.exit()
if ans == 's':
keepon = 1
specimen = input('Enter desired specimen name (or first part there of): ')
while keepon == 1:
try:
k = sids.index(specimen)
keepon = 0
except:
tmplist = []
for qq in range(len(sids)):
if specimen in sids[qq]:
tmplist.append(sids[qq])
print(specimen, " not found, but this was: ")
print(tmplist)
specimen = input('Select one or try again\n ')
k = sids.index(specimen)
else:
k += 1
if len(B) == 0:
if verbose:
print('skipping this one - no hysteresis data')
k += 1
if __name__ == "__main__":
main()
| bsd-3-clause |
SEMCOG/synthpop | synthpop/recipes/starter2.py | 1 | 15159 | import numpy as np
import pandas as pd
from .. import categorizer as cat
from ..census_helpers import Census
# TODO DOCSTRINGS!!
class Starter:
"""
This is a recipe for getting the marginals and joint distributions to use
to pass to the synthesizer using simple categories - population, age,
race, and sex for people, and children, income, cars, and workers for
households. This module is responsible for
Parameters
----------
c : object
census_helpers.Census object
state : string
FIPS code the state
county : string
FIPS code for the county
tract : string, optional
        FIPS code for a specific tract or None for all tracts in the county
Returns
-------
household_marginals : DataFrame
Marginals per block group for the household data (from ACS)
person_marginals : DataFrame
Marginals per block group for the person data (from ACS)
household_jointdist : DataFrame
joint distributions for the households (from PUMS), one joint
distribution for each PUMA (one row per PUMA)
person_jointdist : DataFrame
joint distributions for the persons (from PUMS), one joint
distribution for each PUMA (one row per PUMA)
tract_to_puma_map : dictionary
        keys are tract ids and values are puma ids
"""
def __init__(self, key, state, county, tract=None):
self.c = c = Census(key)
self.state = state
self.county = county
self.tract = tract
structure_size_columns = ['B25032_0%02dE' % i for i in range(1, 24)]
age_of_head_columns = ['B25007_0%02dE' % i for i in range(1, 22)]
race_of_head_columns = ['B25006_0%02dE' % i for i in range(1, 11)]
hispanic_head_columns = ['B25003I_0%02dE' % i for i in range(1, 4)]
hh_size_columns = ['B25009_0%02dE' % i for i in range(1, 18)]
income_columns = ['B19001_0%02dE' % i for i in range(1, 18)]
vehicle_columns = ['B08201_0%02dE' % i for i in range(1, 7)]
workers_columns = ['B08202_0%02dE' % i for i in range(1, 6)]
presence_of_children_columns = ['B11005_001E', 'B11005_002E', 'B11005_011E']
presence_of_seniors_columns = ['B11007_002E', 'B11007_007E']
tenure_mover_columns = ['B25038_0%02dE' % i for i in range(1, 16)]
block_group_columns = (
income_columns + presence_of_children_columns +
presence_of_seniors_columns + tenure_mover_columns +
hh_size_columns + age_of_head_columns + structure_size_columns +
race_of_head_columns + hispanic_head_columns)
tract_columns = vehicle_columns + workers_columns
h_acs = c.block_group_and_tract_query(
block_group_columns,
tract_columns, state, county,
merge_columns=['tract', 'county', 'state'],
block_group_size_attr="B11005_001E",
tract_size_attr="B08201_001E",
tract=tract)
self.h_acs = h_acs
self.h_acs_cat = cat.categorize(h_acs, {
("sf_detached", "yes"): "B25032_003E + B25032_014E",
("sf_detached", "no"): "B25032_001E - B25032_003E - B25032_014E",
("hh_age_of_head", "lt35"):
"B25007_003E + B25007_004E + B25007_013E + B25007_014E",
("hh_age_of_head", "gt35-lt65"):
"B25007_005E + B25007_006E + B25007_007E + B25007_008E + "
"B25007_015E + B25007_016E + B25007_017E + B25007_018E",
("hh_age_of_head", "gt65"):
"B25007_009E + B25007_010E + B25007_011E + "
"B25007_019E + B25007_020E + B25007_021E",
("hh_race_of_head", "black"): "B25006_003E",
("hh_race_of_head", "white"): "B25006_002E",
("hh_race_of_head", "asian"): "B25006_005E",
("hh_race_of_head", "other"):
"B25006_004E + B25006_006E + B25006_007E + B25006_008E ",
("hispanic_head", "yes"): "B25003I_001E",
("hispanic_head", "no"): "B11005_001E - B25003I_001E",
("hh_children", "yes"): "B11005_002E",
("hh_children", "no"): "B11005_011E",
("seniors", "yes"): "B11007_002E",
("seniors", "no"): "B11007_007E",
("hh_income", "lt30"):
"B19001_002E + B19001_003E + B19001_004E + "
"B19001_005E + B19001_006E",
("hh_income", "gt30-lt60"):
"B19001_007E + B19001_008E + B19001_009E + "
"B19001_010E + B19001_011E",
("hh_income", "gt60-lt100"): "B19001_012E + B19001_013E",
("hh_income", "gt100-lt150"): "B19001_014E + B19001_015E",
("hh_income", "gt150"): "B19001_016E + B19001_017E",
("hh_cars", "none"): "B08201_002E",
("hh_cars", "one"): "B08201_003E",
("hh_cars", "two or more"):
"B08201_004E + B08201_005E + B08201_006E",
("hh_workers", "none"): "B08202_002E",
("hh_workers", "one"): "B08202_003E",
("hh_workers", "two or more"): "B08202_004E + B08202_005E",
("tenure_mover", "own recent"): "B25038_003E",
("tenure_mover", "own not recent"): "B25038_002E - B25038_003E",
("tenure_mover", "rent recent"): "B25038_010E",
("tenure_mover", "rent not recent"): "B25038_009E - B25038_010E",
("hh_size", "one"): "B25009_003E + B25009_011E",
("hh_size", "two"): "B25009_004E + B25009_012E",
("hh_size", "three"): "B25009_005E + B25009_013E",
("hh_size", "four or more"): "B25009_006E + B25009_014E + "
"B25009_007E + B25009_015E + "
"B25009_008E + B25009_016E + "
"B25009_009E + B25009_017E",
}, index_cols=['state', 'county', 'tract', 'block group'])
# gq_population = ['B26001_001E']
# HH population, for the hhpop/totalpop adjustment
hh_population = ['B11002_001E']
population = ['B01001_001E'] # This includes GQ
hispanic = ['B03003_002E', 'B03003_003E']
sex = ['B01001_002E', 'B01001_026E']
race = ['B02001_0%02dE' % i for i in range(1, 11)]
male_age_columns = ['B01001_0%02dE' % i for i in range(3, 26)]
female_age_columns = ['B01001_0%02dE' % i for i in range(27, 50)]
all_columns = population + sex + race + male_age_columns + \
female_age_columns + hh_population + hispanic
p_acs = c.block_group_query(all_columns, state, county, tract=tract)
self.p_acs_cat = cat.categorize(p_acs, {
("person_age", "19 and under"):
"(B01001_003E + B01001_004E + B01001_005E + "
"B01001_006E + B01001_007E + B01001_027E + "
"B01001_028E + B01001_029E + B01001_030E + "
"B01001_031E) * B11002_001E*1.0/B01001_001E",
("person_age", "20 to 35"):
"(B01001_008E + B01001_009E + B01001_010E + "
"B01001_011E + B01001_012E + B01001_032E + "
"B01001_033E + B01001_034E + B01001_035E + "
"B01001_036E) * B11002_001E*1.0/B01001_001E",
("person_age", "35 to 60"):
"(B01001_013E + B01001_014E + B01001_015E + "
"B01001_016E + B01001_017E + B01001_037E + "
"B01001_038E + B01001_039E + B01001_040E + "
"B01001_041E) * B11002_001E*1.0/B01001_001E",
("person_age", "above 60"):
"(B01001_018E + B01001_019E + B01001_020E + "
"B01001_021E + B01001_022E + B01001_023E + "
"B01001_024E + B01001_025E + B01001_042E + "
"B01001_043E + B01001_044E + B01001_045E + "
"B01001_046E + B01001_047E + B01001_048E + "
"B01001_049E) * B11002_001E*1.0/B01001_001E",
("race", "white"): "(B02001_002E) * B11002_001E*1.0/B01001_001E",
("race", "black"): "(B02001_003E) * B11002_001E*1.0/B01001_001E",
("race", "asian"): "(B02001_005E) * B11002_001E*1.0/B01001_001E",
("race", "other"): "(B02001_004E + B02001_006E + B02001_007E + "
"B02001_008E) * B11002_001E*1.0/B01001_001E",
("person_sex", "male"):
"(B01001_002E) * B11002_001E*1.0/B01001_001E",
("person_sex", "female"):
"(B01001_026E) * B11002_001E*1.0/B01001_001E",
("hispanic", "yes"):
"(B03003_003E) * B11002_001E*1.0/B01001_001E",
("hispanic", "no"):
"(B03003_002E) * B11002_001E*1.0/B01001_001E",
}, index_cols=['state', 'county', 'tract', 'block group'])
def get_geography_name(self):
# this synthesis is at the block group level for most variables
return "block_group"
def get_num_geographies(self):
return len(self.p_acs_cat)
def get_available_geography_ids(self):
# return the ids of the geographies, in this case a state, county,
# tract, block_group id tuple
for tup in self.p_acs_cat.index:
yield pd.Series(tup, index=self.p_acs_cat.index.names)
def get_household_marginal_for_geography(self, ind):
return self.h_acs_cat.loc[tuple(ind.values)]
def get_person_marginal_for_geography(self, ind):
return self.p_acs_cat.loc[tuple(ind.values)]
def get_household_joint_dist_for_geography(self, ind):
c = self.c
puma10, puma00 = c.tract_to_puma(ind.state, ind.county, ind.tract)
# this is cached so won't download more than once
if type(puma00) == str:
h_pums = self.c.download_household_pums(ind.state, puma10, puma00)
p_pums = self.c.download_population_pums(ind.state, puma10, puma00)
elif np.isnan(puma00): # only puma10 available
h_pums = self.c.download_household_pums(ind.state, puma10, None)
p_pums = self.c.download_population_pums(ind.state, puma10, None)
h_pums = h_pums.set_index('serialno')
# join persons to households,
# calculate needed household-level variables
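# (assumes the standard PUMS data dictionary: RELP == 0 marks the householder
# / reference person; ESR codes 1, 2, 4 and 5 denote employed persons)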
age_of_head = p_pums[p_pums.RELP == 0].groupby('serialno').AGEP.max()
num_workers = p_pums[p_pums.ESR.isin([1, 2, 4, 5])].groupby(
'serialno').size()
h_pums['race_of_head'] = p_pums[p_pums.RELP == 0].groupby(
'serialno').RAC1P.max()
h_pums['hispanic_head'] = p_pums[p_pums.RELP == 0].groupby(
'serialno').HISP.max()
h_pums['age_of_head'] = age_of_head
h_pums['workers'] = num_workers
h_pums.workers = h_pums.workers.fillna(0)
h_pums = h_pums.reset_index()
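# The category helpers below translate PUMS codes into the ACS categories
# used above; the mappings assume the standard PUMS data dictionary
# (e.g. BLD 2 = single-family detached, TEN < 3 = owner-occupied,
# MV < 4 = moved in recently, NP = persons, VEH = vehicles, HINCP = income,
# R18/R65 = children/seniors present).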
def sf_detached_cat(r):
if r.BLD == 2:
return "yes"
return "no"
def age_of_head_cat(r):
if r.age_of_head < 35:
return "lt35"
elif r.age_of_head >= 65:
return "gt65"
return "gt35-lt65"
def race_of_head_cat(r):
if r.race_of_head == 1:
return "white"
elif r.race_of_head == 2:
return "black"
elif r.race_of_head == 6:
return "asian"
return "other"
def hispanic_head_cat(r):
if r.hispanic_head == 1:
return "no"
return "yes"
def hh_size_cat(r):
if r.NP == 1:
return "one"
elif r.NP == 2:
return "two"
elif r.NP == 3:
return "three"
return "four or more"
def cars_cat(r):
if r.VEH == 0:
return "none"
elif r.VEH == 1:
return "one"
return "two or more"
def children_cat(r):
if r.R18 == 1:
return "yes"
return "no"
def seniors_cat(r):
if r.R65 > 0:
return "yes"
return "no"
def income_cat(r):
if r.HINCP >= 150000:
return "gt150"
elif (r.HINCP >= 100000) & (r.HINCP < 150000):
return "gt100-lt150"
elif (r.HINCP >= 60000) & (r.HINCP < 100000):
return "gt60-lt100"
elif (r.HINCP >= 30000) & (r.HINCP < 60000):
return "gt30-lt60"
return "lt30"
def workers_cat(r):
if r.workers >= 2:
return "two or more"
elif r.workers == 1:
return "one"
return "none"
def tenure_mover_cat(r):
if (r.MV < 4) & (r.TEN < 3):
return "own recent"
elif (r.MV >= 4) & (r.TEN < 3):
return "own not recent"
elif (r.MV < 4) & (r.TEN >= 3):
return "rent recent"
return "rent not recent"
h_pums, jd_households = cat.joint_distribution(
h_pums,
cat.category_combinations(self.h_acs_cat.columns),
{"hh_cars": cars_cat,
"hh_children": children_cat,
"hh_income": income_cat,
"hh_workers": workers_cat,
"tenure_mover": tenure_mover_cat,
"seniors": seniors_cat,
"hh_size": hh_size_cat,
"hh_age_of_head": age_of_head_cat,
"sf_detached": sf_detached_cat,
"hh_race_of_head": race_of_head_cat,
"hispanic_head": hispanic_head_cat}
)
return h_pums, jd_households
def get_person_joint_dist_for_geography(self, ind):
c = self.c
puma10, puma00 = c.tract_to_puma(ind.state, ind.county, ind.tract)
# this is cached so won't download more than once
if type(puma00) == str:
p_pums = self.c.download_population_pums(ind.state, puma10, puma00)
elif np.isnan(puma00): # only puma10 available
p_pums = self.c.download_population_pums(ind.state, puma10, None)
def age_cat(r):
if r.AGEP <= 19:
return "19 and under"
elif r.AGEP <= 35:
return "20 to 35"
elif r.AGEP <= 60:
return "35 to 60"
return "above 60"
def race_cat(r):
if r.RAC1P == 1:
return "white"
elif r.RAC1P == 2:
return "black"
elif r.RAC1P == 6:
return "asian"
return "other"
def sex_cat(r):
if r.SEX == 1:
return "male"
return "female"
def hispanic_cat(r):
if r.HISP == 1:
return "no"
return "yes"
p_pums, jd_persons = cat.joint_distribution(
p_pums,
cat.category_combinations(self.p_acs_cat.columns),
{"person_age": age_cat, "race": race_cat, "person_sex": sex_cat,
"hispanic": hispanic_cat}
)
return p_pums, jd_persons
| bsd-3-clause |
AtsushiSakai/PythonRobotics | PathPlanning/FrenetOptimalTrajectory/frenet_optimal_trajectory.py | 1 | 9435 | """
Frenet optimal trajectory generator
author: Atsushi Sakai (@Atsushi_twi)
Ref:
- [Optimal Trajectory Generation for Dynamic Street Scenarios in a Frenet Frame]
(https://www.researchgate.net/profile/Moritz_Werling/publication/224156269_Optimal_Trajectory_Generation_for_Dynamic_Street_Scenarios_in_a_Frenet_Frame/links/54f749df0cf210398e9277af.pdf)
- [Optimal trajectory generation for dynamic street scenarios in a Frenet Frame]
(https://www.youtube.com/watch?v=Cj6tAQe7UCY)
"""
import numpy as np
import matplotlib.pyplot as plt
import copy
import math
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) +
"/../QuinticPolynomialsPlanner/")
sys.path.append(os.path.dirname(os.path.abspath(__file__)) +
"/../CubicSpline/")
try:
from quintic_polynomials_planner import QuinticPolynomial
import cubic_spline_planner
except ImportError:
raise
SIM_LOOP = 500
# Parameter
MAX_SPEED = 50.0 / 3.6 # maximum speed [m/s]
MAX_ACCEL = 2.0 # maximum acceleration [m/ss]
MAX_CURVATURE = 1.0 # maximum curvature [1/m]
MAX_ROAD_WIDTH = 7.0 # maximum road width [m]
D_ROAD_W = 1.0 # road width sampling length [m]
DT = 0.2 # time tick [s]
MAX_T = 5.0 # max prediction time [s]
MIN_T = 4.0 # min prediction time [s]
TARGET_SPEED = 30.0 / 3.6 # target speed [m/s]
D_T_S = 5.0 / 3.6 # target speed sampling length [m/s]
N_S_SAMPLE = 1 # sampling number of target speed
ROBOT_RADIUS = 2.0 # robot radius [m]
# cost weights
K_J = 0.1
K_T = 0.1
K_D = 1.0
K_LAT = 1.0
K_LON = 1.0
show_animation = True
class QuarticPolynomial:
def __init__(self, xs, vxs, axs, vxe, axe, time):
# calc coefficient of quartic polynomial
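# x(t) = a0 + a1*t + a2*t**2 + a3*t**3 + a4*t**4; the start state fixes
# a0, a1, a2, so only a3 and a4 remain. Matching the end velocity vxe and
# end acceleration axe at t = time gives the 2x2 linear system solved below
# (a quartic imposes no end-position constraint).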
self.a0 = xs
self.a1 = vxs
self.a2 = axs / 2.0
A = np.array([[3 * time ** 2, 4 * time ** 3],
[6 * time, 12 * time ** 2]])
b = np.array([vxe - self.a1 - 2 * self.a2 * time,
axe - 2 * self.a2])
x = np.linalg.solve(A, b)
self.a3 = x[0]
self.a4 = x[1]
def calc_point(self, t):
xt = self.a0 + self.a1 * t + self.a2 * t ** 2 + \
self.a3 * t ** 3 + self.a4 * t ** 4
return xt
def calc_first_derivative(self, t):
xt = self.a1 + 2 * self.a2 * t + \
3 * self.a3 * t ** 2 + 4 * self.a4 * t ** 3
return xt
def calc_second_derivative(self, t):
xt = 2 * self.a2 + 6 * self.a3 * t + 12 * self.a4 * t ** 2
return xt
def calc_third_derivative(self, t):
xt = 6 * self.a3 + 24 * self.a4 * t
return xt
class FrenetPath:
def __init__(self):
self.t = []
self.d = []
self.d_d = []
self.d_dd = []
self.d_ddd = []
self.s = []
self.s_d = []
self.s_dd = []
self.s_ddd = []
self.cd = 0.0
self.cv = 0.0
self.cf = 0.0
self.x = []
self.y = []
self.yaw = []
self.ds = []
self.c = []
def calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0):
frenet_paths = []
# generate path to each offset goal
for di in np.arange(-MAX_ROAD_WIDTH, MAX_ROAD_WIDTH, D_ROAD_W):
# Lateral motion planning
for Ti in np.arange(MIN_T, MAX_T, DT):
fp = FrenetPath()
# lat_qp = quintic_polynomial(c_d, c_d_d, c_d_dd, di, 0.0, 0.0, Ti)
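# The quintic connects the current lateral state (c_d, c_d_d, c_d_dd) to a
# terminal offset di with zero lateral velocity and acceleration at time Ti.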
lat_qp = QuinticPolynomial(c_d, c_d_d, c_d_dd, di, 0.0, 0.0, Ti)
fp.t = [t for t in np.arange(0.0, Ti, DT)]
fp.d = [lat_qp.calc_point(t) for t in fp.t]
fp.d_d = [lat_qp.calc_first_derivative(t) for t in fp.t]
fp.d_dd = [lat_qp.calc_second_derivative(t) for t in fp.t]
fp.d_ddd = [lat_qp.calc_third_derivative(t) for t in fp.t]
# Longitudinal motion planning (Velocity keeping)
for tv in np.arange(TARGET_SPEED - D_T_S * N_S_SAMPLE,
TARGET_SPEED + D_T_S * N_S_SAMPLE, D_T_S):
tfp = copy.deepcopy(fp)
lon_qp = QuarticPolynomial(s0, c_speed, 0.0, tv, 0.0, Ti)
tfp.s = [lon_qp.calc_point(t) for t in fp.t]
tfp.s_d = [lon_qp.calc_first_derivative(t) for t in fp.t]
tfp.s_dd = [lon_qp.calc_second_derivative(t) for t in fp.t]
tfp.s_ddd = [lon_qp.calc_third_derivative(t) for t in fp.t]
Jp = sum(np.power(tfp.d_ddd, 2)) # square of jerk
Js = sum(np.power(tfp.s_ddd, 2)) # square of jerk
# square of diff from target speed
ds = (TARGET_SPEED - tfp.s_d[-1]) ** 2
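# Lateral cost cd: jerk (K_J), duration (K_T) and squared terminal offset
# (K_D); longitudinal cost cv: jerk, duration and squared speed error.
# cf combines them with the K_LAT / K_LON weights.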
tfp.cd = K_J * Jp + K_T * Ti + K_D * tfp.d[-1] ** 2
tfp.cv = K_J * Js + K_T * Ti + K_D * ds
tfp.cf = K_LAT * tfp.cd + K_LON * tfp.cv
frenet_paths.append(tfp)
return frenet_paths
def calc_global_paths(fplist, csp):
for fp in fplist:
# calc global positions
for i in range(len(fp.s)):
ix, iy = csp.calc_position(fp.s[i])
if ix is None:
break
i_yaw = csp.calc_yaw(fp.s[i])
di = fp.d[i]
fx = ix + di * math.cos(i_yaw + math.pi / 2.0)
fy = iy + di * math.sin(i_yaw + math.pi / 2.0)
fp.x.append(fx)
fp.y.append(fy)
# calc yaw and ds
for i in range(len(fp.x) - 1):
dx = fp.x[i + 1] - fp.x[i]
dy = fp.y[i + 1] - fp.y[i]
fp.yaw.append(math.atan2(dy, dx))
fp.ds.append(math.hypot(dx, dy))
fp.yaw.append(fp.yaw[-1])
fp.ds.append(fp.ds[-1])
# calc curvature
for i in range(len(fp.yaw) - 1):
fp.c.append((fp.yaw[i + 1] - fp.yaw[i]) / fp.ds[i])
return fplist
def check_collision(fp, ob):
for i in range(len(ob[:, 0])):
d = [((ix - ob[i, 0]) ** 2 + (iy - ob[i, 1]) ** 2)
for (ix, iy) in zip(fp.x, fp.y)]
collision = any([di <= ROBOT_RADIUS ** 2 for di in d])
if collision:
return False
return True
def check_paths(fplist, ob):
ok_ind = []
for i, _ in enumerate(fplist):
if any([v > MAX_SPEED for v in fplist[i].s_d]): # Max speed check
continue
elif any([abs(a) > MAX_ACCEL for a in
fplist[i].s_dd]): # Max accel check
continue
elif any([abs(c) > MAX_CURVATURE for c in
fplist[i].c]): # Max curvature check
continue
elif not check_collision(fplist[i], ob):
continue
ok_ind.append(i)
return [fplist[i] for i in ok_ind]
def frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob):
fplist = calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0)
fplist = calc_global_paths(fplist, csp)
fplist = check_paths(fplist, ob)
# find minimum cost path
min_cost = float("inf")
best_path = None
for fp in fplist:
if min_cost >= fp.cf:
min_cost = fp.cf
best_path = fp
return best_path
def generate_target_course(x, y):
csp = cubic_spline_planner.Spline2D(x, y)
s = np.arange(0, csp.s[-1], 0.1)
rx, ry, ryaw, rk = [], [], [], []
for i_s in s:
ix, iy = csp.calc_position(i_s)
rx.append(ix)
ry.append(iy)
ryaw.append(csp.calc_yaw(i_s))
rk.append(csp.calc_curvature(i_s))
return rx, ry, ryaw, rk, csp
def main():
print(__file__ + " start!!")
# way points
wx = [0.0, 10.0, 20.5, 35.0, 70.5]
wy = [0.0, -6.0, 5.0, 6.5, 0.0]
# obstacle lists
ob = np.array([[20.0, 10.0],
[30.0, 6.0],
[30.0, 8.0],
[35.0, 8.0],
[50.0, 3.0]
])
tx, ty, tyaw, tc, csp = generate_target_course(wx, wy)
# initial state
c_speed = 10.0 / 3.6 # current speed [m/s]
c_d = 2.0 # current lateral position [m]
c_d_d = 0.0 # current lateral speed [m/s]
c_d_dd = 0.0 # current lateral acceleration [m/ss]
s0 = 0.0 # current course position
area = 20.0 # animation area length [m]
for i in range(SIM_LOOP):
path = frenet_optimal_planning(
csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob)
s0 = path.s[1]
c_d = path.d[1]
c_d_d = path.d_d[1]
c_d_dd = path.d_dd[1]
c_speed = path.s_d[1]
if np.hypot(path.x[1] - tx[-1], path.y[1] - ty[-1]) <= 1.0:
print("Goal")
break
if show_animation: # pragma: no cover
plt.cla()
# for stopping simulation with the esc key.
plt.gcf().canvas.mpl_connect(
'key_release_event',
lambda event: [exit(0) if event.key == 'escape' else None])
plt.plot(tx, ty)
plt.plot(ob[:, 0], ob[:, 1], "xk")
plt.plot(path.x[1:], path.y[1:], "-or")
plt.plot(path.x[1], path.y[1], "vc")
plt.xlim(path.x[1] - area, path.x[1] + area)
plt.ylim(path.y[1] - area, path.y[1] + area)
plt.title("v[km/h]:" + str(c_speed * 3.6)[0:4])
plt.grid(True)
plt.pause(0.0001)
print("Finish")
if show_animation: # pragma: no cover
plt.grid(True)
plt.pause(0.0001)
plt.show()
if __name__ == '__main__':
main()
| mit |
davemccormick/pyAnimalTrack | src/pyAnimalTrack/ui/Controller/FeaturesWindow.py | 1 | 4615 | # TODO: Stop the overflow: None of the plot legend
# TODO: Time epochs
import os
from PyQt5 import uic
from PyQt5.QtWidgets import QMainWindow
from pyAnimalTrack.ui.Controller.TableAndGraphView import TableAndGraphView
from pyAnimalTrack.ui.Model.FeaturesModel import FeaturesModel
from pyAnimalTrack.ui.Model.SettingsModel import SettingsModel
from pyAnimalTrack.ui.Model.TableModel import TableModel
from pyAnimalTrack.ui.Service.FeaturesCalculator import FeaturesCalculator
from pyAnimalTrack.ui.Service.SaveDataframe import SaveDataframe
viewFilePath = os.path.join(os.path.dirname(__file__), '../../../../View/')
if not os.path.exists(viewFilePath):
viewFilePath = os.path.join(os.path.dirname(__file__), '../View/')
uiFeaturesWindow = uic.loadUiType(os.path.join(viewFilePath, 'FeaturesWindow.ui'))[0]
class FeaturesWindow(QMainWindow, uiFeaturesWindow, TableAndGraphView):
def __init__(self, *args):
""" Constructor
:param args: PyQt program arguments
:return: void
"""
super(FeaturesWindow, self).__init__(*args)
self.setupUi(self)
TableAndGraphView.__init__(self, self.featureTableView, self.currentColumnComboBox, self.plotFrame, self.legendFrame, self.redraw_graph)
self.featureModel = None
self.tableDataFile = None
self.lowPassData = None
self.saveToDataFileButton.clicked.connect(self.save_data_to_file)
self.saveGraphButton.clicked.connect(self.save_graph_to_file)
def set_data(self, unfiltered_data, low_pass_data, high_pass_data):
""" Set the datasets for the features window
:param unfiltered_data: A pandas dataset, as read from the input stream
:param low_pass_data: A pandas dataset, after running through the low-pass filter
:param high_pass_data: A pandas dataset, after running through the high-pass filter
:return: void
"""
# Cache the dataset for the graph
self.lowPassData = low_pass_data
features = []
out = FeaturesCalculator.calculate(unfiltered_data, low_pass_data)
for row in range(0, len(unfiltered_data) - 1):
features.append([
', '.join([str(unfiltered_data['ax'][row]), str(unfiltered_data['ay'][row]),
str(unfiltered_data['az'][row])]),
out['SMA'][row],
out['SVM'][row],
out['MOV'][row],
out['ENG'][row],
out['ENT'][row],
out['PIT'][row],
out['ROL'][row],
out['INC'][row]
])
self.featureModel = FeaturesModel(features)
self.tableDataFile = TableModel(self.featureModel)
self.featureTableView.setModel(self.tableDataFile)
TableAndGraphView.after_init(self)
# Load filter controlling dropdown
self.currentColumnComboBox.clear()
self.currentColumnComboBox.addItems(self.featureModel.getReadableColumns())
def redraw_graph(self):
""" Redraw the graph
:returns: void
"""
current_column = self.currentColumnComboBox.currentIndex()
# TODO: Read lines from config
if current_column == 0:
lines = self.plot.plot(
self.lowPassData['ax'], SettingsModel.get_value('lines')[0],
self.lowPassData['ay'], SettingsModel.get_value('lines')[1],
self.lowPassData['az'], SettingsModel.get_value('lines')[2]
)
lines[0].set_label('X')
lines[1].set_label('Y')
lines[2].set_label('Z')
self.legendPlot.legend(bbox_to_anchor=(-4, 0.9, 2., .102), loc=2, handles=lines)
else:
lines = self.plot.plot(
self.tableDataFile.get_dataset()[self.featureModel.getColumns()[current_column]].values, SettingsModel.get_value('lines')[0]
)
lines[0].set_label(self.featureModel.getReadableColumns()[current_column].replace(' ', '\n'))
self.legendPlot.legend(bbox_to_anchor=(-3.5, 0.9, 2., .102), loc=2, handles=lines)
self.canvas.draw()
self.legendCanvas.draw()
def save_data_to_file(self):
filename = SaveDataframe.save(self.tableDataFile.get_dataset(), 'data')
if filename:
self.featuresStatusBar.showMessage('Saved to ' + filename)
def save_graph_to_file(self):
filename = SaveDataframe.save(self.figure, 'graph')
if filename:
self.featuresStatusBar.showMessage('Saved to ' + filename) | gpl-3.0 |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/mpl_toolkits/axes_grid1/mpl_axes.py | 7 | 5071 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import warnings
import matplotlib.axes as maxes
from matplotlib.artist import Artist
from matplotlib.axis import XAxis, YAxis
class SimpleChainedObjects(object):
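"""Proxy that forwards attribute access and calls to every wrapped object."""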
def __init__(self, objects):
self._objects = objects
def __getattr__(self, k):
_a = SimpleChainedObjects([getattr(a, k) for a in self._objects])
return _a
def __call__(self, *kl, **kwargs):
for m in self._objects:
m(*kl, **kwargs)
class Axes(maxes.Axes):
def toggle_axisline(self, b):
warnings.warn("toggle_axisline is not necessary and deprecated in axes_grid1")
class AxisDict(dict):
def __init__(self, axes):
self.axes = axes
super(Axes.AxisDict, self).__init__()
def __getitem__(self, k):
if isinstance(k, tuple):
r = SimpleChainedObjects(
[super(Axes.AxisDict, self).__getitem__(k1) for k1 in k])
return r
elif isinstance(k, slice):
if k.start is None and k.stop is None and k.step is None:
r = SimpleChainedObjects(list(six.itervalues(self)))
return r
else:
raise ValueError("Unsupported slice")
else:
return dict.__getitem__(self, k)
def __call__(self, *v, **kwargs):
return maxes.Axes.axis(self.axes, *v, **kwargs)
def __init__(self, *kl, **kw):
super(Axes, self).__init__(*kl, **kw)
def _init_axis_artists(self, axes=None):
if axes is None:
axes = self
self._axislines = self.AxisDict(self)
self._axislines["bottom"] = SimpleAxisArtist(self.xaxis, 1, self.spines["bottom"])
self._axislines["top"] = SimpleAxisArtist(self.xaxis, 2, self.spines["top"])
self._axislines["left"] = SimpleAxisArtist(self.yaxis, 1, self.spines["left"])
self._axislines["right"] = SimpleAxisArtist(self.yaxis, 2, self.spines["right"])
def _get_axislines(self):
return self._axislines
axis = property(_get_axislines)
def cla(self):
super(Axes, self).cla()
self._init_axis_artists()
class SimpleAxisArtist(Artist):
def __init__(self, axis, axisnum, spine):
self._axis = axis
self._axisnum = axisnum
self.line = spine
if isinstance(axis, XAxis):
self._axis_direction = ["bottom", "top"][axisnum-1]
elif isinstance(axis, YAxis):
self._axis_direction = ["left", "right"][axisnum-1]
else:
raise ValueError("axis must be instance of XAxis or YAxis : %s is provided" % (axis,))
Artist.__init__(self)
def _get_major_ticks(self):
tickline = "tick%dline" % self._axisnum
return SimpleChainedObjects([getattr(tick, tickline) for tick \
in self._axis.get_major_ticks()])
def _get_major_ticklabels(self):
label = "label%d" % self._axisnum
return SimpleChainedObjects([getattr(tick, label) for tick \
in self._axis.get_major_ticks()])
def _get_label(self):
return self._axis.label
major_ticks = property(_get_major_ticks)
major_ticklabels = property(_get_major_ticklabels)
label = property(_get_label)
def set_visible(self, b):
self.toggle(all=b)
self.line.set_visible(b)
self._axis.set_visible(True)
Artist.set_visible(self, b)
def set_label(self, txt):
self._axis.set_label_text(txt)
def toggle(self, all=None, ticks=None, ticklabels=None, label=None):
if all:
_ticks, _ticklabels, _label = True, True, True
elif all is not None:
_ticks, _ticklabels, _label = False, False, False
else:
_ticks, _ticklabels, _label = None, None, None
if ticks is not None:
_ticks = ticks
if ticklabels is not None:
_ticklabels = ticklabels
if label is not None:
_label = label
tickOn = "tick%dOn" % self._axisnum
labelOn = "label%dOn" % self._axisnum
if _ticks is not None:
tickparam = {tickOn: _ticks}
self._axis.set_tick_params(**tickparam)
if _ticklabels is not None:
tickparam = {labelOn: _ticklabels}
self._axis.set_tick_params(**tickparam)
if _label is not None:
pos = self._axis.get_label_position()
if (pos == self._axis_direction) and not _label:
self._axis.label.set_visible(False)
elif _label:
self._axis.label.set_visible(True)
self._axis.set_label_position(self._axis_direction)
if __name__ == '__main__':
import matplotlib.pyplot as plt
fig = plt.figure()
ax = Axes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
ax.cla()
| mit |
indhub/mxnet | example/gluon/kaggle_k_fold_cross_validation.py | 26 | 6854 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This example provides an end-to-end pipeline for a common Kaggle competition.
# The entire pipeline includes common utilities such as k-fold cross validation
# and data pre-processing.
#
# Specifically, the example studies the `House Prices: Advanced Regression
# Techniques` challenge as a case study.
#
# The link to the problem on Kaggle:
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques
import numpy as np
import pandas as pd
from mxnet import autograd
from mxnet import gluon
from mxnet import ndarray as nd
# After logging in www.kaggle.com, the training and testing data sets can be downloaded at:
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques/download/train.csv
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques/download/test.csv
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
all_X = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'],
test.loc[:, 'MSSubClass':'SaleCondition']))
# Get all the numerical features and apply standardization.
numeric_feas = all_X.dtypes[all_X.dtypes != "object"].index
all_X[numeric_feas] = all_X[numeric_feas].apply(lambda x:
(x - x.mean()) / (x.std()))
# Convert categorical feature values to numerical (including N/A).
all_X = pd.get_dummies(all_X, dummy_na=True)
# Approximate N/A feature value by the mean value of the current feature.
all_X = all_X.fillna(all_X.mean())
num_train = train.shape[0]
# Convert data formats to NDArrays to feed into gluon.
X_train = all_X[:num_train].as_matrix()
X_test = all_X[num_train:].as_matrix()
y_train = train.SalePrice.as_matrix()
X_train = nd.array(X_train)
y_train = nd.array(y_train)
y_train = y_train.reshape((num_train, 1))
X_test = nd.array(X_test)
square_loss = gluon.loss.L2Loss()
def get_rmse_log(net, X_train, y_train):
"""Gets root mse between the logarithms of the prediction and the truth."""
num_train = X_train.shape[0]
clipped_preds = nd.clip(net(X_train), 1, float('inf'))
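# gluon's L2Loss returns 0.5 * (pred - label) ** 2, hence the factor of 2
# below to recover the mean squared log error before taking the square root.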
return np.sqrt(2 * nd.sum(square_loss(
nd.log(clipped_preds), nd.log(y_train))).asscalar() / num_train)
def get_net():
"""Gets a neural network. Better results are obtained with modifications."""
net = gluon.nn.Sequential()
with net.name_scope():
net.add(gluon.nn.Dense(50, activation="relu"))
net.add(gluon.nn.Dense(1))
net.initialize()
return net
def train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
weight_decay, batch_size):
"""Trains the model."""
dataset_train = gluon.data.ArrayDataset(X_train, y_train)
data_iter_train = gluon.data.DataLoader(dataset_train, batch_size,
shuffle=True)
trainer = gluon.Trainer(net.collect_params(), 'adam',
{'learning_rate': learning_rate,
'wd': weight_decay})
net.initialize(force_reinit=True)
for epoch in range(epochs):
for data, label in data_iter_train:
with autograd.record():
output = net(data)
loss = square_loss(output, label)
loss.backward()
trainer.step(batch_size)
avg_loss = get_rmse_log(net, X_train, y_train)
if epoch > verbose_epoch:
print("Epoch %d, train loss: %f" % (epoch, avg_loss))
return avg_loss
def k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
learning_rate, weight_decay, batch_size):
"""Conducts k-fold cross validation for the model."""
assert k > 1
fold_size = X_train.shape[0] // k
train_loss_sum = 0.0
test_loss_sum = 0.0
for test_idx in range(k):
X_val_test = X_train[test_idx * fold_size: (test_idx + 1) *
fold_size, :]
y_val_test = y_train[test_idx * fold_size: (test_idx + 1) * fold_size]
val_train_defined = False
for i in range(k):
if i != test_idx:
X_cur_fold = X_train[i * fold_size: (i + 1) * fold_size, :]
y_cur_fold = y_train[i * fold_size: (i + 1) * fold_size]
if not val_train_defined:
X_val_train = X_cur_fold
y_val_train = y_cur_fold
val_train_defined = True
else:
X_val_train = nd.concat(X_val_train, X_cur_fold, dim=0)
y_val_train = nd.concat(y_val_train, y_cur_fold, dim=0)
net = get_net()
train_loss = train(net, X_val_train, y_val_train, epochs, verbose_epoch,
learning_rate, weight_decay, batch_size)
train_loss_sum += train_loss
test_loss = get_rmse_log(net, X_val_test, y_val_test)
print("Test loss: %f" % test_loss)
test_loss_sum += test_loss
return train_loss_sum / k, test_loss_sum / k
# The sets of parameters. Better results are obtained with modifications.
# These parameters can be fine-tuned with k-fold cross-validation.
k = 5
epochs = 100
verbose_epoch = 95
learning_rate = 0.3
weight_decay = 100
batch_size = 100
train_loss, test_loss = \
k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
learning_rate, weight_decay, batch_size)
print("%d-fold validation: Avg train loss: %f, Avg test loss: %f" %
(k, train_loss, test_loss))
def learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
weight_decay, batch_size):
"""Trains the model and predicts on the test data set."""
net = get_net()
_ = train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
weight_decay, batch_size)
preds = net(X_test).asnumpy()
test['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
submission = pd.concat([test['Id'], test['SalePrice']], axis=1)
submission.to_csv('submission.csv', index=False)
learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
weight_decay, batch_size)
| apache-2.0 |
PrashntS/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 225 | 5719 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index (full scan). LSHForest index
have sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundred dimensions. Higher-dimensional datasets tend to
benefit more from LSHForest indexing.
The break even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
np.log10(n_samples_max),
n_steps).astype(np.int)
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
n_features=n_features, centers=n_centers, shuffle=True,
random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
X = index_data[:n_samples]
# Initialize LSHForest for queries of a single neighbor
lshf = LSHForest(n_estimators=20, n_candidates=200,
n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
n_neighbors=10).fit(X)
time_approx = []
time_exact = []
accuracy = []
for i in range(n_iter):
# pick one query at random to study query time variability in LSHForest
query = queries[rng.randint(0, n_queries)]
t0 = time.time()
exact_neighbors = nbrs.kneighbors(query, return_distance=False)
time_exact.append(time.time() - t0)
t0 = time.time()
approx_neighbors = lshf.kneighbors(query, return_distance=False)
time_approx.append(time.time() - t0)
accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
average_time_exact = np.mean(time_exact)
average_time_approx = np.mean(time_approx)
speedup = np.array(time_exact) / np.array(time_approx)
average_speedup = np.mean(speedup)
mean_accuracy = np.mean(accuracy)
std_accuracy = np.std(accuracy)
print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
"accuracy: %0.2f +/-%0.2f" %
(n_samples, average_time_exact, average_time_approx, average_speedup,
mean_accuracy, std_accuracy))
accuracies.append(mean_accuracy)
std_accuracies.append(std_accuracy)
average_times_exact.append(average_time_exact)
average_times_approx.append(average_time_approx)
std_times_approx.append(np.std(time_approx))
average_speedups.append(average_speedup)
std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', fontsize='small')
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
"nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
| bsd-3-clause |
tawsifkhan/scikit-learn | examples/plot_kernel_ridge_regression.py | 230 | 6222 | """
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than three times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depend
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.grid_search import GridSearchCV
from sklearn.learning_curve import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
X_plot = np.linspace(0, 5, 100000)[:, None]
#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors')
plt.scatter(X[:100], y[:100], c='k', label='data')
plt.hold('on')
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
sizes = np.logspace(1, 4, 7)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g",
label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
fredhusser/scikit-learn | benchmarks/bench_sgd_regression.py | 283 | 5569 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
| bsd-3-clause |
asttra/pysces | pysces/__init__.py | 1 | 15602 | """
PySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net)
Copyright (C) 2004-2015 B.G. Olivier, J.M. Rohwer, J.-H.S Hofmeyr all rights reserved,
Brett G. Olivier ([email protected])
Triple-J Group for Molecular Cell Physiology
Stellenbosch University, South Africa.
Permission to use, modify, and distribute this software is given under the
terms of the PySceS (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Brett G. Olivier
"""
from version import __version__
__doc__ = """
PySCeS: the Python Simulator for Cellular Systems
-------------------------------------------------
PySCeS is developed by the Triple-J Group for Molecular Cell Physiology
in order to try to model and understand the complex processes and systems
which make up the living cell. PySCeS features, amongst other things:
- A text based Model Description Language.
- A structural analysis module.
- Integrators for time simulation
- Non-linear solvers for steady-state analysis
- A module for performing Metabolic Control Analysis
- A bifurcation module for systems which exhibit multiple steady states
- A variety of extra utilities for parameter scans, data output and plotting.
- A dynamic module loading framework.
- SBML import and export capability.
"""
import os, time
import PyscesConfig
import PyscesParse
import PyscesLink as link
import codeutil
import PyscesSED as SED
from PyscesUtils import str2bool
from PyscesModelMap import ModelMap
#TODO get rid unused imports
from PyscesWeb import PyscesHTML
html = PyscesHTML()
DEBUG = False
inipath = None
lpath = None
install_dir = None
output_dir = None
model_dir = None
pitcon_switch = False
nleq2_switch = False
__USE_MATPLOTLIB__ = True
__MATPLOTLIB_BACKEND__ = 'TKagg'
__USE_GNUPLOT__ = False
__SILENT_START__ = False
if os.sys.platform == 'win32':
__PyscesConfigDefault = PyscesConfig.__DefaultWin
else:
__PyscesConfigDefault = PyscesConfig.__DefaultPosix
if DEBUG: print time.strftime('1-%H:%M:%S')
eggdir = 'pysces-%s-py%s.%s-%s.egg' %(__version__, os.sys.version_info[0],\
os.sys.version_info[1], os.sys.platform)
for path in os.sys.path:
chkPath = path.split(os.path.sep)[-1]
if chkPath == 'pysces' and path != os.getcwd():
install_dir = path
inipath = os.path.join(install_dir, 'pyscfg.ini')
break
elif chkPath == eggdir:
install_dir = os.path.join(path, 'pysces')
inipath = os.path.join(install_dir, 'pyscfg.ini')
break
if inipath == None:
for k in os.sys.path_importer_cache:
if k.split(os.path.sep)[-1] == 'pysces':
install_dir = k
inipath = os.path.join(install_dir, 'pyscfg.ini')
break
elif k.split(os.path.sep)[-1] == eggdir:
install_dir = os.path.join(path, 'pysces')
inipath = os.path.join(install_dir, 'pyscfg.ini')
break
del eggdir
if DEBUG: print time.strftime('2-%H:%M:%S')
try:
__config_dict = PyscesConfig.ReadConfig(inipath, config=__PyscesConfigDefault)
except Exception, ex:
print ex
print 'Cwd', os.getcwd()
print '\nWARNING: Cannot read pyscfg.ini using default values\n'
__config_dict = __PyscesConfigDefault
# Read config
for key in __config_dict:
if key == 'pitcon':
pitcon_switch = str2bool(__config_dict[key])
elif key == 'nleq2':
nleq2_switch = str2bool(__config_dict[key])
elif key == 'matplotlib':
__USE_MATPLOTLIB__ = str2bool(__config_dict[key])
elif key == 'matplotlib_backend':
__MATPLOTLIB_BACKEND__ = __config_dict[key]
elif key == 'gnuplot':
__USE_GNUPLOT__ = str2bool(__config_dict[key])
elif key == 'gnuplot_dir':
GNUPLOT_DIR = __config_dict[key]
if GNUPLOT_DIR == 'None':
GNUPLOT_DIR = None
elif key == 'silentstart':
__SILENT_START__ = str2bool(__config_dict[key])
assert inipath != None, '\nNo configuration file found'
if DEBUG: print time.strftime('3-%H:%M:%S')
__userdict = None
if os.sys.platform != 'win32':
if os.path.exists(os.path.join(os.path.expanduser('~'),'Pysces','.pys_usercfg.ini')):
__userdict = PyscesConfig.ReadConfig(os.path.join(os.path.expanduser('~'),'Pysces','.pys_usercfg.ini'), PyscesConfig.__DefaultPosixUsr)
else:
if not os.path.exists(os.path.join(os.path.expanduser('~'),'Pysces')):
os.makedirs(os.path.join(os.path.expanduser('~'),'Pysces'))
PyscesConfig.WriteConfig(os.path.join(os.path.expanduser('~'),'Pysces','.pys_usercfg.ini'),config=PyscesConfig.__DefaultPosixUsr, section='Pysces')
__userdict = PyscesConfig.ReadConfig(os.path.join(os.path.expanduser('~'),'Pysces','.pys_usercfg.ini'), PyscesConfig.__DefaultPosixUsr)
else:
if os.path.exists(os.path.join(os.getenv('HOMEDRIVE')+os.path.sep,'Pysces','.pys_usercfg.ini')):
__userdict = PyscesConfig.ReadConfig(os.path.join(os.getenv('HOMEDRIVE')+os.path.sep,'Pysces','.pys_usercfg.ini'), PyscesConfig.__DefaultWinUsr)
else:
if not os.path.exists(os.path.join(os.getenv('HOMEDRIVE')+os.path.sep,'Pysces')):
os.makedirs(os.path.join(os.getenv('HOMEDRIVE')+os.path.sep,'Pysces'))
PyscesConfig.WriteConfig(os.path.join(os.getenv('HOMEDRIVE')+os.path.sep,'Pysces','.pys_usercfg.ini'), config=PyscesConfig.__DefaultWinUsr, section='Pysces')
__userdict = PyscesConfig.ReadConfig(os.path.join(os.getenv('HOMEDRIVE')+os.path.sep,'Pysces','.pys_usercfg.ini'), PyscesConfig.__DefaultWinUsr)
for key in __userdict:
if key == 'output_dir':
output_dir = __userdict[key]
if not os.path.exists(__userdict[key]):
os.makedirs(__userdict[key])
elif key == 'model_dir':
model_dir = __userdict[key]
if not os.path.exists(__userdict[key]):
os.makedirs(__userdict[key])
elif key == 'matplotlib':
__USE_MATPLOTLIB__ = str2bool(__userdict[key])
elif key == 'matplotlib_backend':
__MATPLOTLIB_BACKEND__ = __userdict[key]
elif key == 'gnuplot':
__USE_GNUPLOT__ = str2bool(__userdict[key])
elif key == 'gnuplot_dir':
GNUPLOT_DIR = __userdict[key]
if GNUPLOT_DIR == 'None':
GNUPLOT_DIR = None
elif key == 'silentstart':
__SILENT_START__ = str2bool(__userdict[key])
assert output_dir != None, '\nNo output directory defined'
assert model_dir != None, '\nNo model directory defined'
del PyscesConfig
if DEBUG: print time.strftime('4-%H:%M:%S')
# initialise pysces.interface.*
try:
import PyscesInterfaces
interface = PyscesInterfaces.Core2interfaces()
except Exception, ex:
print 'INFO: pysces.interface.* not available'
print ex
interface = None
# initialise pysces.plt.*
import PyscesPlot2
gplt = None
mplt = None
if __USE_MATPLOTLIB__:
try:
mplt = PyscesPlot2.MatplotlibUPI(work_dir=output_dir, backend=__MATPLOTLIB_BACKEND__)
if not __SILENT_START__:
print 'Matplotlib interface loaded (pysces.plt.m)'
except Exception, ex:
print 'Matplotlib interface not available'
if DEBUG: print ex
__USE_MATPLOTLIB__ = False
if __USE_GNUPLOT__:
if GNUPLOT_DIR == None or not os.path.exists(GNUPLOT_DIR):
print '''GnuPlot has been enabled but the path to the executable has
not been defined (or does not exist). Please set the "gnuplot_dir" key
in your pyscfg.ini file.
'''
else:
try:
if DEBUG: print GNUPLOT_DIR
gplt = PyscesPlot2.GnuPlotUPI(work_dir=output_dir, gnuplot_dir=GNUPLOT_DIR)
if not __SILENT_START__:
print 'GnuPlot interface loaded (pysces.plt.g)'
except Exception, ex:
print 'GnuPlot interface not available'
if DEBUG: print ex
__USE_GNUPLOT__ = False
plt = None
if __USE_MATPLOTLIB__ or __USE_GNUPLOT__:
plt = PyscesPlot2.PyscesUPI()
if __USE_MATPLOTLIB__ and not __USE_GNUPLOT__:
plt.p_setInterface('matplotlib', mplt)
elif __USE_GNUPLOT__ and not __USE_MATPLOTLIB__:
plt.p_setInterface('gnuplot', gplt)
elif __USE_GNUPLOT__ and __USE_MATPLOTLIB__:
plt.p_setInterface('matplotlib', mplt)
plt.p_setInterface('gnuplot', gplt)
plt.p_deactivateInterface('gnuplot')
if DEBUG: print time.strftime('5-%H:%M:%S')
alt_import = False
alt_import_pitcon = False
alt_import_nleq2 = False
if os.sys.platform == 'win32':
if pitcon_switch:
os.sys.path.append(os.path.join(install_dir,'pitcon'))
try:
import pitcon.pitcon as pitcon
if not __SILENT_START__:
print 'Continuation routines available'
except Exception, ex:
try:
os.environ['path'] = '%s;%s' % (os.path.join(install_dir,'win32'), os.environ['path'])
import pitcon.pitcon as pitcon
if not __SILENT_START__:
print 'Continuation routines available'
except Exception, ex:
#print 'Attempting alternate pitcon import ...'
#alt_import = True
#alt_import_pitcon = True
print ex
print 'INFO: Pitcon import failed: continuation not available'
if nleq2_switch:
os.sys.path.append(os.path.join(install_dir,'nleq2'))
try:
import nleq2.nleq2 as nleq2
if not __SILENT_START__:
print 'NLEQ2 routines available'
except Exception, ex:
try:
os.environ['path'] = '%s;%s' % (os.path.join(install_dir,'win32'), os.environ['path'])
import nleq2.nleq2 as nleq2
if not __SILENT_START__:
print 'NLEQ2 routines available'
except Exception, ex:
#print 'Attempting alternate nleq2 import ...'
#alt_import = True
#alt_import_nleq2 = True
print ex
print 'INFO: NLEQ2 import failed: option not available'
else:
if pitcon_switch:
os.sys.path.append(os.path.join(install_dir,'pitcon'))
try:
import pitcon.pitcon as pitcon
if not __SILENT_START__:
print 'Pitcon routines available'
except Exception, ex:
#print ex
alt_import = True
alt_import_pitcon = True
print 'Attempting alternate pitcon import ...'
if nleq2_switch:
os.sys.path.append(os.path.join(install_dir,'nleq2'))
try:
import nleq2.nleq2 as nleq2
if not __SILENT_START__:
print 'NLEQ2 routines available'
except Exception, ex:
#print ex
alt_import = True
alt_import_nleq2 = True
print 'Attempting alternate nleq2 import ...'
if DEBUG: print time.strftime('6-%H:%M:%S')
if alt_import:
savedir = os.getcwd()
for tpath in os.sys.path:
if alt_import_pitcon:
try:
if os.path.exists(os.path.join(tpath,'pysces','pitcon')) and tpath != '':
os.chdir(os.path.join(tpath,'pysces','pitcon'))
import pitcon
if not __SILENT_START__:
print 'Continuation routines available (A)'
except Exception, ex:
print ex
print 'INFO: Alternate pitcon import failed: continuation not available'
if alt_import_nleq2:
try:
if os.path.exists(os.path.join(tpath,'pysces','nleq2')) and tpath != '':
os.chdir(os.path.join(tpath,'pysces','nleq2'))
import nleq2
if not __SILENT_START__:
print 'NLEQ2 routines available (A)'
except Exception, ex:
print ex
nleq2_switch = False
print 'INFO: Alternate NLEQ2 import failed: option not available'
os.chdir(savedir)
if DEBUG: print time.strftime('7-%H:%M:%S')
def setWorkPath(path):
"""
Sets the output or working directory.
- *path* a path/subdirectory (will be created if it doesn't exist)
"""
import pysces as P
import os as O
if not O.path.exists(path):
O.makedirs(path)
O.chdir(path)
try:
P.PyscesModel.OUTPUT_DIR = path
P.output_dir = path
if hasattr(P, 'plt') and hasattr(P.plt, 'm') and hasattr(P.plt.m, '__WORK_DIR__'):
P.plt.m.__WORK_DIR__ = path
if hasattr(P, 'plt') and hasattr(P.plt, 'g') and hasattr(P.plt.g, '__WORK_DIR__'):
P.plt.g.__WORK_DIR__ = path
except Exception, ex:
print ex
print 'Path change exception'
del P, O
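# Illustrative usage (hypothetical path): setWorkPath('/tmp/pysces_run')
# creates the directory if needed and redirects model and plot output to it.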
# This has to come at the end
from PyscesModel import PysMod as model
from PyscesModel import ScanDataObj as ScanDataObj
PyscesModel.interface = interface
from PyscesTest import PyscesTest as test
write = None
try:
from PyscesUtils import WriteOutput
write = WriteOutput()
del WriteOutput
except ImportError, ex:
pass
from PyscesScan import PITCONScanUtils, Scanner
try:
from RateChar import RateChar
if not __SILENT_START__:
print "RateChar is available"
except Exception, ex:
RateChar = None
#print "RateChar not available"
# ParScanner import
try:
from PyscesParScan import ParScanner
if not __SILENT_START__:
print "Parallel scanner is available"
except ImportError, ex:
ParScanner = None
print ex
print "INFO: Parallel scanner not available"
if DEBUG: print time.strftime('9-%H:%M:%S')
if not __SILENT_START__:
print '\nPySCeS environment\n******************'
print 'pysces.model_dir = ' + model_dir
print 'pysces.output_dir = ' + output_dir
print '\n\n***********************************************************************'
print '* Welcome to PySCeS (' + __version__ + ') - Python Simulator for Cellular Systems *'
print '* http://pysces.sourceforge.net *'
## print '* Somewhere In Time *'
print '* Copyright(C) B.G. Olivier, J.M. Rohwer, J.-H.S. Hofmeyr, 2004-2015 *'
print '* Triple-J Group for Molecular Cell Physiology *'
print '* Stellenbosch University, ZA and VU University Amsterdam, NL *'
print '* PySCeS is distributed under the PySCeS (BSD style) licence, see *'
print '* LICENCE.txt (supplied with this release) for details *'
## print '* ** Read about PySCeS ** *'
## print '* http://bioinformatics.oupjournals.org/cgi/content/short/21/4/560 *'
print '***********************************************************************'
try:
del os, key, gplt, mplt
except Exception, ex:
print ex
print '\n\nOops I did it again error ...\n\n'
if DEBUG: print time.strftime('10-%H:%M:%S')
| bsd-3-clause |
adykstra/mne-python | examples/time_frequency/plot_source_label_time_frequency.py | 11 | 3714 | """
=========================================================
Compute power and phase lock in label of the source space
=========================================================
Compute time-frequency maps of power and phase lock in the source space.
The inverse method is linear based on dSPM inverse operator.
The example also shows the difference in the time-frequency maps
when they are computed with and without subtracting the evoked response
from each epoch. The former results in induced activity only while the
latter also includes evoked (stimulus-locked) activity.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, source_induced_power
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
label_name = 'Aud-rh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
tmin, tmax, event_id = -0.2, 0.5, 2
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# Picks MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, include=include, exclude='bads')
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
# Load epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject,
preload=True)
# Compute a source estimate per frequency band including and excluding the
# evoked response
freqs = np.arange(7, 30, 2) # define frequencies of interest
label = mne.read_label(fname_label)
n_cycles = freqs / 3. # different number of cycle per frequency
# subtract the evoked response in order to exclude evoked activity
epochs_induced = epochs.copy().subtract_evoked()
plt.close('all')
for ii, (this_epochs, title) in enumerate(zip([epochs, epochs_induced],
['evoked + induced',
'induced only'])):
# compute the source space power and the inter-trial coherence
power, itc = source_induced_power(
this_epochs, inverse_operator, freqs, label, baseline=(-0.1, 0),
baseline_mode='percent', n_cycles=n_cycles, n_jobs=1)
power = np.mean(power, axis=0) # average over sources
itc = np.mean(itc, axis=0) # average over sources
times = epochs.times
##########################################################################
# View time-frequency plots
plt.subplots_adjust(0.1, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 2, 2 * ii + 1)
plt.imshow(20 * power,
extent=[times[0], times[-1], freqs[0], freqs[-1]],
aspect='auto', origin='lower', vmin=0., vmax=30., cmap='RdBu_r')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('Power (%s)' % title)
plt.colorbar()
plt.subplot(2, 2, 2 * ii + 2)
plt.imshow(itc,
extent=[times[0], times[-1], freqs[0], freqs[-1]],
aspect='auto', origin='lower', vmin=0, vmax=0.7,
cmap='RdBu_r')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('ITC (%s)' % title)
plt.colorbar()
plt.show()
| bsd-3-clause |
QCaudron/SmallPopTSIR | batch.py | 1 | 26077 | ## IMPORTS
# Numerical packages and methods
import numpy as np
from sklearn import linear_model, gaussian_process
from scipy.optimize import curve_fit
import scipy.stats as st
import scipy.interpolate as interp
import pandas as pd
import statsmodels.nonparametric.smoothers_lowess as lowess
# Monte Carlo and Nonlinear Fitting
import lmfit
# Plotting
import matplotlib.pyplot as plt
import matplotlib
import seaborn
colours = seaborn.color_palette("deep", 8)
# Other
import itertools
import os
import sys
import progressbar
import collections
# Locate the input data: the region/dataset name is given as the first command-line argument
prefix = "./data/" + sys.argv[1] + "/"
directory = [f for f in os.listdir(prefix) if f.endswith(".csv")]
names = [i.split(".")[0].capitalize() for i in directory]
# Params
#sensitivity = int(sys.argv[3])
periodicity = 24
penalty = 1e-3
delay = 8
vaccine = 1965
numSvals = 500
Alpha = 0.97 # set to None to infer
gam = 0. # set to None to infer
fixedseasonality = 0
# Define epidemics from a time-series
# This function is for the original time-series
def breakepis(x, sensitivity) :
x2 = x.copy()
x2[x2 < sensitivity] = 0
x2 = np.convolve(x2, np.hanning(9), "same")
z = np.where(x2 > 0)[0]#(np.where(x > sensitivity) and np.where(x2 > sensitivity))[0] # Find epidemics over sensitivity threshold
dz = np.where(np.append(np.insert(np.diff(z), 0, 0), -1) != 1)[0]
epi = []
s = []
d = []
for i in range(len(dz) - 1) :
epi.append(z[dz[i]+3:dz[i+1]-3])
for i, e in enumerate(epi) :
s.append(np.sum(x[e]))
d.append(len(e))
d = np.array(d)
epi = np.delete(epi, np.where(d == 1)[0])
s = np.delete(s, np.where(d == 1)[0])
d = np.delete(d, np.where(d == 1)[0])
z = []
for e in epi :
z = np.hstack((z, e))
return (np.array(epi), s, d, list(z))
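# Hedged usage sketch for breakepis() -- the toy series, threshold and helper
# name below are illustrative only and are never called by the batch run.
# It assumes the script's original environment (Python 2, legacy NumPy).
def _demo_breakepis() :
    toy = np.zeros(100)
    toy[10:20] = 50.   # one short synthetic outbreak
    toy[60:75] = 80.   # a second, longer outbreak
    epi, sizes, durations, support = breakepis(toy, 5)
    # epi       : one index array per detected epidemic
    # sizes     : total cases summed over each epidemic
    # durations : length (in time steps) of each epidemic
    # support   : concatenated indices of all epidemic time points
    return epi, sizes, durations, support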
# Define epidemics for simulated series
# This function constrains epi durations by the epidemics in the original series
def breaksims(x, sensitivity, realepi) :
starts = [e[0] for e in realepi]
starts.append(len(x))
epi = []
s = []
d = []
for i in range(len(starts)-1) :
try :
d.append(np.where(x[starts[i] : starts[i+1]] <= sensitivity)[0][0])
except IndexError :
d.append(starts[i+1] - starts[i])
s.append(np.sum(x[starts[i] : starts[i]+d[-1]]))
epi.append(range(starts[i], starts[i]+d[-1]))
return (epi, s, d)
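# Hedged note (illustrative only): breaksims() is intended to be called with the
# epidemics found by breakepis() on the observed series, e.g.
#   sim_epi, sim_sizes, sim_durations = breaksims(simulated_series, sensitivity, epi)
# where 'simulated_series' is a hypothetical name; each simulated epidemic then
# starts where a real one did and ends once the simulation drops below the threshold.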
# Bootstrap confidence intervals using time-series sampling
"""
# Bootstrapping
def bootstrap(data, M, statistic) :
# M times, sample with replacement
means = np.zeros((M, data.shape[1]))
for i in range(M) :
means[i, :] = statistic(data[np.random.randint(0, data.shape[0], 100), :], axis=0)
stat = np.sort(means, axis=0)
return (stat[int((alpha/2.0)*M)], stat[int((1-alpha/2.0)*M)])
#num_samples, statistic, alpha) :
#n = len(data)
#idx = np.random.randint(0, n, (num_samples, n))
#samples = data[idx]
"""
# Pad an array with the first and last values of Y so interpolation covers the full domain
def pad(X, Y) :
return np.append(np.insert(X, 0, Y[0]), Y[-1])
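# e.g. (illustrative): pad(np.array([2., 3.]), np.array([1., 2., 3., 4.])) -> array([1., 2., 3., 4.])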
# Selective downsampling of cumulative cases
def downsample(X, Y) :
# Interpolate over even grid
#X = np.convolve(X, np.hanning(21), "same")
x = pad(np.linspace(X[0], X[-1], 5*len(X)), X)
y = pad(interp.interp1d(X, Y)(x), Y)
# Calculate third derivative
dy = np.diff(y, n = 3)
dy[np.abs(dy) < 1] = 0
# Find zero-crossings
d = np.where((dy[:-1] > 0) * (dy[1:] <= 0))[0]
d = pad(d, range(len(x)))
return (x[d].reshape(len(x[d]),1), y[d])
def derivative(X, Y) :
# Central finite difference methods to eighth order coefficients
c = np.array([1./280, -4./105, 1./5, -4./5, 0, 4./5, -1./5, 4./105, -1./280])
# Supersample
x = np.linspace(X[0], X[-1], 10000)
y = interp.interp1d(X, Y, "linear")(x)
y[0] = 2*y[1] - y[2]
# Compute derivative using c
dx = np.diff(x)[0]
dy = np.zeros(len(y)-8)
for i in range(4, len(y)-4) :
dy[i-4] = np.sum(c * y[i-4:i+5]) / dx
    # The eighth-order stencil loses four points at each edge; stretch the interior estimate back onto the original grid
return interp.interp1d(np.linspace(X[0], X[-1], len(dy)), dy)(X)
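# Hedged sanity-check sketch for derivative(): compares the finite-difference
# estimate against the analytic derivative of sin(x). The helper name is
# illustrative and the function is never called during the batch run.
def _demo_derivative() :
    xx = np.linspace(0., 2.*np.pi, 200)
    approx = derivative(xx, np.sin(xx))
    exact = np.cos(xx)
    # interior points should agree well; the edges are extrapolated
    return np.max(np.abs(approx[10:-10] - exact[10:-10]))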
def qflatten(x) :
if isinstance(x, collections.Iterable):
return [a for i in x for a in qflatten(i)]
else:
return [x]
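# e.g. (illustrative): qflatten([[1, [2, 3]], 4]) -> [1, 2, 3, 4]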
# Results directory
if not os.path.isdir(prefix + "results") :
os.mkdir(prefix + "results")
# Now, for each file in the directory
for idx, file in enumerate(directory) :
# Sensitivities
if sys.argv[1] == "bornholm" :
sensitivity = 15 # 13
elif sys.argv[1] == "faroe" :
sensitivity = 15#21
else :
if idx == 0 :
sensitivity = 19#14
elif idx == 1 :
sensitivity = 8#19
elif idx == 2 :
sensitivity = 18#15
else :
sensitivity = 7#17
export_t = []
export_ts = []
export_pred = []
export_ciu = []
export_cid = []
export_rho = []
export_r = []
export_rup = []
export_rdn = []
export_sizex = []
export_sizey = []
export_sizeerrx = []
export_sizeerry = []
export_sizeerre = []
export_r2 = []
export_p = []
export_sn = []
export_grad = []
export_pearsonzero = []
export_pearson = []
export_sbar = []
export_Z = []
export_alpha = []
export_b = []
# Import data
data = pd.read_csv(prefix + file)
# Time and vaccinations
t = data["time"].values
    v = np.where(t > vaccine)[0][0] if t[-1] > vaccine else len(t)
t = t[:v]
# Births and cases
B = data["births"].values[:v]
C = data["reported_cases"].values[:v]
Nt = data["population"].values[:v]
# Find epidemics
#z = np.where(C > sensitivity)[0]
epi, reals, reald, z = breakepis(C, sensitivity)
# Plot
plt.figure()
plt.subplot(211)
plt.plot(t, C, linewidth=2)
plt.title(u"Reported Cases, %s" % names[idx].decode("utf-8"))
for e in epi :
plt.axvspan(t[e[0]], t[e[-1]], color = seaborn.color_palette("deep", 3)[2], alpha=0.3)
plt.xlabel("Time")
plt.ylabel("Cases")
plt.subplot(212)
plt.plot(t, B, linewidth=2)
plt.title(u"Live Births, %s" % names[idx].decode("utf-8"))
plt.xlabel("Time")
plt.ylabel("Births")
plt.tight_layout()
plt.savefig(prefix + u"results/%s_0_timeseries.pdf" % names[idx].decode("utf-8"))
print u"%s Time-Series done." % names[idx].decode("utf-8")
plt.close()
# Susceptible Reconstruction
# Compute cumulative births and incidence
Y = np.cumsum(B)
X = np.cumsum(C)
# Downsample the cumulative plot
#x, y = downsample(X, Y)
x = np.linspace(X[0], X[-1], len(X))
y = interp.interp1d(X, Y)(x)
y[0] = y[1] - (y[2] - y[1])
#x = x[:-1].reshape(len(x)-1,1)
# Rho
Yhat = gaussian_process.GaussianProcess(nugget = 1e-4)
Yhat.fit(x.reshape(len(x), 1), y)
Yhat = Yhat.predict(X.reshape(len(X), 1))
#Yhat = lowess.lowess(y, x.squeeze(), 0.5, return_sorted = False)
#Yhat = interp.interp1d(x, np.insert(Yhat, 0, Yhat[0]))(X)
#Yhat = lowess.lowess(Y, X.squeeze(), 0.5, return_sorted = False)
# Rho : put some splines through Yhat and take the derivative
rho = derivative(X, Yhat)
#interp.UnivariateSpline(x, Yhat.predict(x.reshape(len(x),1))).derivative()(np.linspace(X[0], X[-1], len(X)))
Z = Y - Yhat#.predict(X.reshape(len(X),1))
# Plots
plt.figure()
plt.subplot(221)
plt.plot(t, X, linewidth=2)
plt.plot(t, Y, linewidth=2)
plt.plot(t, Yhat, linewidth=2)#.predict(X.reshape(len(X), 1)), linewidth=2)
plt.title(u"Reported and Inferred Cases, %s" % names[idx].decode("utf-8"))
plt.legend(["Reported Cases", "Cumulative Births", "Inferred Cases"], loc=2)
plt.subplot(222)
plt.axhline(1./np.mean(rho), color="r", linewidth=2)
plt.plot(t, 1./rho, linewidth=2)
plt.ylim([0, np.max(1.1/rho)])
plt.title(r"Inferred Reporting Rate $1/\rho_t = %.2f$" % (1./np.mean(rho)))
#plt.legend([r"$E[1/\rho_t]=%.2f$" % (1./np.mean(rho))])
plt.subplot2grid((2, 2), (1, 0), colspan=2)
plt.plot(t, Z, linewidth=2)
plt.title("Susceptible Dynamics $Z_t$")
plt.xlabel("Time (years)")
plt.tight_layout()
plt.savefig(prefix + u"results/%s_1_susceptible_reconstruction.pdf" % names[idx].decode("utf-8"))
print u"%s Susceptible Reconstruction done." % names[idx].decode("utf-8")
plt.close()
    if np.any(1./rho > 1) and np.mean(rho) < 1 :
        print "Inferred reporting rate 1/rho exceeds 1; moving on to the next geography."
        continue
export_t.append(t)
export_ts.append(C)
export_rho.append(1./rho)
# Fit Sbar
# All possible values of Sbar
Svals = np.linspace(1, np.abs(np.min(Z))*10, numSvals)
# Likelihood of fit
l = np.zeros(len(Svals))
# Define our parameters
params = lmfit.Parameters()
if Alpha is None :
params.add("alpha", min=0.5, max=.9999, value=0.95) # Alpha
if gam is None :
params.add("gamma", min=0.0, max=1.0, value = 0.2)
for i in range(periodicity) : # Seasonalities
if fixedseasonality :
params.add("r", value=0.)
rstr = ["r" for i in z[1:]]
else :
params.add("r%d" % i, value=0.)
rstr = ["r%d" % (i % periodicity) for i in z[1:]]
# Objective function
def profile_residuals(params, rho, C, Z, z, Alpha, gamma, Sestimate) :
c = C.copy()
c[np.intersect1d(np.where(C == 0)[0], z).astype(int)] = 1
if Alpha is None :
alphafit = params["alpha"].value
if gamma is None :
gamma = params["gamma"].value
r = [params[i].value for i in rstr]
if np.isnan(Sestimate) :
Sestimate = params["Sest"].value
if Alpha is None :
return gamma * np.log(Nt[z[:-1]]) + alphafit * np.log(rho[z[:-1]]*c[z[:-1]]) + r + np.log(Sestimate + Z[z[:-1]]) - np.log(rho[z[1:]]*c[z[1:]])
else :
return gamma * np.log(Nt[z[:-1]]) + Alpha * np.log(rho[z[:-1]]*c[z[:-1]]) + r + np.log(Sestimate + Z[z[:-1]]) - np.log(rho[z[1:]]*c[z[1:]])
pbar = progressbar.ProgressBar(widgets = \
[progressbar.FormatLabel("Evaluating Sbar Likelihoods"), progressbar.Percentage(), progressbar.Bar(), progressbar.ETA()], \
maxval = numSvals)
pbar.start()
# Compute best fit for each possible Sbar
for i, Sestimate in enumerate(Svals) :
l[i] = lmfit.minimize(profile_residuals, params, args=(rho, C, Z, list(z), Alpha, gam, Sestimate), method="leastsq").chisqr
pbar.update(i)
pbar.finish()
# Purge NaN
Svals = np.delete(Svals, np.where(np.isnan(l)))
l = np.delete(l, np.where(np.isnan(l)))
# Fit window
#fitwindow = 15
#fitwindowL = np.min([fitwindow, np.argmin(l)])
#fitwindowR = np.min([fitwindow, len(Svals) - np.argmin(l)])
# Run again using scan estimate
params.add("Sest", value = Svals[np.argmin(l)])
L = lmfit.minimize(profile_residuals, params, args=(rho, C, Z, z, Alpha, gam, np.nan), method="leastsq")
# Extract parameters and errors
Sbar = L.params["Sest"].value
if fixedseasonality :
r = np.ones(periodicity) * np.exp(L.params["r"].value)
errup = np.ones(periodicity) * np.exp(np.log(r) + 2*L.params["r"].stderr)
errdn = np.ones(periodicity) * np.exp(np.log(r) - 2*L.params["r"].stderr)
else :
r = np.exp([L.params["r" + str(i)].value for i in range(periodicity)])
errup = np.exp(np.log(r) + [2*L.params["r" + str(i)].stderr for i in range(periodicity)])
errdn = np.exp(np.log(r) - [2*L.params["r" + str(i)].stderr for i in range(periodicity)])
alphaSbar = L.params["alpha"].value if Alpha is None else Alpha
gamma = L.params["gamma"].value if gam is None else gam
export_r.append(r)
export_sn.append((Sbar) / Nt)
export_sbar.append(Sbar)
export_Z.append(Z)
export_rup.append(errup)
export_rdn.append(errdn)
export_alpha.append(alphaSbar)
# Plot
plt.figure()
plt.subplot(121)
plt.axvline(x=Sbar, color="red", linewidth=2)
plt.loglog(Svals, l, linewidth=2)
plt.title(u"Goodness of Fit, %s" % names[idx].decode("utf-8"))
plt.xlabel(r"$\bar{S}$")
plt.ylabel(r"$\chi^2$")
plt.legend([r"$\bar{S}$ = %d, $\alpha$ = %.03f, $\gamma$ = %.03f" % (Sbar, alphaSbar, gamma)])
plt.subplot(122)
plt.plot(r, linewidth=3)
plt.fill_between(range(periodicity), errup, errdn, color=colours[0], alpha=0.3)
plt.xlim([0, periodicity])
plt.title("Periodicity")
plt.xlabel("Period")
plt.tight_layout()
plt.savefig(prefix + u"results/%s_2_meanS_periodicity.pdf" % names[idx].decode("utf-8"))
print "%s Mean S and Periodicity done." % names[idx].decode("utf-8")
plt.close()
# Simulations
allss = []
allsd = []
allrs = []
allrd = []
allepi = []
alliei = []
I = []
pbar = progressbar.ProgressBar(widgets = \
[progressbar.FormatLabel("Running Simulations"), progressbar.Percentage(), progressbar.Bar(), progressbar.ETA()], \
maxval = int(sys.argv[2]))
pbar.start()
for q in range(int(sys.argv[2])) :
predI = np.zeros_like(C)
predS = np.zeros_like(C)
starts = [e[0] for e in epi]
# Seed initial epidemic points
for index, e in enumerate(starts) :
ss = []
predI[e] = np.ceil(rho[e] * C[e])
predS[e] = np.ceil(Sbar + Z[e])
for i in range(e+1, len(C)) :
# Old Negative Binomial
"""
predI[i] = np.random.negative_binomial(max(np.round(predI[i-1])**alphaSbar, 1), \
max(np.round(predI[i-1])**alphaSbar, 1) / ( max(np.round(predI[i-1]), 1) + \
r[i % periodicity] * ( predI[i-1] ** alphaSbar ) * predS[i-1] / (Nt[i-1]**gamma)))
"""
# New Negative Binomial
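                # Hedged note on the parametrisation: with n = I_{t-1} and
                # p = 1/(1 + bsi), np.random.negative_binomial has mean
                # n*(1 - p)/p = I_{t-1}*bsi = r_t * S_{t-1} * I_{t-1}**alphaSbar,
                # i.e. the usual TSIR expected incidence.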
bsi = r[i % periodicity] * predS[i-1] * (predI[i-1] ** (alphaSbar-1)) if np.isfinite(predI[i-1] ** (alphaSbar-1)) else 0
predI[i] = np.random.negative_binomial(max(np.round(predI[i-1]), 1), \
1. / ( 1. + bsi ))
# BINOMIAL now
#predI[i] = np.random.binomial(predS[i-1], 1. - np.exp(- r[i % periodicity] * (predI[i-1] ** alphaSbar)))
predS[i] = max(B[max(i - delay, 0)] + predS[i-1] - predI[i], 0)
#simsizes.append(np.sum(predI[e:]))
#simduration.append(min(np.argmin(predI[e:]), maxdur[index]))
#realduration.append(np.argmin(C[e:] * rho[e:]))
#realsizes.append(np.sum(rho[e:e+realduration[-1]] * C[e:e+realduration[-1]]))
sepi, simsize, simdur = breaksims(predI / rho, sensitivity, epi)
allss.append(simsize)
allrs.append(reals)
allsd.append(simdur)
allrd.append(reald)
allepi.append(sepi)
alliei.append(np.diff(starts))# - simdur[:-1])
I.append(predI)
pbar.update(q)
pbar.finish()
# Clearing out failed epidemics
allss = np.array(allss)
allrs = np.array(allrs)
allsd = np.array(allsd)
allrd = np.array(allrd)
allepi = np.array(allepi)
allss2 = []
allrs2 = []
allsd2 = []
allrd2 = []
allepi2 = []
fs = []
means = np.mean(allss, axis=0)
meand = np.mean(allsd, axis=0)
for i, m in enumerate(means) :
if sum(allss[:, i] > 0.1 * m) > 0 :
fs.append(allss[:, i] > 0.1 * m)
allss2.append(allss[allss[:, i] > 0.1 * m, i])
allrs2.append(allrs[allss[:, i] > 0.1 * m, i])
allepi2.append(allepi[allss[:, i] > 0.1 * m, i])
if sum(allsd[:, i] > 0.1 * meand[i]) > 0 :
allsd2.append(allsd[allsd[:, i] > 0.1 * meand[i], i])
allrd2.append(allrd[allsd[:, i] > 0.1 * meand[i], i])
#allieisize = np.array([s[1:] for s in allss2]).ravel()
#allieidur = np.array([d[1:] for d in allsd2]).ravel()
allss = np.array(qflatten(allss2))
allrs = np.array(qflatten(allrs2))
allsd = np.array(qflatten(allsd2))
allrd = np.array(qflatten(allrd2))
allepi = allepi2
#allepi = np.array(qflatten(allepi))
#alliei = np.array(qflatten(alliei))
#idx = allrs > 500
#allss = allss[idx].reshape(np.sum(idx), 1)
#allrs = allrs[idx].reshape(np.sum(idx), 1)
sslope, sintercept, sr, sp, _ = st.linregress(allrs.squeeze(), allss.squeeze())
dslope, dintercept, dr, dp, _ = st.linregress(allrd.squeeze(), allsd.squeeze())
xs, xd = np.linspace(0, allrs.max(), 500), np.linspace(0, allrd.max(), 500)
ys, yd = sslope * xs + sintercept, dslope * xd + dintercept
#sfit = linear_model.BayesianRidge(fit_intercept=False)
#dfit = linear_model.BayesianRidge(fit_intercept=False)
#dfit.fit(allrd, allsd)
#sfit.fit(allrs, allss)
I = np.array(I)
    # Bootstrap confidence intervals
low = np.zeros(I.shape[1])
high = np.zeros(I.shape[1])
"""pbar = progressbar.ProgressBar(widgets = \
[progressbar.Percentage(), progressbar.Bar(), progressbar.ETA()], \
maxval = int(I.shape[1]))
#pbar.start()"""
# for i in range(I.shape[1]) :
# low[i], high[i] = bootstrap(I[:, i], 1000, np.mean, 0.95)
# pbar.update(i)
export_pred.append(np.mean(I, axis=0))
cid = []
ciu = []
for i in range(I.shape[1]) :
ff, xx = np.histogram(I[:, i], q/2)
integral = np.sum(ff)
normed = np.cumsum(ff).astype(float) / integral
down = np.where(normed >= 0.025)[0][0]
up = np.where(normed >= 0.975)[0][0]
cid.append(xx[down])
ciu.append(xx[up])
export_ciu.append(ciu)
export_cid.append(cid)
    # Keep time points where either the mean prediction or the observed incidence is appreciable
    zerocorrect = np.where((np.mean(I, axis=0) >= 0.5) | (rho*C >= 0.5))
export_pearsonzero.append(st.pearsonr(np.mean(I, axis=0)[zerocorrect], rho[zerocorrect]*C[zerocorrect])[0] **2)
export_pearson.append(st.pearsonr(np.mean(I, axis=0), rho*C)[0] **2)
# Plot
plt.figure()
#plt.fill_between(t, low, high, color = colours[2], linewidth=1, alpha=0.4)
plt.plot(t, np.mean(I, axis=0), color = colours[2], linewidth=2)
plt.plot(t, C*rho, c = colours[0], linewidth=2, alpha = 0.8)
plt.tight_layout()
plt.savefig(prefix + u"results/%s_3_predictions.pdf" % names[idx].decode("utf-8"))
print u"%s Predictions done." % names[idx].decode("utf-8")
plt.close()
errsx = [i[0] for i in allrs2]
errsy = [np.mean(i) for i in allss2]
errse = [2*np.std(i) for i in allss2]
errdx = [i[0] for i in allrd2]
errdy = [np.mean(i) for i in allsd2]
errde = [2*np.std(i) for i in allsd2]
export_sizex.append(xs)
export_sizey.append(ys)
export_sizeerrx.append(errsx)
export_sizeerry.append(errsy)
export_sizeerre.append(errse)
export_r2.append(sr**2)
export_grad.append(sslope)
export_p.append(sp)
# Size and Duration of Epidemics : Real vs Predicted
plt.figure()
plt.subplot(211)
plt.title(u"%s, Sizes : Slope = %.3f, Intercept = %.1f, R^2 = %.3f, p = %.02e" % (names[idx].decode("utf-8"), sslope, sintercept, sr**2, sp))
plt.xlabel("Real Size")
plt.ylabel("Simulated Size")
#nepi = len(allrs) / (q+1)
plt.errorbar(errsx, errsy, yerr = errse, fmt="o", ms=10, c=colours[2])
#plt.scatter(allrs, allss, alpha=0.3, c=colours[2], s=35)
plt.plot(xs, ys, linewidth=2, c=colours[0])
plt.subplot(212)
plt.title("Durations : Slope = %.3f, Intercept = %.1f, R^2 = %.3f, p = %.02e" % (dslope, dintercept, dr**2, dp))
plt.xlabel("Real Duration")
plt.ylabel("Simulated Duration")
plt.errorbar(errdx, errdy, yerr = errde, fmt="o", ms=10, c=colours[2])
#plt.scatter(allrd, allsd, alpha=0.3, c=colours[2], s=35)
plt.plot(xd, yd, linewidth=2, c=colours[0])
plt.tight_layout()
plt.savefig(prefix + u"results/%s_4_sizes_durations.pdf" % names[idx].decode("utf-8"))
print u"%s Sizes and Durations done." % names[idx].decode("utf-8")
plt.close()
# Susceptibles vs Sizes
s0 = np.array([Sbar + Z[e[0]] for e in epi])#np.array([np.mean(Sbar + Z[e]) for e in epi])
slopes0, intercepts0, rs0, ps0, _ = st.linregress(s0[reals > 20], reals[reals > 20])
s1 = []
for e in allepi :
for i in e :
s1.append(np.mean(Sbar + Z[i]))
slopes1, intercepts1, rs1, ps1, _ = st.linregress(s1, allss)
s0x = np.linspace(0, s0.max(), 500)
s0y = s0x * slopes0 + intercepts0
s1x = np.linspace(0, np.max(s1), 500)
s1y = s1x * slopes1 + intercepts1
# Plot
plt.figure()
plt.subplot(211)
plt.scatter(s0, reals, c = colours[0])
plt.plot(s0x, s0y, linewidth=2)
plt.title(u"%s S0 vs Real Size, Slope = %.3f, Intercept = %.1f, R^2 = %.3f, p = %.02e" % (names[idx].decode("utf-8"), slopes0, intercepts0, rs0**2, ps0))
plt.subplot(212)
plt.scatter(s1, allss, c = colours[0], alpha=0.3)
plt.plot(s1x, s1y, linewidth=2)
plt.title("S0 vs Simulated Size, Slope = %.3f, Intercept = %.1f, R^2 = %.3f, p = %.02e" % (slopes1, intercepts1, rs1**2, ps1))
plt.tight_layout()
plt.savefig(prefix + u"results/%s_5_s0_vs_size.pdf" % names[idx].decode("utf-8"))
print u"%s S0 vs Sizes done." % names[idx].decode("utf-8")
plt.close()
"""
# Inter-epidemic intervals
plt.figure()
ieix = np.linspace(alliei.min(), alliei.max(), 500)
rieiss, rieisi, rieisr, rieisp, _ = st.linregress(np.diff(starts), reals[1:])
rieids, rieidi, rieidr, rieidp, _ = st.linregress(np.diff(starts), reald[1:])
ieiss, ieisi, ieisr, ieisp, _ = st.linregress(alliei, allieisize)
ieids, ieidi, ieidr, ieidp, _ = st.linregress(alliei, allieidur)
plt.subplot(211)
plt.scatter(alliei, allieisize, alpha=0.2, c=colours[0])
plt.scatter(np.diff(starts), reals[1:], s=100, c=colours[2])
plt.plot(ieix, rieiss * ieix + rieisi, linewidth=2)
plt.plot(ieix, ieiss * ieix + ieisi, linewidth=2)
plt.title("%s, IEI vs Size : Slope = %.3f, Intercept = %.1f, R^2 = %.3f, p = %.02e" % (names[idx], ieiss, ieisi, ieisr, ieisp))
plt.xlabel("Interepidemic Interval (biweeks)")
plt.ylabel("Size of Epidemic")
plt.legend(["Real Fit", "Sim Fit", "Simulated", "Real"])
plt.subplot(212)
plt.scatter(alliei, allieidur, alpha=0.2, c=colours[0])
plt.scatter(np.diff(starts), reald[1:], s=100, c=colours[2])
plt.plot(ieix, rieids * ieix + rieidi, linewidth=2)
plt.plot(ieix, ieids * ieix + ieidi, linewidth=2)
plt.title("IEI vs Duration : Slope = %.3f, Intercept = %.1f, R^2 = %.3f, p = %.02e" % (ieids, ieidi, ieidr, ieidp))
plt.xlabel("Interepidemic Interval (biweeks)")
plt.ylabel("Duration of Epidemic")
plt.legend(["Real Fit", "Sim Fit", "Simulated", "Real"])
plt.tight_layout()
plt.savefig(prefix + "results/%s_6_iei.pdf" % names[idx].decode("utf-8"))
print "IEI done."
"""
# R estimates
plt.figure(figsize=(16,9), dpi=600)
# Starting points of each epidemic
L = []
for e in epi[:-1] :
L.append(len(e))
L = np.cumsum(L)
# R vs t
Reff = C[1:].astype(float) / C[:-1]
# S0 at beginning of each epi, and max R for each of those epis
S0 = [Sbar + Z[e[0]] for e in epi]
Rm = [Reff[e].max() for e in epi]
plt.subplot2grid((2,2), (0,0))
plt.plot(C[z], linewidth=2)
for e in L :
plt.axvline(e, c=colours[2])
plt.title(u"%s, Incidence" % names[idx].decode("utf-8"))
plt.ylabel("Cases")
plt.subplot2grid((2, 2), (1, 0))
plt.plot(Reff[z], linewidth=2)
plt.axhline(1, c=colours[1])
for e in L :
plt.axvline(e, c=colours[2])
plt.title("Effection Reproduction Ratio")
plt.ylabel("$R_{eff}$")
plt.subplot2grid((2,2), (0, 1), rowspan=2)
plt.scatter(S0, Rm)
plt.title("$R_0$ as Max $R_{eff}$ vs $S_0$")
plt.xlabel("$S_0$")
plt.ylabel("max[$R_eff$]")
plt.tight_layout()
plt.savefig(prefix + u"results/%s_7_r0.pdf" % names[idx].decode("utf-8"))
print "R0 done."
starts = [e[0] for e in epi]
b = []
for s in range(len(starts)-1) :
b.append(np.sum(B[starts[s] : starts[s+1]]))
export_b.append(b)
pd.DataFrame({ "t" : export_t,
"ts" : export_ts,
"pred" : export_pred,
"predciu" : export_ciu,
"predcid" : export_cid,
"rho" : export_rho,
"r" : export_r,
"rup" : export_rup,
"rdn" : export_rdn,
"sizex" : export_sizex,
"sizey" : export_sizey,
"sizeerrx" : export_sizeerrx,
"sizeerry" : export_sizeerry,
"sizeerre" : export_sizeerre,
"r2" : export_r2,
"p" : export_p,
"pearson" : export_pearson,
"pearsonzero" : export_pearsonzero,
"sn" : export_sn,
"grad" : export_grad,
"sbar" : export_sbar,
"Z" : export_Z,
"alpha": export_alpha,
"b" : export_b
}).to_json(u"paper/figures/%s.json" % names[idx].decode("utf-8"))
| mit |
nmayorov/scipy | doc/source/tutorial/examples/optimize_global_1.py | 15 | 1752 | import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
def eggholder(x):
return (-(x[1] + 47) * np.sin(np.sqrt(abs(x[0]/2 + (x[1] + 47))))
-x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47)))))
bounds = [(-512, 512), (-512, 512)]
x = np.arange(-512, 513)
y = np.arange(-512, 513)
xgrid, ygrid = np.meshgrid(x, y)
xy = np.stack([xgrid, ygrid])
results = dict()
results['shgo'] = optimize.shgo(eggholder, bounds)
results['DA'] = optimize.dual_annealing(eggholder, bounds)
results['DE'] = optimize.differential_evolution(eggholder, bounds)
results['BH'] = optimize.basinhopping(eggholder, bounds)
results['shgo_sobol'] = optimize.shgo(eggholder, bounds, n=200, iters=5,
sampling_method='sobol')
fig = plt.figure(figsize=(4.5, 4.5))
ax = fig.add_subplot(111)
im = ax.imshow(eggholder(xy), interpolation='bilinear', origin='lower',
cmap='gray')
ax.set_xlabel('x')
ax.set_ylabel('y')
def plot_point(res, marker='o', color=None):
ax.plot(512+res.x[0], 512+res.x[1], marker=marker, color=color, ms=10)
plot_point(results['BH'], color='y') # basinhopping - yellow
plot_point(results['DE'], color='c') # differential_evolution - cyan
plot_point(results['DA'], color='w') # dual_annealing. - white
# SHGO produces multiple minima, plot them all (with a smaller marker size)
plot_point(results['shgo'], color='r', marker='+')
plot_point(results['shgo_sobol'], color='r', marker='x')
for i in range(results['shgo_sobol'].xl.shape[0]):
ax.plot(512 + results['shgo_sobol'].xl[i, 0],
512 + results['shgo_sobol'].xl[i, 1],
'ro', ms=2)
ax.set_xlim([-4, 514*2])
ax.set_ylim([-4, 514*2])
fig.tight_layout()
plt.show()
| bsd-3-clause |